charm-2.1.1/src/0000775000175000017500000000000012672604473012375 5ustar marcomarcocharm-2.1.1/src/gopkg.in/0000775000175000017500000000000012672604603014104 5ustar marcomarcocharm-2.1.1/src/gopkg.in/natefinch/0000775000175000017500000000000012672604527016050 5ustar marcomarcocharm-2.1.1/src/gopkg.in/natefinch/lumberjack.v2/0000775000175000017500000000000012672604530020507 5ustar marcomarcocharm-2.1.1/src/gopkg.in/natefinch/lumberjack.v2/LICENSE0000664000175000017500000000206512672604530021517 0ustar marcomarcoThe MIT License (MIT) Copyright (c) 2014 Nate Finch Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.charm-2.1.1/src/gopkg.in/natefinch/lumberjack.v2/lumberjack_test.go0000664000175000017500000003671412672604530024227 0ustar marcomarcopackage lumberjack import ( "encoding/json" "fmt" "io/ioutil" "os" "path/filepath" "testing" "time" "github.com/BurntSushi/toml" "gopkg.in/yaml.v2" ) // !!!NOTE!!! 
// // Running these tests in parallel will almost certainly cause sporadic (or even // regular) failures, because they're all messing with the same global variable // that controls the logic's mocked time.Now. So... don't do that. // Since all the tests uses the time to determine filenames etc, we need to // control the wall clock as much as possible, which means having a wall clock // that doesn't change unless we want it to. var fakeCurrentTime = time.Now() func fakeTime() time.Time { return fakeCurrentTime } func TestNewFile(t *testing.T) { currentTime = fakeTime dir := makeTempDir("TestNewFile", t) defer os.RemoveAll(dir) l := &Logger{ Filename: logFile(dir), } defer l.Close() b := []byte("boo!") n, err := l.Write(b) isNil(err, t) equals(len(b), n, t) existsWithLen(logFile(dir), n, t) fileCount(dir, 1, t) } func TestOpenExisting(t *testing.T) { currentTime = fakeTime dir := makeTempDir("TestOpenExisting", t) defer os.RemoveAll(dir) filename := logFile(dir) data := []byte("foo!") err := ioutil.WriteFile(filename, data, 0644) isNil(err, t) existsWithLen(filename, len(data), t) l := &Logger{ Filename: filename, } defer l.Close() b := []byte("boo!") n, err := l.Write(b) isNil(err, t) equals(len(b), n, t) // make sure the file got appended existsWithLen(filename, len(data)+n, t) // make sure no other files were created fileCount(dir, 1, t) } func TestWriteTooLong(t *testing.T) { currentTime = fakeTime megabyte = 1 dir := makeTempDir("TestWriteTooLong", t) defer os.RemoveAll(dir) l := &Logger{ Filename: logFile(dir), MaxSize: 5, } defer l.Close() b := []byte("booooooooooooooo!") n, err := l.Write(b) notNil(err, t) equals(0, n, t) equals(err.Error(), fmt.Sprintf("write length %d exceeds maximum file size %d", len(b), l.MaxSize), t) _, err = os.Stat(logFile(dir)) assert(os.IsNotExist(err), t, "File exists, but should not have been created") } func TestMakeLogDir(t *testing.T) { currentTime = fakeTime dir := time.Now().Format("TestMakeLogDir" + backupTimeFormat) dir = 
filepath.Join(os.TempDir(), dir) defer os.RemoveAll(dir) filename := logFile(dir) l := &Logger{ Filename: filename, } defer l.Close() b := []byte("boo!") n, err := l.Write(b) isNil(err, t) equals(len(b), n, t) existsWithLen(logFile(dir), n, t) fileCount(dir, 1, t) } func TestDefaultFilename(t *testing.T) { currentTime = fakeTime dir := os.TempDir() filename := filepath.Join(dir, filepath.Base(os.Args[0])+"-lumberjack.log") defer os.Remove(filename) l := &Logger{} defer l.Close() b := []byte("boo!") n, err := l.Write(b) isNil(err, t) equals(len(b), n, t) existsWithLen(filename, n, t) } func TestAutoRotate(t *testing.T) { currentTime = fakeTime megabyte = 1 dir := makeTempDir("TestAutoRotate", t) defer os.RemoveAll(dir) filename := logFile(dir) l := &Logger{ Filename: filename, MaxSize: 10, } defer l.Close() b := []byte("boo!") n, err := l.Write(b) isNil(err, t) equals(len(b), n, t) existsWithLen(filename, n, t) fileCount(dir, 1, t) newFakeTime() b2 := []byte("foooooo!") n, err = l.Write(b2) isNil(err, t) equals(len(b2), n, t) // the old logfile should be moved aside and the main logfile should have // only the last write in it. existsWithLen(filename, n, t) // the backup file will use the current fake time and have the old contents. 
existsWithLen(backupFile(dir), len(b), t) fileCount(dir, 2, t) } func TestFirstWriteRotate(t *testing.T) { currentTime = fakeTime megabyte = 1 dir := makeTempDir("TestFirstWriteRotate", t) defer os.RemoveAll(dir) filename := logFile(dir) l := &Logger{ Filename: filename, MaxSize: 10, } defer l.Close() start := []byte("boooooo!") err := ioutil.WriteFile(filename, start, 0600) isNil(err, t) newFakeTime() // this would make us rotate b := []byte("fooo!") n, err := l.Write(b) isNil(err, t) equals(len(b), n, t) existsWithLen(filename, n, t) existsWithLen(backupFile(dir), len(start), t) fileCount(dir, 2, t) } func TestMaxBackups(t *testing.T) { currentTime = fakeTime megabyte = 1 dir := makeTempDir("TestMaxBackups", t) defer os.RemoveAll(dir) filename := logFile(dir) l := &Logger{ Filename: filename, MaxSize: 10, MaxBackups: 1, } defer l.Close() b := []byte("boo!") n, err := l.Write(b) isNil(err, t) equals(len(b), n, t) existsWithLen(filename, n, t) fileCount(dir, 1, t) newFakeTime() // this will put us over the max b2 := []byte("foooooo!") n, err = l.Write(b2) isNil(err, t) equals(len(b2), n, t) // this will use the new fake time secondFilename := backupFile(dir) existsWithLen(secondFilename, len(b), t) // make sure the old file still exists with the same size. existsWithLen(filename, n, t) fileCount(dir, 2, t) newFakeTime() // this will make us rotate again n, err = l.Write(b2) isNil(err, t) equals(len(b2), n, t) // this will use the new fake time thirdFilename := backupFile(dir) existsWithLen(thirdFilename, len(b2), t) existsWithLen(filename, n, t) // we need to wait a little bit since the files get deleted on a different // goroutine. 
<-time.After(time.Millisecond * 10) // should only have two files in the dir still fileCount(dir, 2, t) // second file name should still exist existsWithLen(thirdFilename, len(b2), t) // should have deleted the first backup notExist(secondFilename, t) // now test that we don't delete directories or non-logfile files newFakeTime() // create a file that is close to but different from the logfile name. // It shouldn't get caught by our deletion filters. notlogfile := logFile(dir) + ".foo" err = ioutil.WriteFile(notlogfile, []byte("data"), 0644) isNil(err, t) // Make a directory that exactly matches our log file filters... it still // shouldn't get caught by the deletion filter since it's a directory. notlogfiledir := backupFile(dir) err = os.Mkdir(notlogfiledir, 0700) isNil(err, t) newFakeTime() // this will make us rotate again n, err = l.Write(b2) isNil(err, t) equals(len(b2), n, t) // this will use the new fake time fourthFilename := backupFile(dir) existsWithLen(fourthFilename, len(b2), t) // we need to wait a little bit since the files get deleted on a different // goroutine. <-time.After(time.Millisecond * 10) // We should have four things in the directory now - the 2 log files, the // not log file, and the directory fileCount(dir, 4, t) // third file name should still exist existsWithLen(filename, n, t) existsWithLen(fourthFilename, len(b2), t) // should have deleted the first filename notExist(thirdFilename, t) // the not-a-logfile should still exist exists(notlogfile, t) // the directory exists(notlogfiledir, t) } func TestCleanupExistingBackups(t *testing.T) { // test that if we start with more backup files than we're supposed to have // in total, that extra ones get cleaned up when we rotate. 
currentTime = fakeTime megabyte = 1 dir := makeTempDir("TestCleanupExistingBackups", t) defer os.RemoveAll(dir) // make 3 backup files data := []byte("data") backup := backupFile(dir) err := ioutil.WriteFile(backup, data, 0644) isNil(err, t) newFakeTime() backup = backupFile(dir) err = ioutil.WriteFile(backup, data, 0644) isNil(err, t) newFakeTime() backup = backupFile(dir) err = ioutil.WriteFile(backup, data, 0644) isNil(err, t) // now create a primary log file with some data filename := logFile(dir) err = ioutil.WriteFile(filename, data, 0644) isNil(err, t) l := &Logger{ Filename: filename, MaxSize: 10, MaxBackups: 1, } defer l.Close() newFakeTime() b2 := []byte("foooooo!") n, err := l.Write(b2) isNil(err, t) equals(len(b2), n, t) // we need to wait a little bit since the files get deleted on a different // goroutine. <-time.After(time.Millisecond * 10) // now we should only have 2 files left - the primary and one backup fileCount(dir, 2, t) } func TestMaxAge(t *testing.T) { currentTime = fakeTime megabyte = 1 dir := makeTempDir("TestMaxAge", t) defer os.RemoveAll(dir) filename := logFile(dir) l := &Logger{ Filename: filename, MaxSize: 10, MaxAge: 1, } defer l.Close() b := []byte("boo!") n, err := l.Write(b) isNil(err, t) equals(len(b), n, t) existsWithLen(filename, n, t) fileCount(dir, 1, t) // two days later newFakeTime() b2 := []byte("foooooo!") n, err = l.Write(b2) isNil(err, t) equals(len(b2), n, t) existsWithLen(backupFile(dir), len(b), t) // we need to wait a little bit since the files get deleted on a different // goroutine. <-time.After(10 * time.Millisecond) // We should still have 2 log files, since the most recent backup was just // created. 
fileCount(dir, 2, t) existsWithLen(filename, len(b2), t) // we should have deleted the old file due to being too old existsWithLen(backupFile(dir), len(b), t) // two days later newFakeTime() b3 := []byte("foooooo!") n, err = l.Write(b2) isNil(err, t) equals(len(b3), n, t) existsWithLen(backupFile(dir), len(b2), t) // we need to wait a little bit since the files get deleted on a different // goroutine. <-time.After(10 * time.Millisecond) // We should have 2 log files - the main log file, and the most recent // backup. The earlier backup is past the cutoff and should be gone. fileCount(dir, 2, t) existsWithLen(filename, len(b3), t) // we should have deleted the old file due to being too old existsWithLen(backupFile(dir), len(b2), t) } func TestOldLogFiles(t *testing.T) { currentTime = fakeTime megabyte = 1 dir := makeTempDir("TestOldLogFiles", t) defer os.RemoveAll(dir) filename := logFile(dir) data := []byte("data") err := ioutil.WriteFile(filename, data, 07) isNil(err, t) // This gives us a time with the same precision as the time we get from the // timestamp in the name. 
t1, err := time.Parse(backupTimeFormat, fakeTime().UTC().Format(backupTimeFormat)) isNil(err, t) backup := backupFile(dir) err = ioutil.WriteFile(backup, data, 07) isNil(err, t) newFakeTime() t2, err := time.Parse(backupTimeFormat, fakeTime().UTC().Format(backupTimeFormat)) isNil(err, t) backup2 := backupFile(dir) err = ioutil.WriteFile(backup2, data, 07) isNil(err, t) l := &Logger{Filename: filename} files, err := l.oldLogFiles() isNil(err, t) equals(2, len(files), t) // should be sorted by newest file first, which would be t2 equals(t2, files[0].timestamp, t) equals(t1, files[1].timestamp, t) } func TestTimeFromName(t *testing.T) { l := &Logger{Filename: "/var/log/myfoo/foo.log"} prefix, ext := l.prefixAndExt() val := l.timeFromName("foo-2014-05-04T14-44-33.555.log", prefix, ext) equals("2014-05-04T14-44-33.555", val, t) val = l.timeFromName("foo-2014-05-04T14-44-33.555", prefix, ext) equals("", val, t) val = l.timeFromName("2014-05-04T14-44-33.555.log", prefix, ext) equals("", val, t) val = l.timeFromName("foo.log", prefix, ext) equals("", val, t) } func TestLocalTime(t *testing.T) { currentTime = fakeTime megabyte = 1 dir := makeTempDir("TestLocalTime", t) defer os.RemoveAll(dir) l := &Logger{ Filename: logFile(dir), MaxSize: 10, LocalTime: true, } defer l.Close() b := []byte("boo!") n, err := l.Write(b) isNil(err, t) equals(len(b), n, t) b2 := []byte("fooooooo!") n2, err := l.Write(b2) isNil(err, t) equals(len(b2), n2, t) existsWithLen(logFile(dir), n2, t) existsWithLen(backupFileLocal(dir), n, t) } func TestRotate(t *testing.T) { currentTime = fakeTime dir := makeTempDir("TestRotate", t) defer os.RemoveAll(dir) filename := logFile(dir) l := &Logger{ Filename: filename, MaxBackups: 1, MaxSize: 100, // megabytes } defer l.Close() b := []byte("boo!") n, err := l.Write(b) isNil(err, t) equals(len(b), n, t) existsWithLen(filename, n, t) fileCount(dir, 1, t) newFakeTime() err = l.Rotate() isNil(err, t) // we need to wait a little bit since the files get deleted on 
a different // goroutine. <-time.After(10 * time.Millisecond) filename2 := backupFile(dir) existsWithLen(filename2, n, t) existsWithLen(filename, 0, t) fileCount(dir, 2, t) newFakeTime() err = l.Rotate() isNil(err, t) // we need to wait a little bit since the files get deleted on a different // goroutine. <-time.After(10 * time.Millisecond) filename3 := backupFile(dir) existsWithLen(filename3, 0, t) existsWithLen(filename, 0, t) fileCount(dir, 2, t) b2 := []byte("foooooo!") n, err = l.Write(b2) isNil(err, t) equals(len(b2), n, t) // this will use the new fake time existsWithLen(filename, n, t) } func TestJson(t *testing.T) { data := []byte(` { "filename": "foo", "maxsize": 5, "maxage": 10, "maxbackups": 3, "localtime": true }`[1:]) l := Logger{} err := json.Unmarshal(data, &l) isNil(err, t) equals("foo", l.Filename, t) equals(5, l.MaxSize, t) equals(10, l.MaxAge, t) equals(3, l.MaxBackups, t) equals(true, l.LocalTime, t) } func TestYaml(t *testing.T) { data := []byte(` filename: foo maxsize: 5 maxage: 10 maxbackups: 3 localtime: true`[1:]) l := Logger{} err := yaml.Unmarshal(data, &l) isNil(err, t) equals("foo", l.Filename, t) equals(5, l.MaxSize, t) equals(10, l.MaxAge, t) equals(3, l.MaxBackups, t) equals(true, l.LocalTime, t) } func TestToml(t *testing.T) { data := ` filename = "foo" maxsize = 5 maxage = 10 maxbackups = 3 localtime = true`[1:] l := Logger{} md, err := toml.Decode(data, &l) isNil(err, t) equals("foo", l.Filename, t) equals(5, l.MaxSize, t) equals(10, l.MaxAge, t) equals(3, l.MaxBackups, t) equals(true, l.LocalTime, t) equals(0, len(md.Undecoded()), t) } // makeTempDir creates a file with a semi-unique name in the OS temp directory. // It should be based on the name of the test, to keep parallel tests from // colliding, and must be cleaned up after the test is finished. 
func makeTempDir(name string, t testing.TB) string { dir := time.Now().Format(name + backupTimeFormat) dir = filepath.Join(os.TempDir(), dir) isNilUp(os.Mkdir(dir, 0777), t, 1) return dir } // existsWithLen checks that the given file exists and has the correct length. func existsWithLen(path string, length int, t testing.TB) { info, err := os.Stat(path) isNilUp(err, t, 1) equalsUp(int64(length), info.Size(), t, 1) } // logFile returns the log file name in the given directory for the current fake // time. func logFile(dir string) string { return filepath.Join(dir, "foobar.log") } func backupFile(dir string) string { return filepath.Join(dir, "foobar-"+fakeTime().UTC().Format(backupTimeFormat)+".log") } func backupFileLocal(dir string) string { return filepath.Join(dir, "foobar-"+fakeTime().Format(backupTimeFormat)+".log") } // logFileLocal returns the log file name in the given directory for the current // fake time using the local timezone. func logFileLocal(dir string) string { return filepath.Join(dir, fakeTime().Format(backupTimeFormat)) } // fileCount checks that the number of files in the directory is exp. func fileCount(dir string, exp int, t testing.TB) { files, err := ioutil.ReadDir(dir) isNilUp(err, t, 1) // Make sure no other files were created. equalsUp(exp, len(files), t, 1) } // newFakeTime sets the fake "current time" to two days later. 
func newFakeTime() { fakeCurrentTime = fakeCurrentTime.Add(time.Hour * 24 * 2) } func notExist(path string, t testing.TB) { _, err := os.Stat(path) assertUp(os.IsNotExist(err), t, 1, "expected to get os.IsNotExist, but instead got %v", err) } func exists(path string, t testing.TB) { _, err := os.Stat(path) assertUp(err == nil, t, 1, "expected file to exist, but got error from os.Stat: %v", err) } charm-2.1.1/src/gopkg.in/natefinch/lumberjack.v2/example_test.go0000664000175000017500000000060612672604530023532 0ustar marcomarcopackage lumberjack_test import ( "log" "gopkg.in/natefinch/lumberjack.v2" ) // To use lumberjack with the standard library's log package, just pass it into // the SetOutput function when your application starts. func Example() { log.SetOutput(&lumberjack.Logger{ Filename: "/var/log/myapp/foo.log", MaxSize: 500, // megabytes MaxBackups: 3, MaxAge: 28, // days }) } charm-2.1.1/src/gopkg.in/natefinch/lumberjack.v2/lumberjack.go0000664000175000017500000002740212672604530023162 0ustar marcomarco// Package lumberjack provides a rolling logger. // // Note that this is v2.0 of lumberjack, and should be imported using gopkg.in // thusly: // // import "gopkg.in/natefinch/lumberjack.v2" // // The package name remains simply lumberjack, and the code resides at // https://github.com/natefinch/lumberjack under the v2.0 branch. // // Lumberjack is intended to be one part of a logging infrastructure. // It is not an all-in-one solution, but instead is a pluggable // component at the bottom of the logging stack that simply controls the files // to which logs are written. // // Lumberjack plays well with any logging package that can write to an // io.Writer, including the standard library's log package. // // Lumberjack assumes that only one process is writing to the output files. // Using the same lumberjack configuration from multiple processes on the same // machine will result in improper behavior. 
package lumberjack import ( "fmt" "io" "io/ioutil" "os" "path/filepath" "sort" "strings" "sync" "time" ) const ( backupTimeFormat = "2006-01-02T15-04-05.000" defaultMaxSize = 100 ) // ensure we always implement io.WriteCloser var _ io.WriteCloser = (*Logger)(nil) // Logger is an io.WriteCloser that writes to the specified filename. // // Logger opens or creates the logfile on first Write. If the file exists and // is less than MaxSize megabytes, lumberjack will open and append to that file. // If the file exists and its size is >= MaxSize megabytes, the file is renamed // by putting the current time in a timestamp in the name immediately before the // file's extension (or the end of the filename if there's no extension). A new // log file is then created using original filename. // // Whenever a write would cause the current log file exceed MaxSize megabytes, // the current file is closed, renamed, and a new log file created with the // original name. Thus, the filename you give Logger is always the "current" log // file. // // Cleaning Up Old Log Files // // Whenever a new logfile gets created, old log files may be deleted. The most // recent files according to the encoded timestamp will be retained, up to a // number equal to MaxBackups (or all of them if MaxBackups is 0). Any files // with an encoded timestamp older than MaxAge days are deleted, regardless of // MaxBackups. Note that the time encoded in the timestamp is the rotation // time, which may differ from the last time that file was written to. // // If MaxBackups and MaxAge are both 0, no old log files will be deleted. type Logger struct { // Filename is the file to write logs to. Backup log files will be retained // in the same directory. It uses -lumberjack.log in // os.TempDir() if empty. Filename string `json:"filename" yaml:"filename"` // MaxSize is the maximum size in megabytes of the log file before it gets // rotated. It defaults to 100 megabytes. 
MaxSize int `json:"maxsize" yaml:"maxsize"` // MaxAge is the maximum number of days to retain old log files based on the // timestamp encoded in their filename. Note that a day is defined as 24 // hours and may not exactly correspond to calendar days due to daylight // savings, leap seconds, etc. The default is not to remove old log files // based on age. MaxAge int `json:"maxage" yaml:"maxage"` // MaxBackups is the maximum number of old log files to retain. The default // is to retain all old log files (though MaxAge may still cause them to get // deleted.) MaxBackups int `json:"maxbackups" yaml:"maxbackups"` // LocalTime determines if the time used for formatting the timestamps in // backup files is the computer's local time. The default is to use UTC // time. LocalTime bool `json:"localtime" yaml:"localtime"` size int64 file *os.File mu sync.Mutex } var ( // currentTime exists so it can be mocked out by tests. currentTime = time.Now // os_Stat exists so it can be mocked out by tests. os_Stat = os.Stat // megabyte is the conversion factor between MaxSize and bytes. It is a // variable so tests can mock it out and not need to write megabytes of data // to disk. megabyte = 1024 * 1024 ) // Write implements io.Writer. If a write would cause the log file to be larger // than MaxSize, the file is closed, renamed to include a timestamp of the // current time, and a new log file is created using the original log file name. // If the length of the write is greater than MaxSize, an error is returned. 
func (l *Logger) Write(p []byte) (n int, err error) { l.mu.Lock() defer l.mu.Unlock() writeLen := int64(len(p)) if writeLen > l.max() { return 0, fmt.Errorf( "write length %d exceeds maximum file size %d", writeLen, l.max(), ) } if l.file == nil { if err = l.openExistingOrNew(len(p)); err != nil { return 0, err } } if l.size+writeLen > l.max() { if err := l.rotate(); err != nil { return 0, err } } n, err = l.file.Write(p) l.size += int64(n) return n, err } // Close implements io.Closer, and closes the current logfile. func (l *Logger) Close() error { l.mu.Lock() defer l.mu.Unlock() return l.close() } // close closes the file if it is open. func (l *Logger) close() error { if l.file == nil { return nil } err := l.file.Close() l.file = nil return err } // Rotate causes Logger to close the existing log file and immediately create a // new one. This is a helper function for applications that want to initiate // rotations outside of the normal rotation rules, such as in response to // SIGHUP. After rotating, this initiates a cleanup of old log files according // to the normal rules. func (l *Logger) Rotate() error { l.mu.Lock() defer l.mu.Unlock() return l.rotate() } // rotate closes the current file, moves it aside with a timestamp in the name, // (if it exists), opens a new file with the original filename, and then runs // cleanup. func (l *Logger) rotate() error { if err := l.close(); err != nil { return err } if err := l.openNew(); err != nil { return err } return l.cleanup() } // openNew opens a new log file for writing, moving any old log file out of the // way. This methods assumes the file has already been closed. func (l *Logger) openNew() error { err := os.MkdirAll(l.dir(), 0744) if err != nil { return fmt.Errorf("can't make directories for new logfile: %s", err) } name := l.filename() mode := os.FileMode(0644) info, err := os_Stat(name) if err == nil { // Copy the mode off the old logfile. 
mode = info.Mode() // move the existing file newname := backupName(name, l.LocalTime) if err := os.Rename(name, newname); err != nil { return fmt.Errorf("can't rename log file: %s", err) } // this is a no-op anywhere but linux if err := chown(name, info); err != nil { return err } } // we use truncate here because this should only get called when we've moved // the file ourselves. if someone else creates the file in the meantime, // just wipe out the contents. f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode) if err != nil { return fmt.Errorf("can't open new logfile: %s", err) } l.file = f l.size = 0 return nil } // backupName creates a new filename from the given name, inserting a timestamp // between the filename and the extension, using the local time if requested // (otherwise UTC). func backupName(name string, local bool) string { dir := filepath.Dir(name) filename := filepath.Base(name) ext := filepath.Ext(filename) prefix := filename[:len(filename)-len(ext)] t := currentTime() if !local { t = t.UTC() } timestamp := t.Format(backupTimeFormat) return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext)) } // openExistingOrNew opens the logfile if it exists and if the current write // would not put it over MaxSize. If there is no such file or the write would // put it over the MaxSize, a new file is created. func (l *Logger) openExistingOrNew(writeLen int) error { filename := l.filename() info, err := os_Stat(filename) if os.IsNotExist(err) { return l.openNew() } if err != nil { return fmt.Errorf("error getting log file info: %s", err) } if info.Size()+int64(writeLen) >= l.max() { return l.rotate() } file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644) if err != nil { // if we fail to open the old log file for some reason, just ignore // it and open a new log file. return l.openNew() } l.file = file l.size = info.Size() return nil } // genFilename generates the name of the logfile from the current time. 
func (l *Logger) filename() string { if l.Filename != "" { return l.Filename } name := filepath.Base(os.Args[0]) + "-lumberjack.log" return filepath.Join(os.TempDir(), name) } // cleanup deletes old log files, keeping at most l.MaxBackups files, as long as // none of them are older than MaxAge. func (l *Logger) cleanup() error { if l.MaxBackups == 0 && l.MaxAge == 0 { return nil } files, err := l.oldLogFiles() if err != nil { return err } var deletes []logInfo if l.MaxBackups > 0 && l.MaxBackups < len(files) { deletes = files[l.MaxBackups:] files = files[:l.MaxBackups] } if l.MaxAge > 0 { diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge)) cutoff := currentTime().Add(-1 * diff) for _, f := range files { if f.timestamp.Before(cutoff) { deletes = append(deletes, f) } } } if len(deletes) == 0 { return nil } go deleteAll(l.dir(), deletes) return nil } func deleteAll(dir string, files []logInfo) { // remove files on a separate goroutine for _, f := range files { // what am I going to do, log this? _ = os.Remove(filepath.Join(dir, f.Name())) } } // oldLogFiles returns the list of backup log files stored in the same // directory as the current log file, sorted by ModTime func (l *Logger) oldLogFiles() ([]logInfo, error) { files, err := ioutil.ReadDir(l.dir()) if err != nil { return nil, fmt.Errorf("can't read log file directory: %s", err) } logFiles := []logInfo{} prefix, ext := l.prefixAndExt() for _, f := range files { if f.IsDir() { continue } name := l.timeFromName(f.Name(), prefix, ext) if name == "" { continue } t, err := time.Parse(backupTimeFormat, name) if err == nil { logFiles = append(logFiles, logInfo{t, f}) } // error parsing means that the suffix at the end was not generated // by lumberjack, and therefore it's not a backup file. } sort.Sort(byFormatTime(logFiles)) return logFiles, nil } // timeFromName extracts the formatted time from the filename by stripping off // the filename's prefix and extension. 
This prevents someone's filename from // confusing time.parse. func (l *Logger) timeFromName(filename, prefix, ext string) string { if !strings.HasPrefix(filename, prefix) { return "" } filename = filename[len(prefix):] if !strings.HasSuffix(filename, ext) { return "" } filename = filename[:len(filename)-len(ext)] return filename } // max returns the maximum size in bytes of log files before rolling. func (l *Logger) max() int64 { if l.MaxSize == 0 { return int64(defaultMaxSize * megabyte) } return int64(l.MaxSize) * int64(megabyte) } // dir returns the directory for the current filename. func (l *Logger) dir() string { return filepath.Dir(l.filename()) } // prefixAndExt returns the filename part and extension part from the Logger's // filename. func (l *Logger) prefixAndExt() (prefix, ext string) { filename := filepath.Base(l.filename()) ext = filepath.Ext(filename) prefix = filename[:len(filename)-len(ext)] + "-" return prefix, ext } // logInfo is a convenience struct to return the filename and its embedded // timestamp. type logInfo struct { timestamp time.Time os.FileInfo } // byFormatTime sorts by newest time formatted in the name. 
// byFormatTime implements sort.Interface, ordering backups newest-first
// by the timestamp embedded in the backup file name.
type byFormatTime []logInfo

// Less reports whether entry i was rotated more recently than entry j.
func (b byFormatTime) Less(i, j int) bool {
	return b[i].timestamp.After(b[j].timestamp)
}

// Swap exchanges entries i and j.
func (b byFormatTime) Swap(i, j int) {
	b[i], b[j] = b[j], b[i]
}

// Len returns the number of entries.
func (b byFormatTime) Len() int {
	return len(b)
}
charm-2.1.1/src/gopkg.in/natefinch/lumberjack.v2/linux_test.go0000664000175000017500000000332112672604530023233 0ustar marcomarco// +build linux

package lumberjack

import (
	"os"
	"syscall"
	"testing"
)

// TestMaintainMode verifies that rotating preserves the permission bits
// of a pre-existing log file on both the new file and the renamed backup.
func TestMaintainMode(t *testing.T) {
	currentTime = fakeTime
	dir := makeTempDir("TestMaintainMode", t)
	defer os.RemoveAll(dir)

	filename := logFile(dir)

	mode := os.FileMode(0770)
	f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, mode)
	isNil(err, t)
	f.Close()

	l := &Logger{
		Filename:   filename,
		MaxBackups: 1,
		MaxSize:    100, // megabytes
	}
	defer l.Close()
	b := []byte("boo!")
	n, err := l.Write(b)
	isNil(err, t)
	equals(len(b), n, t)

	newFakeTime()

	err = l.Rotate()
	isNil(err, t)

	filename2 := backupFile(dir)
	info, err := os.Stat(filename)
	isNil(err, t)
	info2, err := os.Stat(filename2)
	isNil(err, t)
	equals(mode, info.Mode(), t)
	equals(mode, info2.Mode(), t)
}

// TestMaintainOwner verifies that rotating preserves the uid/gid of the
// existing log file, using mocked chown/stat so no root is required.
func TestMaintainOwner(t *testing.T) {
	fakeC := fakeChown{}
	os_Chown = fakeC.Set
	os_Stat = fakeStat
	// Restore the real implementations after the test.
	defer func() {
		os_Chown = os.Chown
		os_Stat = os.Stat
	}()
	currentTime = fakeTime
	dir := makeTempDir("TestMaintainOwner", t)
	defer os.RemoveAll(dir)

	filename := logFile(dir)

	l := &Logger{
		Filename:   filename,
		MaxBackups: 1,
		MaxSize:    100, // megabytes
	}
	defer l.Close()
	b := []byte("boo!")
	n, err := l.Write(b)
	isNil(err, t)
	equals(len(b), n, t)

	newFakeTime()

	err = l.Rotate()
	isNil(err, t)

	// Sentinel values injected by fakeStat below.
	equals(555, fakeC.uid, t)
	equals(666, fakeC.gid, t)
}

// fakeChown records the arguments of the most recent Chown call.
type fakeChown struct {
	name string
	uid  int
	gid  int
}

// Set is the mock stand-in for os.Chown.
func (f *fakeChown) Set(name string, uid, gid int) error {
	f.name = name
	f.uid = uid
	f.gid = gid
	return nil
}

// fakeStat stats the real file but overwrites the returned uid/gid with
// fixed sentinel values so the test can observe the chown arguments.
func fakeStat(name string) (os.FileInfo, error) {
	info, err := os.Stat(name)
	if err != nil {
		return info, err
	}
	stat := info.Sys().(*syscall.Stat_t)
	stat.Uid = 555
	stat.Gid = 666
	return info, nil
}
charm-2.1.1/src/gopkg.in/natefinch/lumberjack.v2/README.md0000664000175000017500000001311112672604530021763 0ustar marcomarco# lumberjack [![GoDoc](https://godoc.org/gopkg.in/natefinch/lumberjack.v2?status.png)](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [![Build Status](https://drone.io/github.com/natefinch/lumberjack/status.png)](https://drone.io/github.com/natefinch/lumberjack/latest) [![Build status](https://ci.appveyor.com/api/projects/status/00gchpxtg4gkrt5d)](https://ci.appveyor.com/project/natefinch/lumberjack) [![Coverage Status](https://coveralls.io/repos/natefinch/lumberjack/badge.svg?branch=v2.0)](https://coveralls.io/r/natefinch/lumberjack?branch=v2.0) ### Lumberjack is a Go package for writing logs to rolling files. Package lumberjack provides a rolling logger. Note that this is v2.0 of lumberjack, and should be imported using gopkg.in thusly: import "gopkg.in/natefinch/lumberjack.v2" The package name remains simply lumberjack, and the code resides at https://github.com/natefinch/lumberjack under the v2.0 branch. Lumberjack is intended to be one part of a logging infrastructure. It is not an all-in-one solution, but instead is a pluggable component at the bottom of the logging stack that simply controls the files to which logs are written. Lumberjack plays well with any logging package that can write to an io.Writer, including the standard library's log package. Lumberjack assumes that only one process is writing to the output files. Using the same lumberjack configuration from multiple processes on the same machine will result in improper behavior. **Example** To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts. Code: ```go log.SetOutput(&lumberjack.Logger{ Filename: "/var/log/myapp/foo.log", MaxSize: 500, // megabytes MaxBackups: 3, MaxAge: 28, //days }) ``` ## type Logger ``` go type Logger struct { // Filename is the file to write logs to. 
Backup log files will be retained // in the same directory. It uses -lumberjack.log in // os.TempDir() if empty. Filename string `json:"filename" yaml:"filename"` // MaxSize is the maximum size in megabytes of the log file before it gets // rotated. It defaults to 100 megabytes. MaxSize int `json:"maxsize" yaml:"maxsize"` // MaxAge is the maximum number of days to retain old log files based on the // timestamp encoded in their filename. Note that a day is defined as 24 // hours and may not exactly correspond to calendar days due to daylight // savings, leap seconds, etc. The default is not to remove old log files // based on age. MaxAge int `json:"maxage" yaml:"maxage"` // MaxBackups is the maximum number of old log files to retain. The default // is to retain all old log files (though MaxAge may still cause them to get // deleted.) MaxBackups int `json:"maxbackups" yaml:"maxbackups"` // LocalTime determines if the time used for formatting the timestamps in // backup files is the computer's local time. The default is to use UTC // time. LocalTime bool `json:"localtime" yaml:"localtime"` // contains filtered or unexported fields } ``` Logger is an io.WriteCloser that writes to the specified filename. Logger opens or creates the logfile on first Write. If the file exists and is less than MaxSize megabytes, lumberjack will open and append to that file. If the file exists and its size is >= MaxSize megabytes, the file is renamed by putting the current time in a timestamp in the name immediately before the file's extension (or the end of the filename if there's no extension). A new log file is then created using original filename. Whenever a write would cause the current log file exceed MaxSize megabytes, the current file is closed, renamed, and a new log file created with the original name. Thus, the filename you give Logger is always the "current" log file. ### Cleaning Up Old Log Files Whenever a new logfile gets created, old log files may be deleted. 
The most recent files according to the encoded timestamp will be retained, up to a number equal to MaxBackups (or all of them if MaxBackups is 0). Any files with an encoded timestamp older than MaxAge days are deleted, regardless of MaxBackups. Note that the time encoded in the timestamp is the rotation time, which may differ from the last time that file was written to. If MaxBackups and MaxAge are both 0, no old log files will be deleted. ### func (\*Logger) Close ``` go func (l *Logger) Close() error ``` Close implements io.Closer, and closes the current logfile. ### func (\*Logger) Rotate ``` go func (l *Logger) Rotate() error ``` Rotate causes Logger to close the existing log file and immediately create a new one. This is a helper function for applications that want to initiate rotations outside of the normal rotation rules, such as in response to SIGHUP. After rotating, this initiates a cleanup of old log files according to the normal rules. **Example** Example of how to rotate in response to SIGHUP. Code: ```go l := &lumberjack.Logger{} log.SetOutput(l) c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGHUP) go func() { for { <-c l.Rotate() } }() ``` ### func (\*Logger) Write ``` go func (l *Logger) Write(p []byte) (n int, err error) ``` Write implements io.Writer. If a write would cause the log file to be larger than MaxSize, the file is closed, renamed to include a timestamp of the current time, and a new log file is created using the original log file name. If the length of the write is greater than MaxSize, an error is returned. - - - Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) charm-2.1.1/src/gopkg.in/natefinch/lumberjack.v2/testing_test.go0000664000175000017500000000544012672604530023555 0ustar marcomarcopackage lumberjack import ( "fmt" "path/filepath" "reflect" "runtime" "testing" ) // assert will log the given message if condition is false. 
// assert will log the given message if condition is false.
func assert(condition bool, t testing.TB, msg string, v ...interface{}) {
	assertUp(condition, t, 1, msg, v...)
}

// assertUp is like assert, but used inside helper functions, to ensure that
// the file and line number reported by failures corresponds to one or more
// levels up the stack.
func assertUp(condition bool, t testing.TB, caller int, msg string, v ...interface{}) {
	if !condition {
		// caller+1 skips this frame; the caller argument lets helpers
		// report their own caller's location instead of themselves.
		_, file, line, _ := runtime.Caller(caller + 1)
		v = append([]interface{}{filepath.Base(file), line}, v...)
		fmt.Printf("%s:%d: "+msg+"\n", v...)
		t.FailNow()
	}
}

// equals tests that the two values are equal according to reflect.DeepEqual.
func equals(exp, act interface{}, t testing.TB) {
	equalsUp(exp, act, t, 1)
}

// equalsUp is like equals, but used inside helper functions, to ensure that the
// file and line number reported by failures corresponds to one or more levels
// up the stack.
func equalsUp(exp, act interface{}, t testing.TB, caller int) {
	if !reflect.DeepEqual(exp, act) {
		_, file, line, _ := runtime.Caller(caller + 1)
		fmt.Printf("%s:%d: exp: %v (%T), got: %v (%T)\n",
			filepath.Base(file), line, exp, exp, act, act)
		t.FailNow()
	}
}

// isNil reports a failure if the given value is not nil.  Note that values
// which cannot be nil will always fail this check.
func isNil(obtained interface{}, t testing.TB) {
	isNilUp(obtained, t, 1)
}

// isNilUp is like isNil, but used inside helper functions, to ensure that the
// file and line number reported by failures corresponds to one or more levels
// up the stack.
func isNilUp(obtained interface{}, t testing.TB, caller int) {
	if !_isNil(obtained) {
		_, file, line, _ := runtime.Caller(caller + 1)
		fmt.Printf("%s:%d: expected nil, got: %v\n", filepath.Base(file), line, obtained)
		t.FailNow()
	}
}

// notNil reports a failure if the given value is nil.
func notNil(obtained interface{}, t testing.TB) {
	notNilUp(obtained, t, 1)
}

// notNilUp is like notNil, but used inside helper functions, to ensure that the
// file and line number reported by failures corresponds to one or more levels
// up the stack.
func notNilUp(obtained interface{}, t testing.TB, caller int) {
	if _isNil(obtained) {
		_, file, line, _ := runtime.Caller(caller + 1)
		fmt.Printf("%s:%d: expected non-nil, got: %v\n", filepath.Base(file), line, obtained)
		t.FailNow()
	}
}

// _isNil is a helper function for isNil and notNil, and should not be used
// directly.  It reports true for an untyped nil and for nil values of the
// kinds that can legally hold nil (chan, func, interface, map, ptr, slice).
func _isNil(obtained interface{}) bool {
	if obtained == nil {
		return true
	}
	switch v := reflect.ValueOf(obtained); v.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		return v.IsNil()
	}
	return false
}
charm-2.1.1/src/gopkg.in/natefinch/lumberjack.v2/chown_linux.go0000664000175000017500000000061412672604530023374 0ustar marcomarcopackage lumberjack

import (
	"os"
	"syscall"
)

// os_Chown is a var so we can mock it out during tests.
var os_Chown = os.Chown

// chown creates (or truncates) name with info's permission bits and then
// transfers info's uid/gid ownership to it.
func chown(name string, info os.FileInfo) error {
	// Make sure the file exists with the right mode before chowning.
	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
	if err != nil {
		return err
	}
	f.Close()
	stat := info.Sys().(*syscall.Stat_t)
	return os_Chown(name, int(stat.Uid), int(stat.Gid))
}
charm-2.1.1/src/gopkg.in/natefinch/lumberjack.v2/chown.go0000664000175000017500000000016212672604530022153 0ustar marcomarco// +build !linux

package lumberjack

import (
	"os"
)

// chown is a no-op on non-linux platforms, where file ownership is not
// carried over to rotated files.
func chown(_ string, _ os.FileInfo) error {
	return nil
}
charm-2.1.1/src/gopkg.in/natefinch/lumberjack.v2/rotate_test.go0000664000175000017500000000056412672604530023400 0ustar marcomarco// +build linux

package lumberjack_test

import (
	"log"
	"os"
	"os/signal"
	"syscall"

	"gopkg.in/natefinch/lumberjack.v2"
)

// Example of how to rotate in response to SIGHUP.
func ExampleLogger_Rotate() { l := &lumberjack.Logger{} log.SetOutput(l) c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGHUP) go func() { for { <-c l.Rotate() } }() } charm-2.1.1/src/gopkg.in/juju/0000775000175000017500000000000012672604601015057 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/0000775000175000017500000000000012672604603021374 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/Makefile0000664000175000017500000000775712672604603023054 0ustar marcomarco# Makefile for the charm store. ifndef GOPATH $(warning You need to set up a GOPATH.) endif PROJECT := gopkg.in/juju/charmstore.v5-unstable PROJECT_DIR := $(shell go list -e -f '{{.Dir}}' $(PROJECT)) GIT_COMMIT := $(shell git rev-parse --verify HEAD) VERSION := $(shell git describe --dirty) ifeq ($(shell uname -p | sed -r 's/.*(x86|armel|armhf).*/golang/'), golang) GO_C := golang INSTALL_FLAGS := else GO_C := gccgo-4.9 gccgo-go INSTALL_FLAGS := -gccgoflags=-static-libgo endif define DEPENDENCIES build-essential bzr juju-mongodb mongodb-server $(GO_C) openjdk-7-jre-headless elasticsearch endef ifeq ($(VERSION),no) VERSIONDEPS := else VERSIONDEPS := version/init.go endif default: build $(GOPATH)/bin/godeps: # godeps needs to be fetched with the insecure flag as launchpad # uses http for part of the checkout process. go get -v -insecure launchpad.net/godeps # Start of GOPATH-dependent targets. Some targets only make sense - # and will only work - when this tree is found on the GOPATH. ifeq ($(CURDIR),$(PROJECT_DIR)) build: $(VERSIONDEPS) go build $(PROJECT)/... check: $(VERSIONDEPS) go test $(PROJECT)/... install: $(VERSIONDEPS) go install $(INSTALL_FLAGS) -v $(PROJECT)/... clean: go clean $(PROJECT)/... 
else build: $(error Cannot $@; $(CURDIR) is not on GOPATH) check: $(error Cannot $@; $(CURDIR) is not on GOPATH) install: $(error Cannot $@; $(CURDIR) is not on GOPATH) clean: $(error Cannot $@; $(CURDIR) is not on GOPATH) endif # End of GOPATH-dependent targets. # Reformat source files. format: gofmt -w -l . # Reformat and simplify source files. simplify: gofmt -w -l -s . # Run the charmd server. server: install charmd -logging-config INFO cmd/charmd/config.yaml # Update the project Go dependencies to the required revision. deps: $(GOPATH)/bin/godeps $(GOPATH)/bin/godeps -u dependencies.tsv # Generate the dependencies file. create-deps: $(GOPATH)/bin/godeps godeps -t $(shell go list $(PROJECT)/...) > dependencies.tsv || true # Generate version information version/init.go: version/init.go.tmpl FORCE gofmt -r "unknownVersion -> Version{GitCommit: \"${GIT_COMMIT}\", Version: \"${VERSION}\",}" $< > $@ # Install packages required to develop the charm store and run tests. APT_BASED := $(shell command -v apt-get >/dev/null; echo $$?) 
sysdeps: ifeq ($(APT_BASED),0) ifeq ($(shell lsb_release -cs|sed -r 's/precise|quantal|raring/old/'),old) @echo Adding PPAs for golang and mongodb @sudo apt-add-repository --yes ppa:juju/golang @sudo apt-add-repository --yes ppa:juju/stable endif @echo Installing dependencies [ "x$(apt-key export D88E42B4 2>&1 1>/dev/null)" = "x" ] || { curl -s http://packages.elasticsearch.org/GPG-KEY-elasticsearch | sudo apt-key add -;} repo="http://packages.elasticsearch.org/elasticsearch/1.3/debian" file=/etc/apt/sources.list.d/packages_elasticsearch_org_elasticsearch_1_3_debian.list ; grep "$$repo" $$file || echo "deb $$repo stable main" | sudo tee $$file > /dev/null sudo apt-get update @sudo apt-get --force-yes install $(strip $(DEPENDENCIES)) \ $(shell apt-cache madison juju-mongodb mongodb-server | head -1 | cut -d '|' -f1) else @echo sysdeps runs only on systems with apt-get @echo on OS X with homebrew try: brew install bazaar mongodb elasticsearch endif gopkg: @echo $(PROJECT) help: @echo -e 'Charmstore - list of make targets:\n' @echo 'make - Build the package.' @echo 'make check - Run tests.' @echo 'make install - Install the package.' @echo 'make server - Start the charmd server.' @echo 'make clean - Remove object files from package source directories.' @echo 'make sysdeps - Install the development environment system packages.' @echo 'make deps - Set up the project Go dependencies.' @echo 'make create-deps - Generate the Go dependencies file.' @echo 'make format - Format the source files.' @echo 'make simplify - Format and simplify the source files.' @echo 'make gopkg - Output the current gopkg repository path and version.' 
.PHONY: build check clean format gopkg help install simplify sysdeps FORCE charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/dependencies.tsv0000664000175000017500000000624012672604603024562 0ustar marcomarcogithub.com/ajstarks/svgo git 89e3ac64b5b3e403a5e7c35ea4f98d45db7b4518 2014-10-04T21:11:59Z github.com/juju/blobstore git 06056004b3d7b54bbb7984d830c537bad00fec21 2015-07-29T11:18:58Z github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z github.com/juju/gojsonreference git f0d24ac5ee330baa21721cdff56d45e4ee42628e 2015-02-04T19:46:33Z github.com/juju/gojsonschema git e1ad140384f254c82f89450d9a7c8dd38a632838 2015-03-12T17:00:16Z github.com/juju/httpprof git 14bf14c307672fd2456bdbf35d19cf0ccd3cf565 2014-12-17T16:00:36Z github.com/juju/httprequest git 89d547093c45e293599088cc63e805c6f1205dc0 2016-03-02T10:09:58Z github.com/juju/idmclient git 812a86ff450af958df6665839d93590f27961b08 2016-03-16T15:15:55Z github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z github.com/juju/mempool git 24974d6c264fe5a29716e7d56ea24c4bd904b7cc 2016-02-05T10:49:27Z github.com/juju/names git ef19de31613af3735aa69ba3b40accce2faf7316 2016-03-01T22:07:10Z github.com/juju/schema git 1e25943f8c6fd6815282d6f1ac87091d21e14e19 2016-03-01T11:16:46Z github.com/juju/testing git 4d5a7d64948aae30698f5d97b4c0d1a85a4504b7 2016-03-07T02:41:09Z github.com/juju/txn git 99ec629d0066a4d73c54d8e021a7fc1dc07df614 2015-06-09T16:58:27Z github.com/juju/utils git 0cac78a34dd1c42d2f2dc718c345fd13e3a264fc 2016-01-29T15:50:19Z github.com/juju/version git 102b12db83e38cb2ce7003544092ea7b0ca59e92 2015-11-07T04:32:11Z github.com/juju/webbrowser git 54b8c57083b4afb7dc75da7f13e2967b2606a507 2016-03-09T14:36:29Z github.com/juju/xml git eb759a627588d35166bc505fceb51b88500e291e 2015-04-13T13:11:21Z github.com/juju/zip git 
f6b1e93fa2e29a1d7d49b566b2b51efb060c982a 2016-02-05T10:52:21Z github.com/julienschmidt/httprouter git 77a895ad01ebc98a4dc95d8355bc825ce80a56f6 2015-10-13T22:55:20Z golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z golang.org/x/net git ea47fc708ee3e20177f3ca3716217c4ab75942cb 2015-08-29T23:03:18Z gopkg.in/check.v1 git 4f90aeace3a26ad7021961c297b22c42160c7b25 2016-01-05T16:49:36Z gopkg.in/errgo.v1 git 66cb46252b94c1f3d65646f54ee8043ab38d766c 2015-10-07T15:31:57Z gopkg.in/juju/charm.v6-unstable git 4ad4f4ba39affe67785e385c1f49e7ec4206896f 2016-03-09T11:06:06Z gopkg.in/juju/charmrepo.v2-unstable git 0e86a44c1b8c4bfe3b5c9227a3223fc4c0e947fb 2016-03-16T15:33:12Z gopkg.in/juju/jujusvg.v1 git a60359df348ef2ca40ec3bcd58a01de54f05658e 2016-02-11T10:02:50Z gopkg.in/macaroon-bakery.v1 git 6bce7a1e7399542cbafe16cbbb1dfe4591fcafe7 2016-03-16T08:34:47Z gopkg.in/macaroon.v1 git ab3940c6c16510a850e1c2dd628b919f0f3f1464 2015-01-21T11:42:31Z gopkg.in/mgo.v2 git 4d04138ffef2791c479c0c8bbffc30b34081b8d9 2015-10-26T16:34:53Z gopkg.in/natefinch/lumberjack.v2 git 514cbda263a734ae8caac038dadf05f8f3f9f738 2016-01-25T11:17:49Z gopkg.in/tomb.v2 git 14b3d72120e8d10ea6e6b7f87f7175734b1faab8 2014-06-26T14:46:23Z gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z gopkg.in/yaml.v2 git 53feefa2559fb8dfa8d81baad31be332c97d6c77 2015-09-24T14:23:14Z launchpad.net/tomb bzr gustavo@niemeyer.net-20140529072043-hzcrlnl3ygvg914q 18 charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/version/0000775000175000017500000000000012672604603023061 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/version/version.go0000664000175000017500000000132212672604603025073 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package version // Version describes the current version of the code being run. 
type Version struct { GitCommit string Version string } // VersionInfo is a variable representing the version of the currently // executing code. Builds of the system where the version information // is required must arrange to provide the correct values for this // variable. One possible way to do this is to create an init() function // that updates this variable, please see init.go.tmpl to see an example. var VersionInfo = unknownVersion var unknownVersion = Version{ GitCommit: "unknown git commit", Version: "unknown version", } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/version/init.go.tmpl0000664000175000017500000000066312672604603025333 0ustar marcomarcopackage version // init is used to update the version info with the correct details for // the current build. It is expected that an appropriate build script // or Makefile will create a new init.go file based on this template // using a command like the following: // // gofmt -r "unknownVersion -> Version{GitCommit: \"${GIT_COMMIT}\", Version: \"${VERSION}\",}" init.go.tmpl > init.go func init() { VersionInfo = unknownVersion } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/LICENSE0000664000175000017500000010333012672604603022401 0ustar marcomarco GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. 
When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. 
The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. 
The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. 
The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. 
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. 
This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. 
Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see . charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/config/0000775000175000017500000000000012672604603022641 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/config/config_test.go0000664000175000017500000000651012672604603025476 0ustar marcomarco// Copyright 2012, 2013, 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package config_test // import "gopkg.in/juju/charmstore.v5-unstable/config" import ( "io/ioutil" "path" "testing" "time" jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/juju/charmstore.v5-unstable/config" ) func TestPackage(t *testing.T) { gc.TestingT(t) } type ConfigSuite struct { jujutesting.IsolationSuite } var _ = gc.Suite(&ConfigSuite{}) const testConfig = ` audit-log-file: /var/log/charmstore/audit.log audit-log-max-size: 500 audit-log-max-age: 1 mongo-url: localhost:23456 api-addr: blah:2324 foo: 1 bar: false auth-username: myuser auth-password: mypasswd identity-location: localhost:18082 identity-public-key: +qNbDWly3kRTDVv2UN03hrv/CBt4W6nxY5dHdw+KJFA= identity-api-url: "http://example.com/identity" terms-public-key: +qNbDWly3kRTDVv2UN03hrv/CBt4W6nxY5dHdw+KJFB= terms-location: localhost:8092 agent-username: agentuser agent-key: private: lsvcDkapKoFxIyjX9/eQgb3s41KVwPMISFwAJdVCZ70= public: +qNbDWly3kRTDVv2UN03hrv/CBt4W6nxY5dHdw+KJFA= stats-cache-max-age: 1h search-cache-max-age: 15m request-timeout: 500ms max-mgo-sessions: 10 ` func (s *ConfigSuite) readConfig(c *gc.C, content string) (*config.Config, error) { // Write the configuration content to file. path := path.Join(c.MkDir(), "charmd.conf") err := ioutil.WriteFile(path, []byte(content), 0666) c.Assert(err, gc.IsNil) // Read the configuration. 
return config.Read(path) } func (s *ConfigSuite) TestRead(c *gc.C) { conf, err := s.readConfig(c, testConfig) c.Assert(err, gc.IsNil) c.Assert(conf, jc.DeepEquals, &config.Config{ AuditLogFile: "/var/log/charmstore/audit.log", AuditLogMaxAge: 1, AuditLogMaxSize: 500, MongoURL: "localhost:23456", APIAddr: "blah:2324", AuthUsername: "myuser", AuthPassword: "mypasswd", IdentityLocation: "localhost:18082", IdentityPublicKey: &bakery.PublicKey{ Key: mustParseKey("+qNbDWly3kRTDVv2UN03hrv/CBt4W6nxY5dHdw+KJFA="), }, IdentityAPIURL: "http://example.com/identity", TermsLocation: "localhost:8092", TermsPublicKey: &bakery.PublicKey{ Key: mustParseKey("+qNbDWly3kRTDVv2UN03hrv/CBt4W6nxY5dHdw+KJFB="), }, AgentUsername: "agentuser", AgentKey: &bakery.KeyPair{ Public: bakery.PublicKey{ Key: mustParseKey("+qNbDWly3kRTDVv2UN03hrv/CBt4W6nxY5dHdw+KJFA="), }, Private: bakery.PrivateKey{ mustParseKey("lsvcDkapKoFxIyjX9/eQgb3s41KVwPMISFwAJdVCZ70="), }, }, StatsCacheMaxAge: config.DurationString{time.Hour}, RequestTimeout: config.DurationString{500 * time.Millisecond}, MaxMgoSessions: 10, SearchCacheMaxAge: config.DurationString{15 * time.Minute}, }) } func (s *ConfigSuite) TestReadConfigError(c *gc.C) { cfg, err := config.Read(path.Join(c.MkDir(), "charmd.conf")) c.Assert(err, gc.ErrorMatches, ".* no such file or directory") c.Assert(cfg, gc.IsNil) } func (s *ConfigSuite) TestValidateConfigError(c *gc.C) { cfg, err := s.readConfig(c, "") c.Assert(err, gc.ErrorMatches, "missing fields mongo-url, api-addr, auth-username, auth-password in config file") c.Assert(cfg, gc.IsNil) } func mustParseKey(s string) bakery.Key { var k bakery.Key err := k.UnmarshalText([]byte(s)) if err != nil { panic(err) } return k } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/config/config.go0000664000175000017500000000651212672604603024441 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
// The config package defines configuration parameters for
// the charm store.
package config // import "gopkg.in/juju/charmstore.v5-unstable/config"

import (
	"fmt"
	"io/ioutil"
	"os"
	"strings"
	"time"

	"gopkg.in/errgo.v1"
	"gopkg.in/macaroon-bakery.v1/bakery"
	"gopkg.in/yaml.v2"
)

// Config holds the charm store server configuration, unmarshaled
// from a YAML configuration file by Read. Field tags give the YAML
// key for each setting; most settings are optional (omitempty),
// and the required ones are enforced by validate.
type Config struct {
	// TODO(rog) rename this to MongoAddr - it's not a URL.
	MongoURL string `yaml:"mongo-url,omitempty"`
	AuditLogFile string `yaml:"audit-log-file,omitempty"`
	AuditLogMaxSize int `yaml:"audit-log-max-size,omitempty"`
	AuditLogMaxAge int `yaml:"audit-log-max-age,omitempty"`
	APIAddr string `yaml:"api-addr,omitempty"`
	AuthUsername string `yaml:"auth-username,omitempty"`
	AuthPassword string `yaml:"auth-password,omitempty"`
	ESAddr string `yaml:"elasticsearch-addr,omitempty"` // elasticsearch is optional
	IdentityPublicKey *bakery.PublicKey `yaml:"identity-public-key,omitempty"`
	IdentityLocation string `yaml:"identity-location"`
	TermsPublicKey *bakery.PublicKey `yaml:"terms-public-key,omitempty"`
	TermsLocation string `yaml:"terms-location,omitempty"`
	// The identity API is optional
	IdentityAPIURL string `yaml:"identity-api-url,omitempty"`
	AgentUsername string `yaml:"agent-username,omitempty"`
	AgentKey *bakery.KeyPair `yaml:"agent-key,omitempty"`
	MaxMgoSessions int `yaml:"max-mgo-sessions,omitempty"`
	RequestTimeout DurationString `yaml:"request-timeout,omitempty"`
	StatsCacheMaxAge DurationString `yaml:"stats-cache-max-age,omitempty"`
	SearchCacheMaxAge DurationString `yaml:"search-cache-max-age,omitempty"`
	Database string `yaml:"database,omitempty"`
}

// validate checks that the required fields (mongo-url, api-addr,
// auth-username and auth-password) are all set, collecting every
// missing key so they can be reported in a single error, and rejects
// an auth username containing ':'.
// NOTE(review): the ':' restriction presumably exists because the
// username is combined into "user:password" credentials elsewhere —
// confirm against the callers.
func (c *Config) validate() error {
	var missing []string
	if c.MongoURL == "" {
		missing = append(missing, "mongo-url")
	}
	if c.APIAddr == "" {
		missing = append(missing, "api-addr")
	}
	if c.AuthUsername == "" {
		missing = append(missing, "auth-username")
	}
	if strings.Contains(c.AuthUsername, ":") {
		return fmt.Errorf("invalid user name %q (contains ':')", c.AuthUsername)
	}
	if c.AuthPassword == "" {
		missing = append(missing, "auth-password")
	}
	if
len(missing) != 0 { return fmt.Errorf("missing fields %s in config file", strings.Join(missing, ", ")) } return nil } // Read reads a charm store configuration file from the // given path. func Read(path string) (*Config, error) { f, err := os.Open(path) if err != nil { return nil, errgo.Notef(err, "cannot open config file") } defer f.Close() data, err := ioutil.ReadAll(f) if err != nil { return nil, errgo.Notef(err, "cannot read %q", path) } var conf Config err = yaml.Unmarshal(data, &conf) if err != nil { return nil, errgo.Notef(err, "cannot parse %q", path) } if err := conf.validate(); err != nil { return nil, errgo.Mask(err) } return &conf, nil } // DurationString holds a duration that marshals and // unmarshals as a friendly string. type DurationString struct { time.Duration } func (dp *DurationString) UnmarshalText(data []byte) error { d, err := time.ParseDuration(string(data)) if err != nil { return errgo.Mask(err) } dp.Duration = d return nil } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/docs/0000775000175000017500000000000012672604603022324 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/docs/bundles.md0000664000175000017500000001313612672604603024306 0ustar marcomarco# Bundles in The Charmstore The charmstore allows two versions of bundle specifications, as described by github.com/juju/charm. The versions are numbered 3 and 4, relating to the API version under which they can be hosted: charmworld (API v3) supports only version 3 bundles, charmstore (API v4) supports version 3 and version 4. ## Version 3 bundles Version 3 bundles are currently existing bundles that specify a deployment as a list of services and, optionally, relations. The charmstore will not support the idea of a "basket" or multiple bundles within one file. However, existing baskets will still be imported, and split up into their component bundles. 
## Version 4 bundles Version 4 bundles are identical to version 3 bundles except for a few key differences: the `branch` attribute of the service spec is no longer supported, they may contain a machine specification, and their deployment directives are different from version 3 bundles. ### Deploying version 4 bundles Because version 4 bundles are not yet idempotent (i.e.: if a machine fails to come up, running the bundle again will recreate all machines in the machine spec), the juju deployer pessimistically assumes that a bundle is a version 4 bundle *only* if it has a machine spec. This means that a bundle without a machine spec must use the version 3 style of placement directives listed below until further notice, when the deployer is updated. This does not affect version 4 bundle support within the charmstore (that is, the machine spec is still optional). The Juju GUI does not yet support version 4 bundles as of version 1.3.4, as the GUI charm contains an older version of the deployer. ### Machine Specifications A machine specification identifies a machine that will be created in the Juju environment. These machines are named with an integer, and can have any of three optional attributes: * *constraints* - Constraints are specified as a string as described by the Juju constraints flag (see `juju help constraints` for more information). * *annotations* - Annotations, provided as key-value pairs, are additional information that is tacked onto the machine within the Juju state server. These can be used for marking machines for your own use, or for use by Juju clients. * *series* - You may optionally specify the series of the machine to be created (e.g.: "precise" or "trusty"). If you do not specify a series, the bundle series will be used. Machines are specified under the `machines` top-level attribute. 
### Deployment directives

Version 4 deployment directives (the `to` attribute on the service spec) is a YAML list of items following the format:

    (<containertype>:)?(<unit>|<machine>|new)

If containertype is specified, the unit is deployed into a new container of that type, otherwise it will be "hulk-smashed" into the specified location, by co-locating it with any other units that happen to be there, which may result in unintended behavior.

The second part (after the colon) specifies where the new unit should be placed; it may refer to a unit of another service specified in the bundle, a machine id specified in the machines section, or the special name "new" which specifies a newly created machine.

A unit placement may be specified with a service name only, in which case its unit number is assumed to be one more than the unit number of the previous unit in the list with the same service, or zero if there were none.

If there are fewer elements in To than NumUnits, the last element is replicated to fill it. If there are no elements (or To is omitted), "new" is replicated.

For example:

    wordpress/0
    wordpress/1
    lxc:0
    kvm:new

specifies that the first two units get hulk-smashed onto the first two units of the wordpress service, the third unit gets allocated onto an lxc container on machine 0, and subsequent units get allocated on kvm containers on new machines.

The above example is the same as this:

    wordpress
    wordpress
    lxc:0
    kvm:new

Version 3 placement directives take the format:

    ((<containertype>:)?<service>(=<unitnumber>)?|0)

meaning that a machine cannot be specified beyond colocating (either through a container or hulk-smash) along with a specified unit of another service. Version 3 placement directives may be either a string of a single directive or a YAML list of directives in the above format. The only machine that may be specified is machine 0, allowing colocation on the bootstrap node.
## Example Bundles ### Version 3 ```yaml series: precise services: nova-compute: charm: cs:precise/nova-compute units: 3 ceph: units: 3 to: [nova-compute, nova-compute] mysql: to: 0 quantum: units: 4 to: ["lxc:nova-compute", "lxc:nova-compute", "lxc:nova-compute", "lxc:nova-compute"] verity: to: lxc:nova-compute=2 semper: to: nova-compute=2 lxc-service: num_units: 5 to: [ "lxc:nova-compute=1", "lxc:nova-compute=2", "lxc:nova-compute=0", "lxc:nova-compute=0", "lxc:nova-compute=2" ] ``` ### Version 4 ```yaml series: precise services: # Automatically place nova-compute: charm: cs:precise/nova-compute units: 3 # Specify containers ceph: units: 3 to: # Specify a unit - lxc:nova-compute/0 # Specify a machine - lxc:1 # Create a new machine, deploy to container on that machine. - lxc:new # Specify a machine mysql: to: - 0 # Specify colocation quantum: units: 4 to: - ceph/1 # Assume first unit - nova-compute # Repeats previous directive to fill out placements machines: 1: constraints: "mem=16G arch=amd64" annotations: foo: bar series: precise ``` charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/docs/API.md0000664000175000017500000016243112672604603023266 0ustar marcomarco# Charm store API The current live API lives at https://api.jujucharms.com/charmstore/v4 ## Intro The charm store stores and indexes charms and bundles. A charm or bundle is referred to by a charm store id which can take one of the following two forms: * ~*owner*/*series*/*name*(-*revision*) * *series*/*name*(-*revision*) *Owner* is the name of the user that owns the charm. *Series* is one of a small number of known possible series for charms (currently just the Ubuntu series names) or the special name bundle to signify that the charm id refers to a charm bundle. 
A charm store id referring to a charm (not a bundle) can also use one of the following two forms, omitting the series: * ~*owner*/*name*(-*revision*) * *name*(-*revision*) In this case the store will look at all charms with the same *owner* and *name*, and choose one according to its preference (for example, it currently prefers the latest LTS series). ### Data format All endpoints that do not produce binary data produce a single JSON object as their result. These will be described in terms of the Go types that produce and consume the format, along with an example. A charm id is represented as a `charm.URL type`. ### Errors If any request returns an error, it will produce it in the following form: ```go type Error struct { Message string Code string Info map[string] Error `json:",omitempty"` } ``` Example: ```json { "Message": "unexpected Content-Type \"image/jpeg\"; expected \"application/json\"", "Code": "bad request" } ``` Note: this format is compatible with the error results used by juju-core. Currently defined codes are the following: * not found * metadata not found * forbidden * bad request * duplicate upload * multiple errors * unauthorized * method not allowed The `Info` field is set when a request returns a "multiple errors" error code; currently the only two endpoints that can are "/meta" and "*id*/meta/any". Each element in `Info` corresponds to an element in the PUT request, and holds the error for that element. See those endpoints for examples. ### Bulk requests and missing metadata There are two forms of "bulk" API request that can return information about several items at once. The `/meta/any` endpoint (along with some others) have a set of "include" flags that specify metadata to return. The `/meta` endpoint has a set of "id" flags that specify a set of ids to return data on. In both of these cases, when the relevant data does not exist, the result will be omitted from the returned map. 
For example a GET of `/meta/archive-size?id=something` will return an empty map if the id "something" is not found; a GET of `/precise/wordpress-34/meta/any?include=bundle-metadata` will return an empty map if the id "precise/wordpress-34" refers to a bundle rather than a charm. For the singular forms of these endpoints, a 404 "metadata not found" error will be returned when this happens. In the `meta/any` GET bulk request, if some data requires authorization, the default behavior is to return an authorization required response. Clients interested in public data only can include a `ignore-auth=1` query so that only public information is returned. In this case, results requiring authorization (if any) will be omitted. ### Channels Any entity in the charm store is considered to be part of one or more "channels" (think "distribution channels"). Currently supported channels are "unpublished", "development" and "stable". All entities are initially (and always) part of the "unpublished" channel; subsequent operations on the publish endpoint can make entities available in other channels. All requests that take one or more entity ids as parameters accept a "channel" query parameter that influences what channel is chosen to resolve the ids. The default channel is "stable". For example, if wordpress-3 has just been published to the stable channel, and wordpress-4 has been published to the development then a GET of wordpress/meta/id-revision?channel=development will return {"Revision": 4} and a GET of wordpress/wordpress/meta/id-revision will return {"Revision": 3} because the default channel is "stable". ### Versioning The version of the API is indicated by an initial "vN" prefix to the path. Later versions will increment this number. This also means we can potentially serve backwardly compatible paths to juju-core. All paths in this document should be read as if they had a "v4" prefix. 
For example, the `wordpress/meta/charm-metadata` path is actually at `v4/wordpress/meta/charm-metadata`. ### Boolean values Where a flag specifies a boolean property, the value must be either "1", signifying true, or empty or "0", signifying false. ## Requests ### Expand-id #### GET *id*/expand-id The expand-id path expands a general id into a set of specific ids. It strips any revision number and series from id, and returns a slice of all the possible ids matched by that, including all the versions and series. If *id* is in the development channel, all development and non-development revisions will be returned; if it is not, then only non-development revisions will be returned. ```go []Id type Id struct { Id string } ``` Example: `GET wordpress/expand-id` ```json [ {"Id": "trusty/wordpress-2"} {"Id": "trusty/wordpress-1"}, {"Id": "precise/wordpress-2"}, {"Id": "precise/wordpress-1"}, ] ``` Example: `GET precise/wordpress-34/expand-id` ```json [ {"Id": "trusty/wordpress-2"} {"Id": "trusty/wordpress-1"}, {"Id": "precise/wordpress-2"}, {"Id": "precise/wordpress-1"}, ] ``` Example: `GET development/precise/wordpress-34/expand-id` ```json [ {"Id": "development/trusty/wordpress-3"}, {"Id": "trusty/wordpress-2"}, {"Id": "trusty/wordpress-1"}, {"Id": "precise/wordpress-2"}, {"Id": "precise/wordpress-1"}, ] ``` ### Archive #### GET *id*/archive The `/archive` path returns the raw archive zip file for the charm with the given charm id. The response header includes the SHA 384 hash of the archive (Content-Sha384) and the fully qualified entity id (Entity-Id). Example: `GET wordpress/archive` Any additional elements attached to the `/charm` path retrieve the file from the charm or bundle's zip file. The `Content-Sha384` header field in the response will hold the hash checksum of the archive. #### GET *id*/archive/*path* Retrieve a file corresponding to *path* in the charm or bundle's zip archive. 
Example: `GET trusty/wordpress/archive/config.yaml` #### POST *id*/archive This uploads the given charm or bundle in zip format.
POST id/archive?hash=sha384hash
The id specified must specify the series and must not contain a revision number. The hash flag must specify the SHA384 hash of the uploaded archive in hexadecimal format. If the same content has already been uploaded, the response will return immediately without reading the entire body. The charm or bundle is verified before being made available. The response holds the full charm/bundle id including the revision number. ```go type UploadedId struct { Id string } ``` Example response body: ```json { "Id": "precise/wordpress-24" } ``` #### DELETE *id*/archive This deletes the given charm or bundle with the given id. If the ID is not fully specified, the charm series or revisions are not resolved and the charm is not deleted. In order to delete the charm, the ID must include series as well as revisions. In order to delete all versions of the charm, use `/expand-id` and iterate on all elements in the result. ### Visual diagram #### GET *id*/diagram.svg This returns a scalable vector-graphics image representing the entity with the given id. This will return a not-found error for charms. #### GET *id*/icon.svg This returns the SVG image of the charm's icon. This reports a not-found error for bundles. Unlike the `archive/icon.svg` where 404 is returned in case an icon does not exist, this endpoint returns the default icon. #### GET *id*/readme This returns the README. ### Promulgation #### PUT *id*/promulgate A PUT to ~*user*/*anyseries*/*name*-*anyrevision* sets whether entities with the id *x*/*name* are considered to be aliases for ~*user*/*x*/*name* for all series *x*. The series and revision in the id are ignored (except that an entity must exist that matches the id). If Promulgate is true, it means that any new charms published to ~*user*/*x*/*name* will also be given the alias *x*/*name*. The latest revision for all ids ~*user*/*anyseries*/*name* will also be aliased likewise. 
If Promulgate is false, any new charms published to ~*user*/*anyseries*/*name* will not be given a promulgated alias, but no change is made to any existing aliases. The promulgated status can be retrieved from the promulgated meta endpoint. ```go type PromulgateRequest struct { Promulgate bool } ``` Example: `PUT ~charmers/precise/wordpress-23/promulgate` Request body: ```json { "Promulgate" : true, } ``` ### Charm and bundle publishing #### PUT *id*/publish A PUT to the publish endpoint publishes the entity with the given id on the channels provided in the request body. It reports an error if there are no channels specified or if one of the channels is invalid (the "unpublished" channel is special and is also considered invalid in a publish request). See the section on Channels in the introduction for how the published channels affects id resolving. ```go type PublishRequest struct { Channels []string } ``` On success, the response body will be empty. Example: `PUT ~charmers/trusty/django-42/publish` Request body: ```json { "Channels" : ["stable"], } ``` After the above request, ~charmers/trusty/django will resolve to ~charmers/trusty/django-42 unless a different channel is specified in the request. ### Stats #### GET stats/counter/... This endpoint can be used to retrieve stats related to entities.
GET stats/counter/key[:key]...?[by=unit]&start=date][&stop=date][&list=1]
The stats path allows the retrieval of counts of operations in a general way. A statistic is composed of an ordered tuple of keys:
kind:series:name:user
Operations on the store increment counts associated with a specific tuple, determined by the operation and the charm being operated on. When querying statistics, it is possible to aggregate statistics by using a `\*` as the last tuple element, standing for all tuples with the given prefix. For example, `missing:\*` will retrieve the counts for all operations of kind "missing", regardless of the series, name or user. If the list flag is specified, counts for all next level keys will be listed. For example, a query for `stats/counter/download:*?list=1&by=week` will show all the download counts for each series for each week. If a date range is specified, the returned counts will be restricted to the given date range. Dates are specified in the form "yyyy-mm-dd". If the `by` flag is specified, one count is shown for each unit in the specified period, where unit can be `week` or `day`. Possible kinds are: * archive-download * archive-delete * archive-upload * archive-failed-upload ```go []Statistic type Statistic struct { Key string `json:",omitempty"` Date string `json:",omitempty"` Count int64 } ``` Example: `GET "stats/counter/missing:trusty:*"` ```json [ {"Count": 1917} ] ``` Example: `GET stats/counter/download/archive-download:*?by=week&list=1&start=2014-03-01` ```json [ { "Key": "charm-bundle:precise:*", "Date": "2014-06-08", "Count": 2715 }, { "Key": "charm-bundle:trusty:*", "Date": "2014-06-08", "Count": 2672 }, { "Key": "charm-bundle:oneiric:*", "Date": "2014-06-08", "Count": 14 }, { "Key": "charm-bundle:quantal:*", "Date": "2014-06-08", "Count": 1 }, { "Key": "charm-bundle:trusty:*", "Date": "2014-06-15", "Count": 3835 }, { "Key": "charm-bundle:precise:*", "Date": "2014-06-15", "Count": 3389 } ] ``` **Update**: We need to provide aggregated stats for downloads: * promulgated and ~user counterpart charms should have the same download stats. #### PUT stats/update This endpoint can be used to increase the stats related to an entity. 
This will increase the download stats by one for the entity provided and at the time stamp provided. It can for future purpose include the client issuing the requests. This is used when charmstore is in front of a cache server that will not call the real /archive endpoint and as such will not increase the download counts.
PUT stats/update
Request body: ```go type StatsUpdateRequest struct { Timestamp time.Time Type string CharmReference *charm.URL } ``` Example: `PUT stats/update` Request body: ```json { "Timestamp":"2015-08-06T06:46:13Z", "Type":"deploy", "CharmReference":"cs:~charmers/utopic/wordpress-42" } ``` ### Meta #### GET meta The meta path returns an array of all the path names under meta, excluding the `meta/any` path, as suitable for passing as "include=" flags to paths that allow those. Note that the result does not include sub-paths of extra-info because these vary according to each charm or bundle. Example: `GET /meta` ```json [ "archive-size", "archive-upload-time", "bundle-machine-count", "bundle-metadata", "bundle-unit-count", "bundles-containing", "charm-actions", "charm-config", "charm-metadata", "charm-related", "extra-info", "hash", "hash256", "id", "id-name", "id-revision", "id-series", "id-user", "manifest", "promulgated", "published", "revision-info", "stats", "supported-series", "tags" ] ``` #### GET meta/*endpoint* This endpoint allows a user to query any number of IDs for metadata.
GET meta/endpoint?id=id0[&id=id1...][otherflags]
This call is equivalent to calling "*id*/meta" for each id separately. The result holds an element for each id in the request with the resulting metadata exactly as returned by "GET *id*/meta/*endpoint*[?*otherflags*]". The map keys are the ids exactly as specified in the request, although they are resolved to fill in series and revision as usual when fetching the metadata. Any ids that are not found, or with non-relevant metadata, will be omitted. ```go map[string] interface{} ``` Example: `GET meta/archive-size?id=wordpress&id=mysql` ```json { "wordpress": { "Size": 1234 }, "mysql" : { "Size": 4321 } } ``` Example: `GET /meta/any?include=archive-size&include=extra-info/featured&id=wordpress&id=mysql` ```json { "wordpress": { "Id": "precise/wordpress-3", "Meta": { "archive-size": { "Size": 1234 }, "extra-info/featured": true } }, "mysql" : { "Id": "precise/mysql-23", "Meta": { "archive-size": { "Size": 4321 }, "extra-info/featured": true } } } ``` #### PUT meta/*endpoint* A PUT to this endpoint allows the metadata endpoint of several ids to be updated. The request body is as specified in the result of the above GET request. The ids in the body specify the ids that will be updated. If there is a failure, the error code will be "multiple errors", and the Info field will holds one entry for each id in the request body that failed, holding the error for that id. If there are no errors, PUT endpoints usually return an empty body in the response. 
Example: `PUT meta/extra-info/featured` Request body: ```json { "precise/wordpress-23" : true, "precise/mysql-53" : true, "precise/wordpress-22" : false, } ``` Example: `PUT meta/any` Request body: ```json { "precise/wordpress-23": { "Meta": { "extra-info/featured": true, "extra-info/revision-info": "12dfede4ee23", "bad-metaname": 3235 } }, "trusty/mysql-23": { "Meta": { "extra-info/featured": false, } } } ``` Response body (with HTTP status 500): ```json { "Message": "multiple errors (1) found", "Code": "multiple errors", "Info": { "precise/wordpress-23": { "Message": "multiple errors", "Code": "multiple errors", "Info": { "bad-metaname": { "Message": "metadata not found", "Code": "not found" } } } } } ``` If the request succeeds, a 200 OK status code is returned with an empty response body. #### GET *id*/meta This path returns the same information as the meta path. The results are the same regardless of the actual id. Example: `GET foo/meta` ```json [ "archive-size", "archive-upload-time", "bundle-machine-count", "bundle-metadata", "bundle-unit-count", "bundles-containing", "charm-actions", "charm-config", "charm-metadata", "charm-related", "extra-info", "id", "id-name", "id-revision", "id-series", "id-user", "manifest", "promulgated", "revision-info", "stats", "tags" ] ``` #### GET *id*/meta/any
GET id/meta/any?[include=meta[&include=meta...]]
The `meta/any` path returns requested metadata information on the given id. If the id is non-specific, the latest revision and preferred series for the id will be assumed. Other metadata can be requested by specifying one or more `include` flags. The value of each meta must be the name of one of the path elements defined under the `/meta` path (for example: `charm-config`, `charm-meta`, `manifest`) and causes the desired metadata to be included in the Meta field, keyed by meta. If there is no metadata for the given meta path, the element will be omitted (for example, if bundle-specific data is requested for a charm id). The `any` path may not itself be the subject of an include directive. It is allowed to specify "charm-" or "bundle-"" specific metadata paths -- if the id refers to a charm then bundle-specific metadata will be omitted and vice versa. Various other paths use the same `include` mechanism to allow retrieval of arbitrary metadata. ```go type Meta struct { Id string `json:",omitempty"` Meta map[string] interface{} `json:",omitempty"` } ``` Example: `GET wordpress/meta/any` ```json { "Id": "trusty/wordpress-32" } ``` Example: `GET ubuntu/meta/any?include=archive-size&include=extra-info/featured` ```json { "Id": "trusty/ubuntu-3", "Meta": { "archive-size": { "Size": 7580 }, "extra-info/featured": true } } ``` #### PUT *id*/meta/any This endpoint allows the updating of several metadata elements at once. These must support PUT requests. The body of the PUT request is in the same form as returned by the above GET request, except with the Id field omitted. The elements inside the Meta field specify which meta endpoints will be updated. If one or more of the update fails, the resulting error will contain an Info field that has an entry for each update that fails, keyed by the endpoint name. 
Example: `PUT ubuntu/meta/any` Request body: ```json { "Meta": { "extra-info": { "revision-info": "a46f45649f0d0e0b" }, "extra-info/featured": true } } ``` Example: `PUT ubuntu/meta/any` Request body: ```json { "Meta": { "extra-info/featured": false, "archive-size": 12354, } } ``` Response body: ```json { "Message": "multiple errors", "Code": "multiple errors", "Info": { "archive-size": { "Message": "method not allowed", "Code": "bad request", } } } ``` #### GET *id*/meta/charm-metadata The `/meta/charm.metadata` path returns the contents of the charm metadata file for a charm. The id must refer to a charm, not a bundle. ```go type CharmMetadata struct { Summary string Description string Subordinate bool `json:",omitempty"` // Provides and Requires map from the relation name to // information about the relation. Provides map[string]Relation `json:",omitempty"` Requires map[string]Relation `json:",omitempty"` Peers map[string]Relation `json:",omitempty"` Tags []string `json:",omitempty"` } type Relation struct { Interface string Optional bool `json:",omitempty"` Limit int `json:",omitempty"` Scope RelationScope } type RelationRole string type RelationScope string ``` The possible values of a `RelationScope` are * global * container Example: `GET wordpress/meta/charm-metadata` ```json { "Summary": "WordPress is a full featured web blogging tool, this charm deploys it.", "Description": "This will install and setup WordPress optimized to run in the cloud. 
This install, in particular, will \n place Ngnix and php-fpm configured to scale horizontally with Nginx's reverse proxy\n", "Provides": { "website": { "Interface": "http", "Scope": "global" } }, "Requires": { "cache": { "Interface": "cache", "Scope": "global" }, "db": { "Interface": "db", "Scope": "global" } }, "Peers": { "loadbalancer": { "Interface": "reversenginx", "Scope": "global" } }, "Tags": [ "applications" ] } ``` #### GET *id*/meta/bundle-metadata The `meta/bundle-metadata` path returns the contents of the bundle metadata file for a bundle. The id must refer to a bundle, not a charm. ```go type BundleData struct { Services map[string] ServiceSpec Machines map[string] MachineSpec `json:",omitempty"` Series string `json:",omitempty"` Relations [][]string `json:",omitempty"` } type MachineSpec struct { Constraints string `json:",omitempty"` Annotations map[string]string `json:",omitempty"` } type ServiceSpec struct { Charm string NumUnits int To []string `json:",omitempty"` // Options holds the configuration values // to apply to the new service. They should // be compatible with the charm configuration. Options map[string]interface{} `json:",omitempty"` Annotations map[string]string `json:",omitempty"` Constraints string `json:",omitempty"` } ``` Example: `GET mediawiki/meta/bundle-metadata` ```json { "Services": { "mediawiki": { "Charm": "cs:precise/mediawiki-10", "NumUnits": 1, "Options": { "debug": false, "name": "Please set name of wiki", "skin": "vector" }, "Annotations": { "gui-x": "619", "gui-y": "-128" } }, "memcached": { "Charm": "cs:precise/memcached-7", "NumUnits": 1, "Options": { "connection_limit": "global", "factor": 1.25 }, "Annotations": { "gui-x": "926", "gui-y": "-125" } } }, "Relations": [ [ "mediawiki:cache", "memcached:cache" ] ] } ``` #### GET *id*/meta/bundle-unit-count The `meta/bundle-unit-count` path returns a count of all the units that will be created by a bundle. The id must refer to a bundle, not a charm. 
```go type BundleCount struct { Count int } ``` Example: `GET bundle/mediawiki/meta/bundle-unit-count` ```json { "Count": 1 } ``` #### GET *id*/meta/bundle-machine-count The `meta/bundle-machine-count` path returns a count of all the machines used by a bundle. The id must refer to a bundle, not a charm. ```go type BundleCount struct { Count int } ``` Example: `GET bundle/mediawiki/meta/bundle-machine-count` ```json { "Count": 2 } ``` #### GET *id*/meta/manifest The `meta/manifest` path returns the list of all files in the bundle or charm's archive. ```go []ManifestFile type ManifestFile struct { Name string Size int64 } ``` Example: `GET trusty/juju-gui-3/meta/manifest` ```json [ { "Name": "config.yaml", "Size": 8254 }, { "Name": "HACKING.md", "Size": 11376 }, { "Name": "Makefile", "Size": 3304 }, { "Name": "metadata.yaml", "Size": 1110 }, { "Name": "README.md", "Size": 9243 }, { "Name": "hooks/config-changed", "Size": 1636 }, { "Name": "hooks/install", "Size": 3055 }, { "Name": "hooks/start", "Size": 1101 }, { "Name": "hooks/stop", "Size": 1053 } ] ``` #### GET *id*/meta/charm-actions The `meta/charm-actions` path returns the actions available in a charm as stored in its `actions.yaml` file. Id must refer to a charm, not a bundle. ```go type Actions struct { Actions map[string]ActionSpec `json:",omitempty"` } type ActionSpec struct { Description string Params JSONSchema } ``` The Params field holds a JSON schema specification of an action's parameters. See [http://json-schema.org/latest/json-schema-core.html](http://json-schema.org/latest/json-schema-core.html). 
Example: `GET wordpress/meta/charm-actions` ```json { "Actions": { "backup": { "Description": "back up the charm", "Params": { "properties": { "destination-host": { "type": "string" }, "destination-name": { "type": "string" } }, "required": [ "destination-host" ], "type": "object" } } } } ``` #### GET *id*/meta/charm-config The `meta/charm-config` path returns the charm's configuration specification as stored in its `config.yaml` file. Id must refer to a charm, not a bundle. ```go type Config struct { Options map[string] Option } // Option represents a single charm config option. type Option struct { Type string Description string Default interface{} } ``` Example: `GET trusty/juju-gui-3/meta/charm-config` ```json { "Options": { "builtin-server": { "Type": "boolean", "Description": "Enable the built-in server.", "Default": true }, "login-help": { "Type": "string", "Description": "The help text shown to the user.", "Default": null }, "read-only": { "Type": "boolean", "Description": "Enable read-only mode.", "Default": false } } } ``` #### GET *id*/meta/published The `meta/published` path returns a list of the channels that the entity has been published to. ```go type PublishedResponse struct { // Info holds an entry for each channel that the // entity has been published to. Info []PublishedInfo } // PublishedInfo holds information on a channel that an entity // has been published to. type PublishedInfo struct { // Channel holds the value of the channel that // the entity has been published to. // This will never be "unpublished" as entities // cannot be published to that channel. Channel Channel // Current holds whether the entity is the most // recently published member of the channel. Current bool } ``` #### GET *id*/meta/terms The `meta/terms` path returns a list of terms and conditions (as recorded in the terms field of the charm metadata) the user must agree to in order to obtain the archive of the given charm id. 
Example: `GET some-charm/meta/terms` ```json [ "enterprise-terms/1", "special-terms/17" ] ``` #### GET *id*/meta/archive-size The `meta/archive-size` path returns the archive size, in bytes, of the archive of the given charm or bundle id. ```go type ArchiveSize struct { Size int64 } ``` Example: `GET wordpress/meta/archive-size` ```json { "Size": 4747 } ``` #### GET *id*/meta/hash This path returns the SHA384 hash sum of the archive of the given charm or bundle id. ```go type HashResponse struct { Sum string } ``` Example: `GET wordpress/meta/hash` Response body: ```json { "Sum": "0a410321586d244d3981e2b23a27a7e86ebdcab8bd0ca8f818d3f4c34b2ea2791e0dbdc949f70b283a3f5efdf908abf1" } ``` #### GET *id*/meta/hash256 This path returns the SHA256 hash sum of the archive of the given charm or bundle id. ```go type HashResponse struct { Sum string } ``` Example: `GET wordpress/meta/hash256` Response body: ```json { "Sum": "9ab5036cc18ba61a9d25fad389e46b3d407fc02c3eba917fe5f18fdf51ee6924" } ``` #### GET *id*/meta/supported-series This path returns the set of series supported by the given charm. This endpoint is appropriate for charms only. ```go type SupportedSeriesResponse struct { SupportedSeries []string } ``` Example: `GET precise/wordpress/meta/supported-series` Response body: ```json { "SupportedSeries": ["precise"] } ``` #### GET *id*/meta/bundles-containing The `meta/bundles-containing` path returns information on the last revision of any bundles that contain the charm with the given id.
GET id/meta/bundles-containing[?include=meta[&include=meta...]]
The Meta field is populated with information on the returned bundles according to the include flags - see the `meta/any` path for more info on how to use the `include` flag. The only values that are valid for `any-series`, `any-revision` or `all-results` flags are 0, 1 and empty. If `all-results` is enabled, all the bundle revisions are returned, not just the last one. The API should validate that and return bad request if any other value is provided. ```go []Bundle type Bundle struct { Id string Meta map[string]interface{} `json:",omitempty"` } ``` Example: `GET mysql/meta/bundles-containing?include=featured` might return: ```json [ { "Id": "bundle/mysql-scalable", "Meta": { "featured": { "Featured": false } } }, { "Id": "bundle/wordpress-simple", "Meta": { "featured": { "Featured": true } } } ] ``` #### GET *id*/meta/extra-info The meta/extra-info path reports any additional metadata recorded for the charm. This contains only information stored by clients - the API server itself does not populate any fields. The resulting object holds an entry for each piece of metadata recorded with a PUT to `meta/extra-info`. ```go type ExtraInfo struct { Values map[string] interface{} } ``` Example: `GET wordpress/meta/extra-info` ```json { "featured": true, "vcs-digest": "4b6b3c7d795eb66ca5f82bc52c01eb57ab595ab2" } ``` #### GET *id*/meta/extra-info/*key* This path returns the contents of the given `extra-info` key. The result is exactly the JSON value stored as a result of the PUT request to `extra-info` or `extra-info/key`. Example: `GET wordpress/meta/extra-info/featured` ```json true ``` #### PUT *id*/meta/extra-info This request updates the value of any metadata values. Any values that are not mentioned in the request are left untouched. Any fields with null values are deleted. 
Example: `PUT precise/wordpress-32/meta/extra-info` Request body: ```json { "vcs-digest": "7d6a853c7bb102d90027b6add67b15834d815e08", } ``` #### PUT *id*/meta/extra-info/*key* This request creates or updates the value for a specific key. If the value is null, the key is deleted. Example: `PUT precise/wordpress-32/meta/extra-info/vcs-digest` Request body: ```json "7d6a853c7bb102d90027b6add67b15834d815e08", ``` The above example is equivalent to the `meta/extra-info` example above. #### GET *id*/meta/charm-related The `meta/charm-related` path returns all charms that are related to the given charm id, which must not refer to a bundle. It is possible to include additional metadata for charms by using the `include` query:
GET id/meta/charm-related[?include=meta[&include=meta...]]
```go type Related struct { // Requires holds an entry for each interface provided by // the charm, containing all charms that require that interface. Requires map[string] []Item `json:",omitempty"` // Provides holds an entry for each interface required by the // the charm, containing all charms that provide that interface. Provides map[string] []Item `json:",omitempty"` } type Item struct { Id string Meta map[string] interface{} `json:",omitempty"` } ``` The Meta field is populated according to the include flags - see the `meta` path for more info on how to use this. Example: `GET wordpress/meta/charm-related` ```json { "Requires": { "memcache": [ {"Id": "precise/memcached-13"} ], "db": [ {"Id": "precise/mysql-46"}, {"Id": "~clint-fewbar/precise/galera-42"} ] }, "Provides": { "http": [ {"Id": "precise/apache2-24"}, {"Id": "precise/haproxy-31"}, {"Id": "precise/squid-reverseproxy-8"} ] } } ``` Example: `GET trusty/juju-gui-3/meta/charm-related?include=charm-config` ```json { "Provides": { "http": [ { "Id": "precise/apache2-24", "Meta": { "charm-config": { "Options": { "logrotate_count": { "Type": "int", "Description": "The number of days", "Default": 365 } } } } } ], "nrpe-external-master": [ { "Id": "precise/nova-compute-31", "Meta": { "charm-config": { "Options": { "bridge-interface": { "Type": "string", "Description": "Bridge interface", "Default": "br100" }, "bridge-ip": { "Type": "string", "Description": "IP to be assigned to bridge", "Default": "11.0.0.1" } } } } } ] } } ``` #### GET *id*/meta/archive-upload-time The `meta/archive-upload-time` path returns the time the archives for the given *id* was uploaded. The time is formatted according to RFC3339. ```go type ArchiveUploadTimeResponse struct { UploadTime time.Time } ``` Example: `GET trusty/wordpress-42/meta/archive-upload-time` ```json { "UploadTime": "2014-07-04T13:53:57.403506102Z" } ``` #### GET *id*/meta/promulgated The `promulgated` path reports whether the entity with the given ID is promulgated. 
Promulgated charms do not require the user portion of the ID to be specified. ```go type PromulgatedResponse struct { Promulgated bool } ``` Example: `GET trusty/wordpress-42/meta/promulgated` ```json { "Promulgated": true } ``` #### GET *id*/meta/stats
GET id/meta/stats?[refresh=0|1]
Many clients will need to use stats to determine the best result. Details for a charm/bundle might require the stats as important information to users. Currently we track deployment stats only. We intend to open this up to additional data. The response includes downloads count for both the specific requested entity revision and for all the revisions, and it is structured as below: ```go // StatsResponse holds the result of an id/meta/stats GET request. type StatsResponse struct { // ArchiveDownloadCount is superceded by ArchiveDownload but maintained for // backward compatibility. ArchiveDownloadCount int64 // ArchiveDownload holds the downloads count for a specific revision of the // entity. ArchiveDownload StatsCount // ArchiveDownloadAllRevisions holds the downloads count for all revisions // of the entity. ArchiveDownloadAllRevisions StatsCount } // StatsCount holds stats counts and is used as part of StatsResponse. type StatsCount struct { Total int64 // Total count over all time. Day int64 // Count over the last day. Week int64 // Count over the last week. Month int64 // Count over the last month. } ``` If the refresh boolean parameter is non-zero, the latest stats will be returned without caching. #### GET *id*/meta/tags The `tags` path returns any tags that are associated with the entity. Example: `GET trusty/wordpress-42/meta/tags` ```json { "Tags": [ "blog", "cms" ] } ``` #### GET *id*/meta/revision-info The `revision-info` path returns information about other available revisions of the charm id that the charm store knows about. It will include both older and newer revisions. The fully qualified ids of those charms will be returned in an ordered list from newest to oldest revision. Note that the current revision will be included in the list as it is also an available revision. 
```go type RevisionInfoResponse struct { Revisions []*charm.URL } ``` Example: `GET trusty/wordpress-42/meta/revision-info` ```json { "Revisions": [ "cs:trusty/wordpress-43", "cs:trusty/wordpress-42", "cs:trusty/wordpress-41", "cs:trusty/wordpress-39" ] } ``` #### GET *id*/meta/id The `id` path returns information on the charm or bundle id, split apart into its various components, including the id itself. The information is exactly that contained within the entity id. ```go type IdResponse struct { Id *charm.URL User string Series string `json:",omitempty"` Name string Revision int } ``` Example: `GET ~bob/trusty/wordpress/meta/id` ```json { "Id": "~bob/trusty/wordpress-42", "User": "bob", "Series": "trusty", "Name": "wordpress", "Revision": 42 } ``` Example: `GET precise/wordpress/meta/id` ```json { "Id": "precise/wordpress-42", "Series": "precise", "Name": "wordpress", "Revision": 42 } ``` Example: `GET bundle/openstack/meta/id` ```json { "Id": "bundle/openstack-3", "Series": "bundle", "Name": "openstack", "Revision": 3 } ``` #### GET *id*/meta/id-revision The `revision` path returns information on the revision of the id. The information is exactly that contained within the id. ```go type Revision struct { Revision int } ``` Example: `GET trusty/wordpress-42/meta/id-revision` ```json { "Revision": 42 } ``` #### GET *id*/meta/id-name The `name` path returns information on the name of the id. The information is exactly that contained within the id. ```go type Name struct { Name string } ``` Example: `GET trusty/wordpress-42/meta/id-name` ```json { "Name": "wordpress" } ``` #### GET *id*/meta/id-user The `id-user` path returns information on the user name in the id. This information is exactly that contained within the id. 
```go type User struct { User string } ``` Example: `GET ~bob/trusty/wordpress-42/meta/id-user` ```json { "User": "bob" } ``` Example: `GET trusty/wordpress-42/meta/id-user` ```json { "User": "" } ``` #### GET *id*/meta/id-series The `id-series` path returns information on the series in the id. This information is exactly that contained within the id. For bundles, this will return "bundle". ```go type Series struct { Series string } ``` Example: `GET ~bob/trusty/wordpress-42/meta/id-series` ```json { "Series": "trusty" } ``` #### GET *id*/meta/common-info The meta/common-info path reports any common metadata recorded for the base entity. This contains only information stored by clients - the API server itself does not populate any fields. The resulting object holds an entry for each piece of metadata recorded with a PUT to `meta/common-info`. ```go type CommonInfo struct { Values map[string] interface{} } ``` Example: `GET wordpress/meta/common-info` `GET precise/wordpress-32/meta/common-info` ```json { "homepage": "http://wordpress.org", "bugs-url": "http://wordpress.org/bugs", } ``` #### GET *id*/meta/common-info/*key* This path returns the contents of the given `common-info` key. The result is exactly the JSON value stored as a result of the PUT request to `common-info` or `common-info/key`. Example: `GET wordpress/meta/common-info/homepage` `GET precise/wordpress-32/meta/common-info/homepage` ```json "http://wordpress.org" ``` #### PUT *id*/meta/common-info This request updates the value of any metadata values. Any values that are not mentioned in the request are left untouched. Any fields with null values are deleted. Example: `PUT precise/wordpress-32/meta/common-info` Request body: ```json { "bugs-url": "http://wordpress.org/newbugs", } ``` #### PUT *id*/meta/common-info/*key* This request creates or updates the value for a specific key. If the value is null, the key is deleted. 
Example: `PUT precise/wordpress-32/meta/common-info/bugs-url`

Request body:
```json
"http://wordpress.org/newbugs"
```

The above example is equivalent to the `meta/common-info` example above.

### Resources

**Not yet implemented**

#### POST *id*/resources/name.stream

Posting to the resources path creates a new version of the given stream
for the charm with the given id. The request returns the new version.

```go
type ResourcesRevision struct {
        Revision int
}
```

#### GET  *id*/resources/name.stream[-revision]/arch/filename

Getting from the `/resources` path retrieves a charm resource from the charm
with the given id. If version is not specified, it retrieves the latest version
of the resource. The SHA-256 hash of the data is specified in the HTTP response
headers.

#### PUT *id*/resources/[~user/]series/name.stream-revision/arch?sha256=hash

Putting to the `resources` path uploads a resource (an arbitrary "blob" of
data) associated with the charm with id series/name, which must not be a
bundle. Stream and arch specify which of the charm's resource streams and which
architecture the resource will be associated with, respectively. Revision
specifies the revision of the stream that's being uploaded to.

The hash value must specify the hash of the stream. If the same series, name,
stream, revision combination is PUT again, it must specify the same hash.

### Search

#### GET search

The `search` path searches within the latest version of charms
and bundles within the store.
GET search[?text=text][&autocomplete=1][&filter=value...][&limit=limit][&skip=skip][&include=meta[&include=meta...]][&sort=field]
`text` specifies any text to search for. If `autocomplete` is specified, the search will return only charms and bundles with a name that has text as a prefix. `limit` limits the number of returned items to the specified limit count. `skip` skips over the first skip items in the result. Any number of filters may be specified, limiting the search to items with attributes that match the specified filter value. Items matching any of the selected values for a filter are selected, so `name=1&name=2` would match items whose name was either 1 or 2. However, if multiple filters are specified, the charm must match all of them, so `name=1&series=2` will only match charms whose name is 1 and whose series is 2. Available filters are: * tags - the set of tags associated with the charm. * name - the charm's name. * owner - the charm's owner (the ~user element of the charm id) * promulgated - the charm has been promulgated. * provides - interfaces provided by the charm. * requires - interfaces required by the charm. * series - the charm's series. * summary - the charm's summary text. * description - the charm's description text. * type - "charm" or "bundle" to search only one doctype or the other. Notes 1. filtering on a specified, but empty, owner is the same as filtering on promulgated=1. 2. a specified, but empty text field will return all charms and bundles. 3. the promulgated filter is only applied if specified. If the value is "1" then only promulgated entities are returned if it is any other value only non-promulgated entities are returned. The response contains a list of information on the charms or bundles that were matched by the request. If no parameters are specified, all charms and bundles will match. By default, only the charm store id is included. The results are sorted according to the given sort field, which may be one of `owner`, `name` or `series`, corresponding to the filters of the same names. 
If the field is prefixed with a hyphen (-), the sorting order will be reversed. If the sort field is not specified, the results are returned in most-relevant-first order if the text filter was specified, or an arbitrary order otherwise. It is possible to specify more than one sort field to get multi-level sorting, e.g. sort=name,-series will get charms in order of the charm name and then in reverse order of series. The Meta field is populated according to the include flag - see the `meta` path for more info on how to use this. ```go []SearchResult type SearchResult struct { Id string // Meta holds at most one entry for each meta value // specified in the include flags, holding the // data that would be returned by reading /meta/meta?id=id. // Metadata not relevant to a particular result will not // be included. Meta map[string] interface{} `json:",omitempty"` } ``` Example: `GET search?text=word&autocomplete=1&limit=2&include=archive-size` ```json [ { "Id": "precise/wordpress-1", "Meta": { "archive-size": { "Size": 1024 } } }, { "Id": "precise/wordpress-2", "Meta": { "archive-size": { "Size": 4242 } } } ] ``` #### GET search/interesting This returns a list of bundles and charms which are interesting from the Juju GUI perspective. Those are shown on the left sidebar of the GUI when no other search requests are performed. `GET search/interesting[?limit=limit][&include=meta]` The Meta field is populated according to the include flag - see the `meta` path for more info on how to use this. The `limit` flag is the same as for the "search" path. ### List #### GET list The `list` path lists charms and bundles within the store.
GET list[?filter=value...][&include=meta[&include=meta...]][&sort=field]
Any number of filters may be specified, limiting the list to items with attributes that match the specified filter value. Items matching any of the selected values for a filter are selected, so `name=1&name=2` would match items whose name was either 1 or 2. However, if multiple filters are specified, the charm must match all of them, so `name=1&series=2` will only match charms whose name is 1 and whose series is 2. Available filters are: * name - the charm's name. * owner - the charm's owner (the ~user element of the charm id) * promulgated - the charm has been promulgated. * series - the charm's series. * type - "charm" or "bundle" to search only one doctype or the other. Notes 1. the promulgated filter is only applied if specified. If the value is "1" then only promulgated entities are returned if it is any other value only non-promulgated entities are returned. The response contains a list of information on the charms or bundles that were matched by the request. If no parameters are specified, all charms and bundles will match. By default, only the charm store id is included. The results are sorted according to the given sort field, which may be one of `owner`, `name` or `series`, corresponding to the filters of the same names. If the field is prefixed with a hyphen (-), the sorting order will be reversed. If the sort field is not specified the order will be a server side logical order. It is possible to specify more than one sort field to get multi-level sorting, e.g. sort=name,-series will get charms in order of the charm name and then in reverse order of series. The Meta field is populated according to the include flag - see the `meta` path for more info on how to use this. ```go []EntityResult type EntityResult struct { Id string // Meta holds at most one entry for each meta value // specified in the include flags, holding the // data that would be returned by reading /meta/meta?id=id. // Metadata not relevant to a particular result will not // be included. 
Meta map[string] interface{} `json:",omitempty"` } ``` Example: `GET list?name=wordpress&include=archive-size` ```json [ { "Id": "precise/wordpress-1", "Meta": { "archive-size": { "Size": 1024 } } }, { "Id": "precise/wordpress-2", "Meta": { "archive-size": { "Size": 4242 } } } ] ``` ### Debug info #### GET /debug **Not yet implemented** This returns metadata describing the current version of the software running the server, and any other information deemed appropriate. The specific form of the returned data is deliberately left unspecified for now. #### GET /debug/status Used as a health check of the service. The API will also be used for nagios tests. The items that are checked: * connection to MongoDB * connection to ElasticSearch (if needed) (based on charm config) (elasticsearch cluster status, all nodes up/etc see charmworld) * number of charms and bundles in the blobstore * number of promulgated items * time and location of service start * time of last ingestion process * did ingestion finish * did ingestion finished without errors (this should not count charm/bundle ingest errors) ```go type DebugStatuses map[string] struct { Name string Value string Passed bool } ``` Example: `GET /debug/status` ```json { "mongo_connected" : { "Name": "MongoDB is connected", "Value": "Connected", "Passed": true }, "mongo_collections" : { "Name": "MongoDB collections", "Value": "All required collections exist", "Passed": true }, "ES_connected": { "Name": "ElasticSearch is connected", "Value": "Connected", "Passed": true }, "entities": { "Name": "Entities in charm store", "Value": "5701 charms; 2000 bundles; 42 promulgated", "Passed": true, }, "server_started": { "Name": "Server started", "Value": "123.45.67.89 2014-09-16 11:12:29Z", "Passed": true }, } ``` ### Permissions All entities in the charm store have their own access control lists. Read and write permissions are supported for specific users and groups. 
By default, all charms and bundles are readable by everyone, meaning that anonymous users can retrieve archives and metadata information without restrictions. The permission endpoints can be used to retrieve or change entities' permissions. #### GET *id*/meta/perm This path reports the read and write ACLs for the charm or bundle. ```go type PermResponse struct { Read []string Write []string } ``` If the `Read` ACL is empty, the entity and its metadata cannot be retrieved by anyone. If the `Write` ACL is empty, the entity cannot be modified by anyone. The special user `everyone` indicates that the corresponding operation (read or write) can be performed by everyone, including anonymous users. Example: `GET ~joe/wordpress/meta/perm` ```json { "Read": ["everyone"], "Write": ["joe"] } ``` #### PUT *id*/meta/perm This request updates the permissions associated with the charm or bundle. ```go type PermResponse struct { Read []string Write []string } ``` If the Read or Write ACL is empty or missing from the request body, that field will be overwritten as empty. See the *id*/meta/perm/*key* request to PUT only Read or Write. Example: `PUT precise/wordpress-32/meta/perm` Request body: ```json { "Read": ["everyone"], "Write": ["joe"] } ``` #### GET *id*/meta/perm/*key* This path returns the contents of the given permission *key* (that can be `read` or `write`). The result is exactly the JSON value stored as a result of the PUT request to `meta/perm/key`. Example: `GET wordpress/meta/perm/read` ```json ["everyone"] ``` #### PUT *id*/meta/perm/*key* This request updates the *key* permission associated with the charm or bundle, where *key* can be `read` or `write`. Example: `PUT precise/wordpress-32/meta/perm/read` Request body: ```json ["joe", "frank"] ``` ### Authorization #### GET /macaroon This endpoint returns a macaroon in JSON format that, when its third party caveats are discharged, will allow access to the charm store. No prior authorization is required. 
#### GET /delegatable-macaroon

This endpoint returns a macaroon in JSON format that can be passed to
third parties to allow them to access the charm store on the user's
behalf. If the "id" parameter is specified (url encoded),
the returned macaroon will be restricted for use only with
the entity with the given id.

A delegatable macaroon will only be returned to an authorized user (not
including admin). It will carry the same privileges as the macaroon used
to authorize the request, but is suitable for use by third parties.

#### GET /whoami

This endpoint returns the user name of the client and the list of groups the
user is a member of. This endpoint requires authorization.

Example: `GET whoami`

```json
{
    "User": "alice",
    "Groups": ["charmers", "admin", "team-awesome"]
}
```

The response is defined as:
```go
type WhoAmIResponse struct {
    User string
    Groups []string
}
```

### Logs

#### GET /log

This endpoint returns the log messages stored on the charm store. It is
possible to save them by sending POST requests to the same endpoint (see
below). For instance, the ingestion of charms/bundles produces logs that are
collected and sent to the charm store by the ingestion client.

`GET /log[?limit=count][&skip=count][&id=entity-id][&level=log-level][&type=log-type]`

Each log message is defined as:

```go
type LogResponse struct {
        // Data holds the log message as a JSON-encoded value.
        Data json.RawMessage

        // Level holds the log level as a string.
        Level LogLevel

        // Type holds the log type as a string.
        Type LogType

        // URLs holds a slice of entity URLs associated with the log message.
        URLs []*charm.URL `json:",omitempty"`

        // Time holds the time of the log.
        Time time.Time
}
```

The log entries are ordered by last inserted (most recent logs first), and by
default the last 1000 logs are returned. Use the `limit` and `skip` query
parameters to change the default behavior. Logs can further be filtered by log
level ("info", "warning" or "error") and by related entity id.
The type query parameter groups entries by type. For instance, to request all the ingestion errors related to the *utopic/django* charm, use the following URL: `/log?type=ingestion&level=error&id=utopic/django` #### POST /log This endpoint uploads logs to the charm store. The request content type must be `application/json`. The body must contain the JSON representation of a list of logs, each one being in this format: ```go type Log struct { // Data holds the log message as a JSON-encoded value. Data *json.RawMessage // Level holds the log level as a string. Level LogLevel // Type holds the log type as a string. Type LogType // URLs holds a slice of entity URLs associated with the log message. URLs []*charm.URL `json:",omitempty"` } ``` Nothing is returned if the request succeeds. Otherwise, an error is returned. ### Changes Each charm store has a global feed for all new published charms and bundles. #### GET changes/published This endpoint returns the ids of published charms or bundles published, most recently published first. `GET changes/published[?limit=count][&from=fromdate][&to=todate]` The `fromdate` and `todate` values constrain the range of publish dates, in "yyyy-mm-dd" format. If `fromdate` is specified only charms published on or after that date are returned; if `todate` is specified, only charms published on or before that date are returned. If the `limit` count is specified, it must be positive, and only the first count results are returned. The published time is in RFC3339 format. 
```go
[]Published

type Published struct {
    Id string
    PublishTime time.Time
}
```

Example: `GET changes/published`

```json
[
    {
        "Id": "cs:trusty/wordpress-42",
        "PublishTime": "2014-07-31T15:04:05Z"
    },
    {
        "Id": "cs:trusty/mysql-11",
        "PublishTime": "2014-07-30T14:20:00Z"
    },
    {
        "Id": "cs:bundle/mediawiki",
        "PublishTime": "2014-07-29T13:45:10Z"
    }
]
```

Example: `GET changes/published?limit=10&from=2014-07-31`

```json
[
    {
        "Id": "cs:trusty/wordpress-42",
        "PublishTime": "2014-07-31T15:04:05Z"
    }
]
```
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/README.md0000664000175000017500000000431612672604603022657 0ustar marcomarco# juju/charmstore

Store and publish Juju charms and bundles.

## Installation

To start using the charm store, first ensure you have a valid
Go environment, then run the following:

    go get -d gopkg.in/juju/charmstore.v5-unstable
    cd $GOPATH/src/gopkg.in/juju/charmstore.v5-unstable

## Go dependencies

The project uses godeps (https://launchpad.net/godeps) to manage Go
dependencies. To install this, run:

    go get launchpad.net/godeps

After installing it, you can update the dependencies
to the revision specified in the `dependencies.tsv` file with the following:

    make deps

Use `make create-deps` to update the dependencies file.

## Development environment

A couple of system packages are required in order to set up a charm store
development environment. To install them, run the following:

    make sysdeps

To run the elasticsearch tests you must run an elasticsearch server. If the
elasticsearch server is running at an address other than localhost:9200 then
set `JUJU_TEST_ELASTICSEARCH=host:port` where host and port provide the address
of the elasticsearch server. If you do not wish to run the elasticsearch tests,
set `JUJU_TEST_ELASTICSEARCH=none`.

At this point, from the root of this branch, run the command:

    make install

The command above builds and installs the charm store binaries, and places them
in `$GOPATH/bin`.
This is the list of the installed commands: - charmd: start the charm store server; - essync: synchronize the contents of the Elastic Search database with the charm store. A description of each command can be found below. ## Testing Run `make check` to test the application. Run `make help` to display help about all the available make targets. ## Charmstore server Once the charms database is fully populated, it is possible to interact with charm data using the charm store server. It can be started with the following command: charmd -logging-config INFO cmd/charmd/config.yaml The same result can be achieved more easily by running `make server`. Note that this configuration *should not* be used when running a production server, as it uses a known password for authentication. At this point the server starts listening on port 8080 (as specified in the config YAML file). charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/0000775000175000017500000000000012672604603023210 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/blobstore/0000775000175000017500000000000012672604603025203 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/blobstore/blobstore_test.go0000664000175000017500000001326212672604603030570 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package blobstore_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore"

import (
	"fmt"
	"io"
	"io/ioutil"
	"strconv"
	"strings"
	"testing"

	jujutesting "github.com/juju/testing"
	gc "gopkg.in/check.v1"

	"gopkg.in/juju/charmstore.v5-unstable/internal/blobstore"
)

// TestPackage runs the gocheck suites in this package against a
// MongoDB instance managed by the juju testing helpers.
func TestPackage(t *testing.T) {
	jujutesting.MgoTestPackage(t, nil)
}

// BlobStoreSuite exercises the blobstore Store against an isolated
// MongoDB database provided by IsolatedMgoSuite.
type BlobStoreSuite struct {
	jujutesting.IsolatedMgoSuite
}

var _ = gc.Suite(&BlobStoreSuite{})

// TestPutOpen checks that content stored with Put can be read back via
// Open, and that putting the same content under a different name
// produces a proof-of-content challenge which can be satisfied with
// NewContentChallengeResponse.
func (s *BlobStoreSuite) TestPutOpen(c *gc.C) {
	store := blobstore.New(s.Session.DB("db"), "blobstore")
	content := "some data"
	chal, err := store.Put(strings.NewReader(content), "x", int64(len(content)), hashOf(content), nil)
	c.Assert(err, gc.IsNil)
	c.Assert(chal, gc.IsNil)
	rc, length, err := store.Open("x")
	c.Assert(err, gc.IsNil)
	defer rc.Close()
	c.Assert(length, gc.Equals, int64(len(content)))
	data, err := ioutil.ReadAll(rc)
	c.Assert(err, gc.IsNil)
	c.Assert(string(data), gc.Equals, content)

	// Putting the resource again should generate a challenge.
	chal, err = store.Put(strings.NewReader(content), "y", int64(len(content)), hashOf(content), nil)
	c.Assert(err, gc.IsNil)
	c.Assert(chal, gc.NotNil)
	resp, err := blobstore.NewContentChallengeResponse(chal, strings.NewReader(content))
	c.Assert(err, gc.IsNil)
	chal, err = store.Put(strings.NewReader(content), "y", int64(len(content)), hashOf(content), resp)
	c.Assert(err, gc.IsNil)
	c.Assert(chal, gc.IsNil)
}

// TestPutTwice checks that PutUnchallenged replaces previously stored
// content under the same name, and that Open then returns the new
// content.
func (s *BlobStoreSuite) TestPutTwice(c *gc.C) {
	store := blobstore.New(s.Session.DB("db"), "blobstore")
	content := "some data"
	err := store.PutUnchallenged(strings.NewReader(content), "x", int64(len(content)), hashOf(content))
	c.Assert(err, gc.IsNil)
	content = "some different data"
	err = store.PutUnchallenged(strings.NewReader(content), "x", int64(len(content)), hashOf(content))
	c.Assert(err, gc.IsNil)
	rc, length, err := store.Open("x")
	c.Assert(err, gc.IsNil)
	defer rc.Close()
	c.Assert(length, gc.Equals, int64(len(content)))
	data, err := ioutil.ReadAll(rc)
	c.Assert(err, gc.IsNil)
	c.Assert(string(data), gc.Equals, content)
}

// TestPutInvalidHash checks that Put rejects content whose hash does
// not match the declared hash, and that nothing ends up stored.
func (s *BlobStoreSuite) TestPutInvalidHash(c *gc.C) {
	store := blobstore.New(s.Session.DB("db"), "blobstore")
	content := "some data"
	chal, err := store.Put(strings.NewReader(content), "x", int64(len(content)), hashOf("wrong"), nil)
	c.Assert(err, gc.ErrorMatches, "hash mismatch")
	c.Assert(chal, gc.IsNil)
	rc, length, err := store.Open("x")
	c.Assert(err, gc.ErrorMatches, "resource.*not found")
	c.Assert(rc, gc.Equals, nil)
	c.Assert(length, gc.Equals, int64(0))
}

// TestPutUnchallenged checks the basic round trip through
// PutUnchallenged and Open, and that repeating the same put with the
// same content succeeds.
func (s *BlobStoreSuite) TestPutUnchallenged(c *gc.C) {
	store := blobstore.New(s.Session.DB("db"), "blobstore")
	content := "some data"
	err := store.PutUnchallenged(strings.NewReader(content), "x", int64(len(content)), hashOf(content))
	c.Assert(err, gc.IsNil)
	rc, length, err := store.Open("x")
	c.Assert(err, gc.IsNil)
	defer rc.Close()
	c.Assert(length, gc.Equals, int64(len(content)))
	data, err := ioutil.ReadAll(rc)
	c.Assert(err, gc.IsNil)
	c.Assert(string(data), gc.Equals, content)
	err = store.PutUnchallenged(strings.NewReader(content), "x", int64(len(content)), hashOf(content))
	c.Assert(err, gc.IsNil)
}

// TestPutUnchallengedInvalidHash checks that PutUnchallenged rejects
// content whose hash does not match the declared hash.
func (s *BlobStoreSuite) TestPutUnchallengedInvalidHash(c *gc.C) {
	store := blobstore.New(s.Session.DB("db"), "blobstore")
	content := "some data"
	err := store.PutUnchallenged(strings.NewReader(content), "x", int64(len(content)), hashOf("wrong"))
	c.Assert(err, gc.ErrorMatches, "hash mismatch")
}

// TestRemove checks that stored content can be removed, after which
// Open reports the resource as not found.
func (s *BlobStoreSuite) TestRemove(c *gc.C) {
	store := blobstore.New(s.Session.DB("db"), "blobstore")
	content := "some data"
	err := store.PutUnchallenged(strings.NewReader(content), "x", int64(len(content)), hashOf(content))
	c.Assert(err, gc.IsNil)
	rc, length, err := store.Open("x")
	c.Assert(err, gc.IsNil)
	defer rc.Close()
	c.Assert(length, gc.Equals, int64(len(content)))
	data, err := ioutil.ReadAll(rc)
	c.Assert(err, gc.IsNil)
	c.Assert(string(data), gc.Equals, content)
	err = store.Remove("x")
	c.Assert(err, gc.IsNil)
	rc, length, err = store.Open("x")
	c.Assert(err, gc.ErrorMatches, `resource at path "[^"]+" not found`)
}

// TestLarge checks that a 20MB blob, streamed from a synthetic data
// source rather than held in memory, round-trips through Put and Open
// intact (verified by comparing hashes).
func (s *BlobStoreSuite) TestLarge(c *gc.C) {
	store := blobstore.New(s.Session.DB("db"), "blobstore")
	size := int64(20 * 1024 * 1024)
	// newContent returns a fresh reader over the same deterministic
	// stream, so the content can be consumed more than once.
	newContent := func() io.Reader {
		return newDataSource(123, size)
	}
	hash := hashOfReader(c, newContent())
	chal, err := store.Put(newContent(), "x", size, hash, nil)
	c.Assert(err, gc.IsNil)
	c.Assert(chal, gc.IsNil)
	rc, length, err := store.Open("x")
	c.Assert(err, gc.IsNil)
	defer rc.Close()
	c.Assert(length, gc.Equals, size)
	c.Assert(hashOfReader(c, rc), gc.Equals, hash)
}

// hashOfReader returns the hex-encoded blobstore hash of all the data
// read from r, failing the test on any read error.
func hashOfReader(c *gc.C, r io.Reader) string {
	h := blobstore.NewHash()
	_, err := io.Copy(h, r)
	c.Assert(err, gc.IsNil)
	return fmt.Sprintf("%x", h.Sum(nil))
}

// hashOf returns the hex-encoded blobstore hash of the string s.
func hashOf(s string) string {
	h := blobstore.NewHash()
	h.Write([]byte(s))
	return fmt.Sprintf("%x", h.Sum(nil))
}

// dataSource is an io.Reader that produces a fixed number of bytes
// from a small repeating pattern buffer, avoiding the need to hold
// the whole stream in memory.
type dataSource struct {
	buf      []byte // repeating pattern the stream is drawn from
	bufIndex int    // current read offset within buf
	remain   int64  // bytes still to produce before EOF
}

// newDataSource returns a stream of size bytes holding
// a repeated number.
func newDataSource(fillWith int64, size int64) io.Reader {
	src := &dataSource{
		remain: size,
	}
	// Fill the pattern buffer (at least 8KB) with the decimal form of
	// fillWith separated by spaces.
	for len(src.buf) < 8*1024 {
		src.buf = strconv.AppendInt(src.buf, fillWith, 10)
		src.buf = append(src.buf, ' ')
	}
	return src
}

// Read implements io.Reader by copying from the repeating pattern
// buffer until the configured total size has been produced, then
// returning io.EOF.
func (s *dataSource) Read(buf []byte) (int, error) {
	if int64(len(buf)) > s.remain {
		buf = buf[:int(s.remain)]
	}
	total := len(buf)
	if total == 0 {
		return 0, io.EOF
	}
	for len(buf) > 0 {
		// Wrap around when the pattern buffer is exhausted.
		if s.bufIndex == len(s.buf) {
			s.bufIndex = 0
		}
		nb := copy(buf, s.buf[s.bufIndex:])
		s.bufIndex += nb
		buf = buf[nb:]
		s.remain -= int64(nb)
	}
	return total, nil
}
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/blobstore/blobstore.go0000664000175000017500000001134712672604603027533 0ustar marcomarco// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package blobstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore" import ( "crypto/sha512" "fmt" "hash" "io" "strconv" "github.com/juju/blobstore" "github.com/juju/errors" "gopkg.in/errgo.v1" "gopkg.in/mgo.v2" ) type ReadSeekCloser interface { io.Reader io.Seeker io.Closer } // ContentChallengeError holds a proof-of-content // challenge produced by a blobstore. type ContentChallengeError struct { Req ContentChallenge } func (e *ContentChallengeError) Error() string { return "cannot upload because proof of content ownership is required" } // ContentChallenge holds a proof-of-content challenge // produced by a blobstore. A client can satisfy the request // by producing a ContentChallengeResponse containing // the same request id and a hash of RangeLength bytes // of the content starting at RangeStart. type ContentChallenge struct { RequestId string RangeStart int64 RangeLength int64 } // ContentChallengeResponse holds a response to a ContentChallenge. type ContentChallengeResponse struct { RequestId string Hash string } // NewHash is used to calculate checksums for the blob store. func NewHash() hash.Hash { return sha512.New384() } // NewContentChallengeResponse can be used by a client to respond to a content // challenge. The returned value should be passed to BlobStorage.Put // when the client retries the request. func NewContentChallengeResponse(chal *ContentChallenge, r io.ReadSeeker) (*ContentChallengeResponse, error) { _, err := r.Seek(chal.RangeStart, 0) if err != nil { return nil, errgo.Mask(err) } hash := NewHash() nw, err := io.CopyN(hash, r, chal.RangeLength) if err != nil { return nil, errgo.Mask(err) } if nw != chal.RangeLength { return nil, errgo.Newf("content is not long enough") } return &ContentChallengeResponse{ RequestId: chal.RequestId, Hash: fmt.Sprintf("%x", hash.Sum(nil)), }, nil } // Store stores data blobs in mongodb, de-duplicating by // blob hash. 
type Store struct { mstore blobstore.ManagedStorage } // New returns a new blob store that writes to the given database, // prefixing its collections with the given prefix. func New(db *mgo.Database, prefix string) *Store { rs := blobstore.NewGridFS(db.Name, prefix, db.Session) return &Store{ mstore: blobstore.NewManagedStorage(db, rs), } } func (s *Store) challengeResponse(resp *ContentChallengeResponse) error { id, err := strconv.ParseInt(resp.RequestId, 10, 64) if err != nil { return errgo.Newf("invalid request id %q", id) } return s.mstore.ProofOfAccessResponse(blobstore.NewPutResponse(id, resp.Hash)) } // Put tries to stream the content from the given reader into blob // storage, with the provided name. The content should have the given // size and hash. If the content is already in the store, a // ContentChallengeError is returned containing a challenge that must be // satisfied by a client to prove that they have access to the content. // If the proof has already been acquired, it should be passed in as the // proof argument. func (s *Store) Put(r io.Reader, name string, size int64, hash string, proof *ContentChallengeResponse) (*ContentChallenge, error) { if proof != nil { err := s.challengeResponse(proof) if err == nil { return nil, nil } if err != blobstore.ErrResourceDeleted { return nil, errgo.Mask(err) } // The blob has been deleted since the challenge // was created, so continue on with uploading // the content as if there was no previous challenge. } resp, err := s.mstore.PutForEnvironmentRequest("", name, hash) if err != nil { if errors.IsNotFound(err) { if err := s.mstore.PutForEnvironmentAndCheckHash("", name, r, size, hash); err != nil { return nil, errgo.Mask(err) } return nil, nil } return nil, err } return &ContentChallenge{ RequestId: fmt.Sprint(resp.RequestId), RangeStart: resp.RangeStart, RangeLength: resp.RangeLength, }, nil } // PutUnchallenged stream the content from the given reader into blob // storage, with the provided name. 
The content should have the given // size and hash. In this case a challenge is never returned and a proof // is not required. func (s *Store) PutUnchallenged(r io.Reader, name string, size int64, hash string) error { return s.mstore.PutForEnvironmentAndCheckHash("", name, r, size, hash) } // Open opens the entry with the given name. func (s *Store) Open(name string) (ReadSeekCloser, int64, error) { r, length, err := s.mstore.GetForEnvironment("", name) if err != nil { return nil, 0, errgo.Mask(err) } return r.(ReadSeekCloser), length, nil } // Remove the given name from the Store. func (s *Store) Remove(name string) error { return s.mstore.RemoveForEnvironment("", name) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/0000775000175000017500000000000012672604603025357 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/zip_test.go0000664000175000017500000000663612672604603027562 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package charmstore_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( "archive/zip" "bytes" "io" "io/ioutil" "strings" jujutesting "github.com/juju/testing" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" ) type zipSuite struct { jujutesting.IsolationSuite contents map[string]string } var _ = gc.Suite(&zipSuite{}) func (s *zipSuite) SetUpSuite(c *gc.C) { s.IsolationSuite.SetUpSuite(c) s.contents = map[string]string{ "readme.md": "readme contents", "uncompressed_readme.md": "readme contents", "icon.svg": "icon contents", "metadata.yaml": "metadata contents", "empty": "", "uncompressed_empty": "", } } func (s *zipSuite) makeZipReader(c *gc.C, contents map[string]string) (io.ReadSeeker, []*zip.File) { // Create a customized zip archive in memory. 
var buf bytes.Buffer w := zip.NewWriter(&buf) for name, content := range contents { header := &zip.FileHeader{ Name: name, Method: zip.Deflate, } if strings.HasPrefix(name, "uncompressed_") { header.Method = zip.Store } f, err := w.CreateHeader(header) c.Assert(err, gc.IsNil) _, err = f.Write([]byte(content)) c.Assert(err, gc.IsNil) } c.Assert(w.Close(), gc.IsNil) // Retrieve the zip files in the archive. zipReader := bytes.NewReader(buf.Bytes()) r, err := zip.NewReader(zipReader, int64(buf.Len())) c.Assert(err, gc.IsNil) c.Assert(r.File, gc.HasLen, len(contents)) return zipReader, r.File } func (s *zipSuite) TestZipFileReader(c *gc.C) { zipReader, files := s.makeZipReader(c, s.contents) // Check that a ZipFile created from each file in the archive // can be read correctly. for i, f := range files { c.Logf("test %d: %s", i, f.Name) zf, err := charmstore.NewZipFile(f) c.Assert(err, gc.IsNil) zfr, err := charmstore.ZipFileReader(zipReader, zf) c.Assert(err, gc.IsNil) content, err := ioutil.ReadAll(zfr) c.Assert(err, gc.IsNil) c.Assert(string(content), gc.Equals, s.contents[f.Name]) } } func (s *zipSuite) TestZipFileReaderWithErrorOnSeek(c *gc.C) { er := &seekErrorReader{} r, err := charmstore.ZipFileReader(er, mongodoc.ZipFile{}) c.Assert(err, gc.ErrorMatches, "cannot seek to 0 in zip content: foiled!") c.Assert(r, gc.Equals, nil) } type seekErrorReader struct { io.Reader } func (r *seekErrorReader) Seek(offset int64, whence int) (int64, error) { return 0, errgo.New("foiled!") } func (s *zipSuite) TestNewZipFile(c *gc.C) { _, files := s.makeZipReader(c, s.contents) // Check that we can create a new ZipFile from // each zip file in the archive. 
for i, f := range files { c.Logf("test %d: %s", i, f.Name) zf, err := charmstore.NewZipFile(f) c.Assert(err, gc.IsNil) offset, err := f.DataOffset() c.Assert(err, gc.IsNil) c.Assert(zf.Offset, gc.Equals, offset) c.Assert(zf.Size, gc.Equals, int64(f.CompressedSize64)) c.Assert(zf.Compressed, gc.Equals, !strings.HasPrefix(f.Name, "uncompressed_")) } } func (s *zipSuite) TestNewZipFileWithCompressionMethodError(c *gc.C) { _, files := s.makeZipReader(c, map[string]string{"foo": "contents"}) f := files[0] f.Method = 99 _, err := charmstore.NewZipFile(f) c.Assert(err, gc.ErrorMatches, `unknown zip compression method for "foo"`) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/export_test.go0000664000175000017500000000053712672604603030273 0ustar marcomarco// Copyright 2013, 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" var TimeToStamp = timeToStamp // StatsCacheEvictAll removes everything from the stats cache. func StatsCacheEvictAll(s *Store) { s.pool.statsCache.EvictAll() } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations_dump_test.go0000664000175000017500000004670312672604603032160 0ustar marcomarcopackage charmstore import ( "archive/zip" "bufio" "bytes" "encoding/binary" "encoding/json" "fmt" "go/build" "io/ioutil" "net/http" "os" "os/exec" "path" "path/filepath" "sort" "strings" "time" jujutesting "github.com/juju/testing" "github.com/juju/utils/fs" "gopkg.in/errgo.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/tomb.v2" "gopkg.in/yaml.v2" "gopkg.in/juju/charmstore.v5-unstable/config" "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore" ) // historicalDBName holds the name of the juju database // as hard-coded in previous versions of the charm store server. 
const historicalDBName = "juju" // dumpMigrationHistory checks out and runs the charmstore version held // in each element of history in sequence, runs any associated updates, // and, if the version is not before earlierDeployedVersion, dumps // the database to a file. // // After dumpMigrationHistory has been called, createDatabaseAtVersion // can be used to backtrack the database to any of the dumped versions. func dumpMigrationHistory(session *mgo.Session, earliestDeployedVersion string, history []versionSpec) error { db := session.DB(historicalDBName) vcsStatus, err := currentVCSStatus() if err != nil { return errgo.Mask(err) } dumping := false for _, vc := range history { logger.Infof("----------------- running version %v", vc.version) if vc.version == earliestDeployedVersion { dumping = true } if err := runMigrationVersion(db, vc); err != nil { return errgo.Notef(err, "cannot run at version %s", vc.version) } if dumping { filename := migrationDumpFileName(vc.version) logger.Infof("dumping database to %s", filename) if err := saveDBToFile(db, vcsStatus, filename); err != nil { return errgo.Notef(err, "cannot save DB at version %v", vc.version) } } } if !dumping { return errgo.Newf("no versions matched earliest deployed version %q; nothing dumped", earliestDeployedVersion) } return nil } // createDatabaseAtVersion loads the database from the // dump file for the given version (see dumpMigrationHistory). func createDatabaseAtVersion(db *mgo.Database, version string) error { vcsStatus, err := restoreDBFromFile(db, migrationDumpFileName(version)) if err != nil { return errgo.Notef(err, "cannot restore version %q", version) } logger.Infof("restored migration from version %s; dumped at %s", version, vcsStatus) return nil } // migrationDumpFileName returns the name of the file that // the migration database snapshot will be saved to. func migrationDumpFileName(version string) string { return "migrationdump." 
+ version + ".zip" } // currentVCSStatus returns the git status of the current // charmstore source code. This will be saved into the // migration dump file so that there is some indication // as to when that was created. func currentVCSStatus() (string, error) { cmd := exec.Command("git", "describe") cmd.Stderr = os.Stderr data, err := cmd.Output() if err != nil { return "", errgo.Mask(err) } // With the --porcelain flag, git status prints a simple // line-per-locally-modified-file, or nothing at all if there // are no locally modified files. cmd = exec.Command("git", "status", "--porcelain") cmd.Stderr = os.Stderr data1, err := cmd.Output() if err != nil { return "", errgo.Mask(err) } return string(append(data, data1...)), nil } // saveDBToFile dumps the entire state of the database to the given // file name, also saving the given VCS status. func saveDBToFile(db *mgo.Database, vcsStatus string, filename string) (err error) { f, err := os.Create(filename) if err != nil { return errgo.Mask(err) } defer func() { if err != nil { os.Remove(filename) } }() defer f.Close() zw := zip.NewWriter(f) defer func() { if err1 := zw.Close(); err1 != nil { err = errgo.Notef(err1, "zip close failed") } }() collections, err := dumpDB(db) if err != nil { return errgo.Mask(err) } if err := writeVCSStatus(zw, vcsStatus); err != nil { return errgo.Mask(err) } for _, c := range collections { w, err := zw.Create(historicalDBName + "/" + c.name + ".bson") if err != nil { return errgo.Mask(err) } if _, err := w.Write(c.data); err != nil { return errgo.Mask(err) } } return nil } // restoreDBFromFile reads the database dump from the given file // and restores it into db. 
func restoreDBFromFile(db *mgo.Database, filename string) (vcsStatus string, _ error) { f, err := os.Open(filename) if err != nil { return "", errgo.Mask(err) } defer f.Close() info, err := f.Stat() if err != nil { return "", errgo.Mask(err) } zr, err := zip.NewReader(f, info.Size()) if err != nil { return "", errgo.Mask(err) } var colls []collectionData for _, f := range zr.File { name := path.Clean(f.Name) if name == vcsStatusFile { data, err := readZipFile(f) if err != nil { return "", errgo.Mask(err) } vcsStatus = string(data) continue } if !strings.HasSuffix(name, ".bson") { logger.Infof("ignoring %v", name) continue } if !strings.HasPrefix(name, historicalDBName+"/") { return "", errgo.Newf("file %s from unknown database found in dump file", name) } name = strings.TrimPrefix(name, historicalDBName+"/") name = strings.TrimSuffix(name, ".bson") data, err := readZipFile(f) if err != nil { return "", errgo.Mask(err) } colls = append(colls, collectionData{ name: name, data: data, }) } if err := restoreDB(db, colls); err != nil { return "", errgo.Mask(err) } return vcsStatus, nil } // readZipFile reads the entire contents of f. func readZipFile(f *zip.File) ([]byte, error) { r, err := f.Open() if err != nil { return nil, errgo.Mask(err) } defer r.Close() data, err := ioutil.ReadAll(r) if err != nil { return nil, errgo.Mask(err) } return data, nil } const vcsStatusFile = "vcs-status" // writeVCSStatus writes the given VCS status into the // given zip file. func writeVCSStatus(zw *zip.Writer, vcsStatus string) error { w, err := zw.Create(vcsStatusFile) if err != nil { return errgo.Mask(err) } if _, err := w.Write([]byte(vcsStatus)); err != nil { return errgo.Mask(err) } return nil } const defaultCharmStoreRepo = "gopkg.in/juju/charmstore.v5-unstable" // versionSpec specifies a version of the charm store to run // and a function that will apply some updates to that // version. 
type versionSpec struct { version string // package holds the Go package containing the // charmd command. If empty, this defaults to // pkg string // update is called to apply updates after running charmd. update func(db *mgo.Database, csv *charmStoreVersion) error } var bogusPublicKey bakery.PublicKey // runVersion runs the charm store at the given version // and applies the associated updates. func runMigrationVersion(db *mgo.Database, vc versionSpec) error { if vc.pkg == "" { vc.pkg = defaultCharmStoreRepo } csv, err := runCharmStoreVersion(vc.pkg, vc.version, &config.Config{ MongoURL: jujutesting.MgoServer.Addr(), AuthUsername: "admin", AuthPassword: "password", APIAddr: fmt.Sprintf("localhost:%d", jujutesting.FindTCPPort()), MaxMgoSessions: 10, IdentityLocation: "https://api.jujucharms.com/identity", IdentityPublicKey: &bogusPublicKey, }) if err != nil { return errgo.Mask(err) } defer csv.Close() if vc.update == nil { return nil } if err := vc.update(db, csv); err != nil { return errgo.Notef(err, "cannot run update") } return nil } // collectionData holds all the dumped data from a collection. type collectionData struct { // name holds the name of the collection. name string // data holds all the records from the collection as // a sequence of raw BSON records. data []byte } // dumpDB returns dumped data for all the non-system // collections in the database. func dumpDB(db *mgo.Database) ([]collectionData, error) { collections, err := db.CollectionNames() if err != nil { return nil, errgo.Mask(err) } sort.Strings(collections) var dumped []collectionData for _, c := range collections { if strings.HasPrefix(c, "system.") { continue } data, err := dumpCollection(db.C(c)) if err != nil { return nil, errgo.Notef(err, "cannot dump %q: %v", c) } dumped = append(dumped, collectionData{ name: c, data: data, }) } return dumped, nil } // dumpCollection returns dumped data from a collection. 
func dumpCollection(c *mgo.Collection) ([]byte, error) { var buf bytes.Buffer iter := c.Find(nil).Iter() var item bson.Raw for iter.Next(&item) { if item.Kind != 3 { return nil, errgo.Newf("unexpected item kind in collection %v", item.Kind) } buf.Write(item.Data) } if err := iter.Err(); err != nil { return nil, errgo.Mask(err) } return buf.Bytes(), nil } // restoreDB restores all the given collections into the database. func restoreDB(db *mgo.Database, dump []collectionData) error { if err := db.DropDatabase(); err != nil { return errgo.Notef(err, "cannot drop database %v", db.Name) } for _, cd := range dump { if err := restoreCollection(db.C(cd.name), cd.data); err != nil { return errgo.Mask(err) } } return nil } // restoreCollection restores all the given data (in raw BSON format) // into the given collection, dropping it first. func restoreCollection(c *mgo.Collection, data []byte) error { if len(data) == 0 { return c.Create(&mgo.CollectionInfo{}) } for len(data) > 0 { doc, rest := nextBSONDoc(data) data = rest if err := c.Insert(doc); err != nil { return errgo.Mask(err) } } return nil } // nextBSONDoc returns the next BSON document from // the given data, and the data following it. func nextBSONDoc(data []byte) (bson.Raw, []byte) { if len(data) < 4 { panic("truncated record") } n := binary.LittleEndian.Uint32(data) return bson.Raw{ Kind: 3, Data: data[0:n], }, data[n:] } // charmStoreVersion represents a specific checked-out // version of the charm store code and a running version // of its associated charmd command. type charmStoreVersion struct { tomb tomb.Tomb // rootDir holds the root of the GOPATH directory // holding all the charmstore source. // This is copied from the GOPATH directory // that the charmstore tests are being run in. rootDir string // csAddr holds the address that can be used to // dial the running charmd. csAddr string // runningCmd refers to the running charmd, so that // it can be killed. 
runningCmd *exec.Cmd } // runCharmStoreVersion runs the given charm store version // from the given repository Go path and starting it with // the given configuration. func runCharmStoreVersion(csRepo, version string, cfg *config.Config) (_ *charmStoreVersion, err error) { dir, err := ioutil.TempDir("", "charmstore-test") if err != nil { return nil, errgo.Mask(err) } defer func() { if err != nil { os.RemoveAll(dir) } }() csv := &charmStoreVersion{ rootDir: dir, csAddr: cfg.APIAddr, } if err := csv.copyRepo(csRepo); err != nil { return nil, errgo.Mask(err) } destPkgDir := filepath.Join(csv.srcDir(), filepath.FromSlash(csRepo)) // Discard any changes made in the local repo. if err := csv.runCmd(destPkgDir, "git", "reset", "--hard", "HEAD"); err != nil { return nil, errgo.Mask(err) } if err := csv.runCmd(destPkgDir, "git", "checkout", version); err != nil { return nil, errgo.Mask(err) } depFile := filepath.Join(destPkgDir, "dependencies.tsv") if err := csv.copyDeps(depFile); err != nil { return nil, errgo.Mask(err) } if err := csv.runCmd(destPkgDir, "godeps", "-force-clean", "-u", depFile); err != nil { return nil, errgo.Mask(err) } if err := csv.runCmd(destPkgDir, "go", "install", path.Join(csRepo, "/cmd/charmd")); err != nil { return nil, errgo.Mask(err) } if err := csv.startCS(cfg); err != nil { return nil, errgo.Mask(err) } return csv, nil } // srvDir returns the package root of the charm store source. func (csv *charmStoreVersion) srcDir() string { return filepath.Join(csv.rootDir, "src") } // Close kills the charmd and removes all its associated files. func (csv *charmStoreVersion) Close() error { csv.Kill() if err := csv.Wait(); err != nil { logger.Infof("warning: error closing down server: %#v", err) } return csv.remove() } // remove removes all the files associated with csv. func (csv *charmStoreVersion) remove() error { return os.RemoveAll(csv.rootDir) } // uploadSpec specifies a entity to be uploaded through // the API. 
type uploadSpec struct { // usePost specifies that POST should be used rather than PUT. usePost bool // entity holds the entity to be uploaded. entity ArchiverTo // id holds the charm id to be uploaded to. id string // promulgatedId holds the promulgated id to be used, // valid only when usePost is false. promulgatedId string } // Upload uploads all the given entities to the charm store, // using the given API version. func (csv *charmStoreVersion) Upload(apiVersion string, specs []uploadSpec) error { for _, spec := range specs { if spec.usePost { if err := csv.uploadWithPost(apiVersion, spec.entity, spec.id); err != nil { return errgo.Mask(err) } } else { if err := csv.uploadWithPut(apiVersion, spec.entity, spec.id, spec.promulgatedId); err != nil { return errgo.Mask(err) } } } return nil } func (csv *charmStoreVersion) uploadWithPost(apiVersion string, entity ArchiverTo, url string) error { var buf bytes.Buffer if err := entity.ArchiveTo(&buf); err != nil { return errgo.Mask(err) } hash := blobstore.NewHash() hash.Write(buf.Bytes()) logger.Infof("archive %d bytes", len(buf.Bytes())) req, err := http.NewRequest("POST", fmt.Sprintf("/%s/%s/archive?hash=%x", apiVersion, url, hash.Sum(nil)), &buf) if err != nil { return errgo.Mask(err) } req.Header.Set("Content-Type", "application/zip") resp, err := csv.DoRequest(req) if err != nil { return errgo.Mask(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { body, _ := ioutil.ReadAll(resp.Body) return errgo.Newf("unexpected response to POST %q: %v (body %q)", req.URL, resp.Status, body) } return nil } func (csv *charmStoreVersion) uploadWithPut(apiVersion string, entity ArchiverTo, url, promulgatedURL string) error { var buf bytes.Buffer if err := entity.ArchiveTo(&buf); err != nil { return errgo.Mask(err) } promulgatedParam := "" if promulgatedURL != "" { promulgatedParam = fmt.Sprintf("&promulgated=%s", promulgatedURL) } hash := blobstore.NewHash() hash.Write(buf.Bytes()) logger.Infof("archive %d bytes", 
len(buf.Bytes())) req, err := http.NewRequest("PUT", fmt.Sprintf("/%s/%s/archive?hash=%x%s", apiVersion, url, hash.Sum(nil), promulgatedParam), &buf) if err != nil { return errgo.Mask(err) } req.Header.Set("Content-Type", "application/zip") resp, err := csv.DoRequest(req) if err != nil { return errgo.Mask(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { body, _ := ioutil.ReadAll(resp.Body) return errgo.Newf("unexpected response to PUT %q: %v (body %q)", req.URL, resp.Status, body) } return nil } // Put makes a PUT request containing the given body, JSON encoded, to the API. // The urlPath parameter should contain only the URL path, not the host or scheme. func (csv *charmStoreVersion) Put(urlPath string, body interface{}) error { data, err := json.Marshal(body) if err != nil { return errgo.Mask(err) } req, err := http.NewRequest("PUT", urlPath, bytes.NewReader(data)) if err != nil { return errgo.Mask(err) } req.Header.Set("Content-Type", "application/json") resp, err := csv.DoRequest(req) if err != nil { return errgo.Mask(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { body, _ := ioutil.ReadAll(resp.Body) return errgo.Newf("unexpected response to PUT %q: %v (body %q)", req.URL, resp.Status, body) } return nil } // DoRequest sends the given HTTP request to the charm store server. func (csv *charmStoreVersion) DoRequest(req *http.Request) (*http.Response, error) { req.SetBasicAuth("admin", "password") req.URL.Host = csv.csAddr req.URL.Scheme = "http" return http.DefaultClient.Do(req) } // waitUntilServerIsUp waits until the charmstore server is up. // It returns an error if it has to wait longer than the given timeout. 
func (csv *charmStoreVersion) waitUntilServerIsUp(timeout time.Duration) error { endt := time.Now().Add(timeout) for { req, err := http.NewRequest("GET", "/", nil) if err != nil { return errgo.Mask(err) } resp, err := csv.DoRequest(req) if err == nil { resp.Body.Close() return nil } if time.Now().After(endt) { return errgo.Notef(err, "timed out waiting for server to come up") } time.Sleep(100 * time.Millisecond) } } // startCS starts the charmd process running. func (csv *charmStoreVersion) startCS(cfg *config.Config) error { data, err := yaml.Marshal(cfg) if err != nil { return errgo.Mask(err) } cfgPath := filepath.Join(csv.rootDir, "csconfig.yaml") if err := ioutil.WriteFile(cfgPath, data, 0666); err != nil { return errgo.Mask(err) } cmd := exec.Command(filepath.Join(csv.rootDir, "bin", "charmd"), "--logging-config=INFO", cfgPath) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr cmd.Dir = csv.rootDir if err := cmd.Start(); err != nil { return errgo.Mask(err) } csv.runningCmd = cmd csv.tomb.Go(func() error { return errgo.Mask(cmd.Wait()) }) if err := csv.waitUntilServerIsUp(10 * time.Second); err != nil { return errgo.Mask(err) } return nil } // Kill kills the charmstore server. func (csv *charmStoreVersion) Kill() { csv.runningCmd.Process.Kill() } // Wait waits for the charmstore server to exit. func (csv *charmStoreVersion) Wait() error { return csv.tomb.Wait() } // runCmd runs the given command in the given current // working directory. func (csv *charmStoreVersion) runCmd(cwd string, c string, arg ...string) error { logger.Infof("cd %v; %v %v", cwd, c, strings.Join(arg, " ")) cmd := exec.Command(c, arg...) cmd.Env = envWithVars(map[string]string{ "GOPATH": csv.rootDir, }) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr cmd.Dir = cwd if err := cmd.Run(); err != nil { return errgo.Notef(err, "failed to run %v %v", c, arg) } return nil } // envWithVars returns the OS environment variables // with the specified variables changed to their associated // values. 
func envWithVars(vars map[string]string) []string { env := os.Environ() for i, v := range env { j := strings.Index(v, "=") if j == -1 { continue } name := v[0:j] if val, ok := vars[name]; ok { env[i] = name + "=" + val delete(vars, name) } } for name, val := range vars { env = append(env, name+"="+val) } return env } // copyDeps copies all the dependencies found in the godeps // file depFile from the local version into csv.rootDir. func (csv *charmStoreVersion) copyDeps(depFile string) error { f, err := os.Open(depFile) if err != nil { return errgo.Mask(err) } defer f.Close() for scan := bufio.NewScanner(f); scan.Scan(); { line := scan.Text() tabIndex := strings.Index(line, "\t") if tabIndex == -1 { return errgo.Newf("no tab found in dependencies line %q", line) } pkgPath := line[0:tabIndex] if err := csv.copyRepo(pkgPath); err != nil { return errgo.Mask(err) } } return nil } // copyRepo copies all the files inside the given importPath // from their local version into csv.rootDir. func (csv *charmStoreVersion) copyRepo(importPath string) error { pkg, err := build.Import(importPath, ".", build.FindOnly) if pkg.Dir == "" { return errgo.Mask(err) } destDir := filepath.Join(csv.srcDir(), filepath.FromSlash(pkg.ImportPath)) if err := os.MkdirAll(filepath.Dir(destDir), 0777); err != nil { return errgo.Mask(err) } if err := fs.Copy(pkg.Dir, destDir); err != nil { return errgo.Mask(err) } return nil } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/debug.go0000664000175000017500000001571612672604603027006 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( "bytes" "encoding/json" "fmt" "math/rand" "net/http" "net/http/httptest" "sort" "strings" "time" "github.com/juju/utils" "gopkg.in/errgo.v1" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/mgo.v2" "gopkg.in/juju/charmstore.v5-unstable/internal/router" appver "gopkg.in/juju/charmstore.v5-unstable/version" ) // GET /debug/info . func serveDebugInfo(http.Header, *http.Request) (interface{}, error) { return appver.VersionInfo, nil } // GET /debug/check. func debugCheck(checks map[string]func() error) http.Handler { return router.HandleJSON(func(http.Header, *http.Request) (interface{}, error) { n := len(checks) type result struct { name string err error } c := make(chan result) for name, check := range checks { name, check := name, check go func() { c <- result{name: name, err: check()} }() } results := make(map[string]string, n) var failed bool for ; n > 0; n-- { res := <-c if res.err == nil { results[res.name] = "OK" } else { failed = true results[res.name] = res.err.Error() } } if failed { keys := make([]string, 0, len(results)) for k := range results { keys = append(keys, k) } sort.Strings(keys) msgs := make([]string, len(results)) for i, k := range keys { msgs[i] = fmt.Sprintf("[%s: %s]", k, results[k]) } return nil, errgo.Newf("check failure: %s", strings.Join(msgs, " ")) } return results, nil }) } func checkDB(db *mgo.Database) func() error { return func() error { s := db.Session.Copy() s.SetSyncTimeout(500 * time.Millisecond) defer s.Close() return s.Ping() } } func checkES(si *SearchIndex) func() error { if si == nil || si.Database == nil { return func() error { return nil } } return func() error { _, err := si.Health() return err } } // GET /debug/fullcheck func debugFullCheck(hnd http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { code := http.StatusInternalServerError resp := new(bytes.Buffer) defer 
func() { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.WriteHeader(code) resp.WriteTo(w) }() fmt.Fprintln(resp, "Testing v4...") // test search fmt.Fprintln(resp, "performing search...") var sr params.SearchResponse if err := get(hnd, "/v4/search?limit=2000", &sr); err != nil { fmt.Fprintf(resp, "ERROR: search failed %s.\n", err) return } if len(sr.Results) < 1 { fmt.Fprintln(resp, "ERROR: no search results found.") return } fmt.Fprintf(resp, "%d results found.\n", len(sr.Results)) // pick random charm id := sr.Results[rand.Intn(len(sr.Results))].Id fmt.Fprintf(resp, "using %s.\n", id) // test content fmt.Fprintln(resp, "reading manifest...") url := "/v4/" + id.Path() + "/meta/manifest" fmt.Fprintln(resp, url) var files []params.ManifestFile if err := get(hnd, url, &files); err != nil { fmt.Fprintf(resp, "ERROR: cannot retrieve manifest: %s.\n", err) return } if len(files) == 0 { fmt.Fprintln(resp, "ERROR: manifest empty.") return } fmt.Fprintf(resp, "%d files found.\n", len(files)) // Choose a file to access expectFile := "metadata.yaml" if id.Series == "bundle" { expectFile = "bundle.yaml" } var file params.ManifestFile // default to metadata.yaml for _, f := range files { if f.Name == expectFile { file = f break } } // find a random file for i := 0; i < 5; i++ { f := files[rand.Intn(len(files))] if f.Size <= 16*1024 { file = f break } } fmt.Fprintf(resp, "using %s.\n", file.Name) // read the file fmt.Fprintln(resp, "reading file...") url = "/v4/" + id.Path() + "/archive/" + file.Name fmt.Fprintln(resp, url) var buf []byte if err := get(hnd, url, &buf); err != nil { fmt.Fprintf(resp, "ERROR: cannot retrieve file: %s.\n", err) return } if int64(len(buf)) != file.Size { fmt.Fprintf(resp, "ERROR: incorrect file size, expected: %d, received %d.\n", file.Size, len(buf)) return } fmt.Fprintf(resp, "%d bytes received.\n", len(buf)) // check if the charm is promulgated fmt.Fprintln(resp, "checking promulgated...") url = "/v4/" + id.Path() + 
"/meta/promulgated" fmt.Fprintln(resp, url) var promulgated params.PromulgatedResponse if err := get(hnd, url, &promulgated); err != nil { fmt.Fprintf(resp, "ERROR: cannot retrieve promulgated: %s.\n", err) return } if promulgated.Promulgated != (id.User == "") { fmt.Fprintf(resp, "ERROR: incorrect promulgated response, expected: %v, received %v.\n", (id.User == ""), promulgated.Promulgated) return } fmt.Fprintf(resp, "promulgated: %v.\n", promulgated.Promulgated) // check expand-id fmt.Fprintln(resp, "checking expand-id...") url = "/v4/" + id.Path() + "/expand-id" fmt.Fprintln(resp, url) var expanded []params.ExpandedId if err := get(hnd, url, &expanded); err != nil { fmt.Fprintf(resp, "ERROR: cannot expand-id: %s.\n", err) return } if len(expanded) == 0 { fmt.Fprintln(resp, "ERROR: expand-id returned 0 results") return } fmt.Fprintf(resp, "%d ids found.\n", len(expanded)) code = http.StatusOK }) } func newServiceDebugHandler(p *Pool, c ServerParams, hnd http.Handler) http.Handler { mux := router.NewServeMux() mux.Handle("/info", router.HandleJSON(serveDebugInfo)) mux.Handle("/check", debugCheck(map[string]func() error{ "mongodb": checkDB(p.db.Database), "elasticsearch": checkES(p.es), })) mux.Handle("/fullcheck", authorized(c, debugFullCheck(hnd))) return mux } func authorized(c ServerParams, h http.Handler) http.Handler { return router.HandleErrors(func(w http.ResponseWriter, r *http.Request) error { u, p, err := utils.ParseBasicAuthHeader(r.Header) if err != nil { return errgo.WithCausef(err, params.ErrUnauthorized, "") } if u != c.AuthUsername || p != c.AuthPassword { return errgo.WithCausef(nil, params.ErrUnauthorized, "username or password mismatch") } h.ServeHTTP(w, r) return nil }) } func get(h http.Handler, url string, body interface{}) error { req, err := http.NewRequest("GET", url, nil) if err != nil { return errgo.Notef(err, "cannot create request") } w := httptest.NewRecorder() h.ServeHTTP(w, req) if w.Code != http.StatusOK { if 
w.HeaderMap.Get("Content-Type") != "application/json" { return errgo.Newf("bad status %d", w.Code) } var e params.Error if err := json.Unmarshal(w.Body.Bytes(), &e); err != nil { return errgo.Notef(err, "cannot decode error") } return &e } if body == nil { return nil } if bytes, ok := body.(*[]byte); ok { *bytes = w.Body.Bytes() return nil } if w.HeaderMap.Get("Content-Type") == "application/json" { if err := json.Unmarshal(w.Body.Bytes(), body); err != nil { return errgo.Notef(err, "cannot decode body") } return nil } return errgo.Newf("cannot decode body") } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations_test.go0000664000175000017500000001246012672604603031124 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( "net/http" jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/mgo.v2" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" ) type migrationsSuite struct { jujutesting.IsolatedMgoSuite db StoreDatabase executed []mongodoc.MigrationName } var _ = gc.Suite(&migrationsSuite{}) func (s *migrationsSuite) SetUpTest(c *gc.C) { s.IsolatedMgoSuite.SetUpTest(c) s.db = StoreDatabase{s.Session.DB("migration-testing")} s.executed = nil } func (s *migrationsSuite) newServer(c *gc.C) error { apiHandler := func(p *Pool, config ServerParams, _ string) HTTPCloseHandler { return nopCloseHandler{http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {})} } srv, err := NewServer(s.db.Database, nil, serverParams, map[string]NewAPIHandlerFunc{ "version1": apiHandler, }) if err == nil { srv.Close() } return err } // patchMigrations patches the charm store migration list with the given migrations. 
func (s *migrationsSuite) patchMigrations(c *gc.C, ms []migration) {
	s.PatchValue(&migrations, ms)
}

// makeMigrations generates default migrations using the given names, and then
// patches the charm store migration list with the generated ones.
// Each generated migration simply records its own name in s.executed.
func (s *migrationsSuite) makeMigrations(c *gc.C, names ...mongodoc.MigrationName) {
	ms := make([]migration, len(names))
	for i, name := range names {
		// Rebind the loop variable so each closure captures its own name.
		name := name
		ms[i] = migration{
			name: name,
			migrate: func(StoreDatabase) error {
				s.executed = append(s.executed, name)
				return nil
			},
		}
	}
	s.patchMigrations(c, ms)
}

func (s *migrationsSuite) TestMigrate(c *gc.C) {
	// Create migrations.
	names := []mongodoc.MigrationName{"migr-1", "migr-2"}
	s.makeMigrations(c, names...)

	// Start the server.
	err := s.newServer(c)
	c.Assert(err, gc.IsNil)

	// The two migrations have been correctly executed in order.
	c.Assert(s.executed, jc.DeepEquals, names)

	// The migration document in the db reports that the execution is done.
	s.checkExecuted(c, names...)

	// Restart the server again and check migrations this time are not run.
	err = s.newServer(c)
	c.Assert(err, gc.IsNil)
	c.Assert(s.executed, jc.DeepEquals, names)
	s.checkExecuted(c, names...)
}

func (s *migrationsSuite) TestMigrateNoMigrations(c *gc.C) {
	// Empty the list of migrations.
	s.makeMigrations(c)

	// Start the server.
	err := s.newServer(c)
	c.Assert(err, gc.IsNil)

	// No migrations were executed.
	c.Assert(s.executed, gc.HasLen, 0)
	s.checkExecuted(c)
}

func (s *migrationsSuite) TestMigrateNewMigration(c *gc.C) {
	// Simulate two migrations were already run.
	err := setExecuted(s.db, "migr-1")
	c.Assert(err, gc.IsNil)
	err = setExecuted(s.db, "migr-2")
	c.Assert(err, gc.IsNil)

	// Create migrations.
	s.makeMigrations(c, "migr-1", "migr-2", "migr-3")

	// Start the server.
	err = s.newServer(c)
	c.Assert(err, gc.IsNil)

	// Only one migration has been executed.
	c.Assert(s.executed, jc.DeepEquals, []mongodoc.MigrationName{"migr-3"})

	// The migration document in the db reports that the execution is done.
	s.checkExecuted(c, "migr-1", "migr-2", "migr-3")
}

func (s *migrationsSuite) TestMigrateErrorUnknownMigration(c *gc.C) {
	// Simulate that a migration was already run.
	err := setExecuted(s.db, "migr-1")
	c.Assert(err, gc.IsNil)

	// Create migrations, without including the already executed one.
	s.makeMigrations(c, "migr-2", "migr-3")

	// Start the server. A migration recorded in the db but missing from
	// the code's list means old code is running on a newer database.
	err = s.newServer(c)
	c.Assert(err, gc.ErrorMatches, `database migration failed: found unknown migration "migr-1"; running old charm store code on newer charm store database\?`)

	// No new migrations were executed.
	c.Assert(s.executed, gc.HasLen, 0)
	s.checkExecuted(c, "migr-1")
}

func (s *migrationsSuite) TestMigrateErrorExecutingMigration(c *gc.C) {
	// The second migration fails; the third must not run.
	ms := []migration{{
		name:    "migr-1",
		migrate: func(StoreDatabase) error { return nil },
	}, {
		name:    "migr-2",
		migrate: func(StoreDatabase) error { return errgo.New("bad wolf") },
	}, {
		name:    "migr-3",
		migrate: func(StoreDatabase) error { return nil },
	}}
	s.patchMigrations(c, ms)

	// Start the server.
	err := s.newServer(c)
	c.Assert(err, gc.ErrorMatches, "database migration failed: error executing migration: migr-2: bad wolf")

	// Only one migration has been executed.
	s.checkExecuted(c, "migr-1")
}

// TestMigrateMigrationNames guards against two migrations in the real
// list sharing a name.
func (s *migrationsSuite) TestMigrateMigrationNames(c *gc.C) {
	names := make(map[mongodoc.MigrationName]bool, len(migrations))
	for _, m := range migrations {
		c.Assert(names[m.name], jc.IsFalse, gc.Commentf("multiple migrations named %q", m.name))
		names[m.name] = true
	}
}

func (s *migrationsSuite) TestMigrateMigrationList(c *gc.C) {
	// When adding migration, update the list below, but never remove existing
	// migrations.
	existing := []string{}
	for i, name := range existing {
		m := migrations[i]
		c.Assert(m.name, gc.Equals, name)
	}
}

// checkExecuted asserts that the migration document in the database
// records exactly the expected migration names (in any order).
func (s *migrationsSuite) checkExecuted(c *gc.C, expected ...mongodoc.MigrationName) {
	var obtained []mongodoc.MigrationName
	var doc mongodoc.Migration
	// ErrNotFound simply means no migration has ever run: obtained stays nil.
	if err := s.db.Migrations().Find(nil).One(&doc); err != mgo.ErrNotFound {
		c.Assert(err, gc.IsNil)
		obtained = doc.Executed
	}
	c.Assert(obtained, jc.SameContents, expected)
}
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/store.go0000664000175000017500000011633412672604603027052 0ustar marcomarco// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"

import (
	"encoding/json"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/juju/loggo"
	"github.com/juju/utils/parallel"
	"gopkg.in/errgo.v1"
	"gopkg.in/juju/charm.v6-unstable"
	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
	"gopkg.in/macaroon-bakery.v1/bakery"
	"gopkg.in/macaroon-bakery.v1/bakery/mgostorage"
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
	"gopkg.in/natefinch/lumberjack.v2"

	"gopkg.in/juju/charmstore.v5-unstable/audit"
	"gopkg.in/juju/charmstore.v5-unstable/internal/blobstore"
	"gopkg.in/juju/charmstore.v5-unstable/internal/cache"
	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
)

var logger = loggo.GetLogger("charmstore.internal.charmstore")

var (
	errClosed          = errgo.New("charm store has been closed")
	ErrTooManySessions = errgo.New("too many mongo sessions in use")
)

// Pool holds a connection to the underlying charm and blob
// data stores. Calling its Store method returns a new Store
// from the pool that can be used to process short-lived requests
// to access and modify the store.
type Pool struct {
	// db is the master database connection; per-Store sessions are
	// copied from it.
	db StoreDatabase
	// es is the ElasticSearch index, shared by all Stores.
	es *SearchIndex
	// bakeryParams, if non-nil, is used to create the Bakery service
	// on each new Store.
	bakeryParams *bakery.NewServiceParams
	stats        stats
	// run limits the number of goroutines started via Store.Go.
	run *parallel.Run

	// statsCache holds a cache of AggregatedCounts
	// values, keyed by entity id. When the id has no
	// revision, the counts apply to all revisions of the
	// entity.
	statsCache *cache.Cache

	config ServerParams

	// auditEncoder encodes messages to auditLogger.
	auditEncoder *json.Encoder
	auditLogger  *lumberjack.Logger

	// reqStoreC is a buffered channel that contains allocated
	// stores that are not currently in use.
	reqStoreC chan *Store

	// mu guards the fields following it.
	mu sync.Mutex

	// storeCount holds the number of stores currently allocated.
	storeCount int

	// closed holds whether the handler has been closed.
	closed bool
}

// reqStoreCacheSize holds the maximum number of store
// instances to keep around cached when there is no
// limit specified by config.MaxMgoSessions.
const reqStoreCacheSize = 50

// maxAsyncGoroutines holds the maximum number
// of goroutines that will be started by Store.Go.
const maxAsyncGoroutines = 50

// NewPool returns a Pool that uses the given database
// and search index. If bakeryParams is not nil,
// the Bakery field in the resulting Store will be set
// to a new Service that stores macaroons in mongo.
//
// The pool must be closed (with the Close method)
// after use.
func NewPool(db *mgo.Database, si *SearchIndex, bakeryParams *bakery.NewServiceParams, config ServerParams) (*Pool, error) {
	if config.StatsCacheMaxAge == 0 {
		config.StatsCacheMaxAge = time.Hour
	}
	p := &Pool{
		db:          StoreDatabase{db}.copy(),
		es:          si,
		statsCache:  cache.New(config.StatsCacheMaxAge),
		config:      config,
		run:         parallel.NewRun(maxAsyncGoroutines),
		auditLogger: config.AuditLogger,
	}
	// Size the idle-store channel at the session limit when one is
	// configured, otherwise at the default cache size.
	if config.MaxMgoSessions > 0 {
		p.reqStoreC = make(chan *Store, config.MaxMgoSessions)
	} else {
		p.reqStoreC = make(chan *Store, reqStoreCacheSize)
	}
	if bakeryParams != nil {
		bp := *bakeryParams
		// Fill out any bakery parameters explicitly here so
		// that we use the same values when each Store is
		// created. We don't fill out bp.Store field though, as
		// that needs to hold the correct mongo session which we
		// only know when the Store is created from the Pool.
		if bp.Key == nil {
			var err error
			bp.Key, err = bakery.GenerateKey()
			if err != nil {
				return nil, errgo.Notef(err, "cannot generate bakery key")
			}
		}
		if bp.Locator == nil {
			bp.Locator = bakery.PublicKeyLocatorMap(nil)
		}
		p.bakeryParams = &bp
	}
	if config.AuditLogger != nil {
		p.auditLogger = config.AuditLogger
		p.auditEncoder = json.NewEncoder(p.auditLogger)
	}
	// Make sure the mongo and elasticsearch indexes exist before any
	// request is served.
	store := p.Store()
	defer store.Close()
	if err := store.ensureIndexes(); err != nil {
		return nil, errgo.Notef(err, "cannot ensure indexes")
	}
	if err := store.ES.ensureIndexes(false); err != nil {
		return nil, errgo.Notef(err, "cannot ensure elasticsearch indexes")
	}
	return p, nil
}

// Close closes the pool. This must be called when the pool
// is finished with.
func (p *Pool) Close() {
	p.mu.Lock()
	if p.closed {
		p.mu.Unlock()
		return
	}
	p.closed = true
	p.mu.Unlock()
	// Wait for all Store.Go goroutines to finish before tearing down
	// the master session.
	p.run.Wait()
	p.db.Close()
	// Close all cached stores. Any used by
	// outstanding requests will be closed when the
	// requests complete.
loop:
	for {
		select {
		case s := <-p.reqStoreC:
			s.DB.Close()
		default:
			break loop
		}
	}
	if p.auditLogger != nil {
		p.auditLogger.Close()
	}
}

// RequestStore returns a store for a client request.
It returns
// an error with a ErrTooManySessions cause
// if too many mongo sessions are in use.
func (p *Pool) RequestStore() (*Store, error) {
	store, err := p.requestStoreNB(false)
	if store != nil {
		return store, nil
	}
	if errgo.Cause(err) != ErrTooManySessions {
		return nil, errgo.Mask(err)
	}
	// No handlers currently available - we've exceeded our concurrency limit
	// so wait for a handler to become available.
	select {
	case store := <-p.reqStoreC:
		return store, nil
	case <-time.After(p.config.HTTPRequestWaitDuration):
		return nil, errgo.Mask(err, errgo.Is(ErrTooManySessions))
	}
}

// Store returns a Store that can be used to access the database.
//
// It must be closed (with the Close method) after use.
func (p *Pool) Store() *Store {
	// always=true, so requestStoreNB cannot fail here.
	store, _ := p.requestStoreNB(true)
	return store
}

// requestStoreNB is like RequestStore except that it
// does not block when a Store is not immediately
// available, in which case it returns an error with
// a ErrTooManySessions cause.
//
// If always is true, it will never return an error.
func (p *Pool) requestStoreNB(always bool) (*Store, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.closed && !always {
		return nil, errClosed
	}
	// Reuse a cached idle store when one is available.
	select {
	case store := <-p.reqStoreC:
		return store, nil
	default:
	}
	if !always && p.config.MaxMgoSessions > 0 && p.storeCount >= p.config.MaxMgoSessions {
		return nil, ErrTooManySessions
	}
	p.storeCount++
	db := p.db.copy()
	store := &Store{
		DB:        db,
		BlobStore: blobstore.New(db.Database, "entitystore"),
		ES:        p.es,
		stats:     &p.stats,
		pool:      p,
	}
	if p.bakeryParams != nil {
		store.Bakery = newBakery(db, *p.bakeryParams)
	}
	return store, nil
}

// newBakery returns a bakery service whose macaroons are stored in the
// given database's macaroons collection.
func newBakery(db StoreDatabase, bp bakery.NewServiceParams) *bakery.Service {
	macStore, err := mgostorage.New(db.Macaroons())
	if err != nil {
		// Should never happen.
		panic(errgo.Newf("unexpected error from mgostorage.New: %v", err))
	}
	bp.Store = macStore
	bsvc, err := bakery.NewService(bp)
	if err != nil {
		// This should never happen because the only reason bakery.NewService
		// can fail is if it can't generate a key, and we have already made
		// sure that the key is generated.
		panic(errgo.Notef(err, "cannot make bakery service"))
	}
	return bsvc
}

// Store holds a connection to the underlying charm and blob
// data stores that is appropriate for short term use.
type Store struct {
	DB        StoreDatabase
	BlobStore *blobstore.Store
	ES        *SearchIndex
	Bakery    *bakery.Service
	stats     *stats
	pool      *Pool
}

// Copy returns a new store with a lifetime
// independent of s. Use this method if you
// need to use a store in an independent goroutine.
//
// It must be closed (with the Close method) after use.
func (s *Store) Copy() *Store {
	s1 := *s
	s1.DB = s.DB.clone()
	s1.BlobStore = blobstore.New(s1.DB.Database, "entitystore")
	if s.Bakery != nil {
		s1.Bakery = newBakery(s1.DB, *s.pool.bakeryParams)
	}
	// Account for the new store so the pool's session limit stays accurate.
	s.pool.mu.Lock()
	s.pool.storeCount++
	s.pool.mu.Unlock()
	return &s1
}

// Close closes the store instance.
func (s *Store) Close() {
	// Refresh the mongodb session so that the
	// next time the Store is used, it will acquire
	// a new connection from the pool as if the
	// session had been copied.
	s.DB.Session.Refresh()
	s.pool.mu.Lock()
	defer s.pool.mu.Unlock()
	if !s.pool.closed && (s.pool.config.MaxMgoSessions == 0 || s.pool.storeCount <= s.pool.config.MaxMgoSessions) {
		// The pool isn't overloaded, so put the store
		// back. Note that the default case should
		// never happen when MaxMgoSessions > 0.
		select {
		case s.pool.reqStoreC <- s:
			return
		default:
			// No space for handler - this may happen when
			// the number of actual sessions has exceeded
			// the requested maximum (for example when
			// a request already at the limit uses another session,
			// or when we are imposing no limit).
		}
	}
	// Not returned to the cache: really close the session.
	s.DB.Close()
	s.pool.storeCount--
}

// SetReconnectTimeout sets the length of time that
// mongo requests will block waiting to reconnect
// to a disconnected mongo server. If it is zero,
// requests may block forever.
func (s *Store) SetReconnectTimeout(d time.Duration) {
	s.DB.Session.SetSyncTimeout(d)
}

// Go runs the given function in a new goroutine,
// passing it a copy of s, which will be closed
// after the function returns.
func (s *Store) Go(f func(*Store)) {
	s = s.Copy()
	s.pool.run.Do(func() error {
		defer s.Close()
		f(s)
		return nil
	})
}

// Pool returns the pool that the store originally
// came from.
func (s *Store) Pool() *Pool {
	return s.pool
}

// ensureIndexes creates all the mongo indexes the charm store queries
// rely on. EnsureIndex is idempotent, so this is safe on every startup.
func (s *Store) ensureIndexes() error {
	indexes := []struct {
		c *mgo.Collection
		i mgo.Index
	}{{
		s.DB.StatCounters(),
		mgo.Index{Key: []string{"k", "t"}, Unique: true},
	}, {
		s.DB.StatTokens(),
		mgo.Index{Key: []string{"t"}, Unique: true},
	}, {
		s.DB.Entities(),
		mgo.Index{Key: []string{"baseurl"}},
	}, {
		s.DB.Entities(),
		mgo.Index{Key: []string{"uploadtime"}},
	}, {
		s.DB.Entities(),
		mgo.Index{Key: []string{"promulgated-url"}, Unique: true, Sparse: true},
	}, {
		s.DB.Logs(),
		mgo.Index{Key: []string{"urls"}},
	}, {
		s.DB.Entities(),
		mgo.Index{Key: []string{"user"}},
	}, {
		s.DB.Entities(),
		mgo.Index{Key: []string{"user", "name"}},
	}, {
		s.DB.Entities(),
		mgo.Index{Key: []string{"user", "name", "series"}},
	}, {
		s.DB.Entities(),
		mgo.Index{Key: []string{"series"}},
	}, {
		s.DB.Entities(),
		mgo.Index{Key: []string{"blobhash256"}},
	}, {
		s.DB.Entities(),
		mgo.Index{Key: []string{"_id", "name"}},
	}, {
		s.DB.Entities(),
		mgo.Index{Key: []string{"charmrequiredinterfaces"}},
	}, {
		s.DB.Entities(),
		mgo.Index{Key: []string{"charmprovidedinterfaces"}},
	}, {
		s.DB.Entities(),
		mgo.Index{Key: []string{"bundlecharms"}},
	}, {
		s.DB.Entities(),
		mgo.Index{Key: []string{"name", "development", "-promulgated-revision", "-supportedseries"}},
	}, {
		s.DB.Entities(),
		mgo.Index{Key: []string{"name", "development", "user", "-revision", "-supportedseries"}},
	}, {
		s.DB.BaseEntities(),
		mgo.Index{Key: []string{"name"}},
	}, {
		// TODO this index should be created by the mgo gridfs code.
		s.DB.C("entitystore.files"),
		mgo.Index{Key: []string{"filename"}},
	}}
	for _, idx := range indexes {
		err := idx.c.EnsureIndex(idx.i)
		if err != nil {
			return errgo.Notef(err, "cannot ensure index with keys %v on collection %s", idx.i, idx.c.Name)
		}
	}
	return nil
}

// AddAudit adds the given entry to the audit log.
func (s *Store) AddAudit(entry audit.Entry) {
	s.addAuditAtTime(entry, time.Now())
}

// addAuditAtTime writes entry to the pool's audit log with the given
// timestamp. It is a no-op when audit logging is not configured, and
// best-effort otherwise: encoding failures are logged, not returned.
func (s *Store) addAuditAtTime(entry audit.Entry, t time.Time) {
	if s.pool.auditEncoder == nil {
		return
	}
	entry.Time = t
	err := s.pool.auditEncoder.Encode(entry)
	if err != nil {
		logger.Errorf("Cannot write audit log entry: %v", err)
	}
}

// FindEntity finds the entity in the store with the given URL, which
// must be fully qualified. If the given URL has no user then it is
// assumed to be a promulgated entity. If fields is not nil, only its
// fields will be populated in the returned entities.
func (s *Store) FindEntity(url *router.ResolvedURL, fields map[string]int) (*mongodoc.Entity, error) {
	q := s.DB.Entities().Find(bson.D{{"_id", &url.URL}})
	if fields != nil {
		q = q.Select(fields)
	}
	var entity mongodoc.Entity
	err := q.One(&entity)
	if err != nil {
		if err == mgo.ErrNotFound {
			return nil, errgo.WithCausef(nil, params.ErrNotFound, "entity not found")
		}
		return nil, errgo.Mask(err)
	}
	return &entity, nil
}

// FindEntities finds all entities in the store matching the given URL.
// If the given URL has no user then only promulgated entities will be
// queried. If the given URL channel does not represent an entity under
// development then only published entities will be queried. If fields
// is not nil, only its fields will be populated in the returned
// entities.
func (s *Store) FindEntities(url *charm.URL, fields map[string]int) ([]*mongodoc.Entity, error) {
	query := s.EntitiesQuery(url)
	if fields != nil {
		query = query.Select(fields)
	}
	var docs []*mongodoc.Entity
	err := query.All(&docs)
	if err != nil {
		return nil, errgo.Notef(err, "cannot find entities matching %s", url)
	}
	return docs, nil
}

// FindBestEntity finds the entity that provides the preferred match to
// the given URL, on the given channel. If the given URL has no user
// then only promulgated entities will be queried. If fields is not nil,
// only those fields will be populated in the returned entities.
//
// If the URL contains a revision then it is assumed to be fully formed
// and refer to a single entity; the channel is ignored.
//
// If the URL does not contain a revision then the channel is searched
// for the best match, here NoChannel will be treated as
// params.StableChannel.
func (s *Store) FindBestEntity(url *charm.URL, channel params.Channel, fields map[string]int) (*mongodoc.Entity, error) {
	if fields != nil {
		// Make sure we have all the fields we need to make a decision.
		// TODO this would be more efficient if we used bitmasks for field selection.
		nfields := map[string]int{
			"_id":                  1,
			"promulgated-url":      1,
			"promulgated-revision": 1,
			"series":               1,
			"revision":             1,
			"development":          1,
			"stable":               1,
		}
		for f := range fields {
			nfields[f] = 1
		}
		fields = nfields
	}
	if url.Revision != -1 {
		// If the URL contains a revision, then it refers to a single entity.
		entity, err := s.findSingleEntity(url, fields)
		if errgo.Cause(err) == params.ErrNotFound {
			return nil, errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %s", url)
		} else if err != nil {
			return nil, errgo.Mask(err)
		}
		// If a channel was specified make sure the entity is in that channel.
		// This is crucial because if we don't do this, then the user could choose
		// to use any chosen set of ACLs against any entity.
		switch channel {
		case params.StableChannel:
			if !entity.Stable {
				return nil, errgo.WithCausef(nil, params.ErrNotFound, "%s not found in stable channel", url)
			}
		case params.DevelopmentChannel:
			if !entity.Development {
				return nil, errgo.WithCausef(nil, params.ErrNotFound, "%s not found in development channel", url)
			}
		}
		return entity, nil
	}
	// No revision: resolve via the requested channel, defaulting to stable.
	switch channel {
	case params.UnpublishedChannel:
		return s.findUnpublishedEntity(url, fields)
	case params.NoChannel:
		channel = params.StableChannel
		fallthrough
	default:
		return s.findEntityInChannel(url, channel, fields)
	}
}

// findSingleEntity returns the entity referred to by URL. It is expected
// that the URL refers to only one entity and is fully formed. The url may
// refer to either a user-owned or promulgated charm name.
func (s *Store) findSingleEntity(url *charm.URL, fields map[string]int) (*mongodoc.Entity, error) {
	query := s.EntitiesQuery(url)
	if fields != nil {
		query = query.Select(fields)
	}
	var entity mongodoc.Entity
	err := query.One(&entity)
	if err == nil {
		return &entity, nil
	}
	if err == mgo.ErrNotFound {
		return nil, errgo.WithCausef(err, params.ErrNotFound, "no matching charm or bundle for %s", url)
	}
	return nil, errgo.Notef(err, "cannot find entities matching %s", url)
}

// findEntityInChannel attempts to find an entity on the given channel. The
// base entity for URL is retrieved and the series with the best match to
// URL.Series is used as the resolved entity.
func (s *Store) findEntityInChannel(url *charm.URL, ch params.Channel, fields map[string]int) (*mongodoc.Entity, error) {
	baseEntity, err := s.FindBaseEntity(url, map[string]int{
		"_id":             1,
		"channelentities": 1,
	})
	if errgo.Cause(err) == params.ErrNotFound {
		return nil, errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %s", url)
	} else if err != nil {
		return nil, errgo.Mask(err)
	}
	var entityURL *charm.URL
	if url.Series == "" {
		// No series requested: pick the highest-scoring series published
		// in the channel (see seriesScore below).
		for _, u := range baseEntity.ChannelEntities[ch] {
			if entityURL == nil || seriesScore[u.Series] > seriesScore[entityURL.Series] {
				entityURL = u
			}
		}
	} else {
		entityURL = baseEntity.ChannelEntities[ch][url.Series]
	}
	if entityURL == nil {
		return nil, errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %s", url)
	}
	return s.findSingleEntity(entityURL, fields)
}

// findUnpublishedEntity attempts to find an entity on the unpublished
// channel. This searches all entities in the store for the best match to
// the URL.
func (s *Store) findUnpublishedEntity(url *charm.URL, fields map[string]int) (*mongodoc.Entity, error) {
	entities, err := s.FindEntities(url, fields)
	if err != nil {
		return nil, errgo.Mask(err)
	}
	if len(entities) == 0 {
		return nil, errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %s", url)
	}
	// Choose the best match: highest series score first, then highest
	// revision (promulgated revision when the URL has no user).
	best := entities[0]
	for _, e := range entities {
		if seriesScore[e.Series] > seriesScore[best.Series] {
			best = e
			continue
		}
		if seriesScore[e.Series] < seriesScore[best.Series] {
			continue
		}
		if url.User == "" {
			if e.PromulgatedRevision > best.PromulgatedRevision {
				best = e
				continue
			}
		} else {
			if e.Revision > best.Revision {
				best = e
				continue
			}
		}
	}
	return best, nil
}

// seriesScore ranks series for "best match" resolution; higher wins.
var seriesScore = map[string]int{
	"bundle":  -1,
	"lucid":   1000,
	"precise": 1001,
	"trusty":  1002,
	"quantal": 1,
	"raring":  2,
	"saucy":   3,
	"utopic":  4,
	"vivid":   5,
	"wily":    6,
	// When we find a multi-series charm (no series) we
	// will always choose it in preference to a series-specific
	// charm
	"": 5000,
}

var
seriesBundleOrEmpty = bson.D{{"$or", []bson.D{{{"series", "bundle"}}, {{"series", ""}}}}}

// EntitiesQuery creates a mgo.Query object that can be used to find
// entities matching the given URL. If the given URL has no user then
// the produced query will only match promulgated entities. If the given URL
// channel is not "development" then the produced query will only match
// published entities.
func (s *Store) EntitiesQuery(url *charm.URL) *mgo.Query {
	entities := s.DB.Entities()
	// At most five clauses: name, user/promulgated, revision, series.
	query := make(bson.D, 1, 5)
	query[0] = bson.DocElem{"name", url.Name}
	if url.User == "" {
		// Promulgated lookup: match on promulgated-revision.
		if url.Revision > -1 {
			query = append(query, bson.DocElem{"promulgated-revision", url.Revision})
		} else {
			query = append(query, bson.DocElem{"promulgated-revision", bson.D{{"$gt", -1}}})
		}
	} else {
		query = append(query, bson.DocElem{"user", url.User})
		if url.Revision > -1 {
			query = append(query, bson.DocElem{"revision", url.Revision})
		}
	}
	if url.Series == "" {
		if url.Revision > -1 {
			// If we're specifying a revision we must be searching
			// for a canonical URL, so search for a multi-series
			// charm or a bundle.
			query = append(query, seriesBundleOrEmpty...)
		}
	} else if url.Series == "bundle" {
		query = append(query, bson.DocElem{"series", "bundle"})
	} else {
		// A specific charm series matches any charm that supports it.
		query = append(query, bson.DocElem{"supportedseries", url.Series})
	}
	return entities.Find(query)
}

// FindBaseEntity finds the base entity in the store using the given URL,
// which can either represent a fully qualified entity or a base id.
// If fields is not nil, only those fields will be populated in the
// returned base entity.
func (s *Store) FindBaseEntity(url *charm.URL, fields map[string]int) (*mongodoc.BaseEntity, error) { var query *mgo.Query if url.User == "" { query = s.DB.BaseEntities().Find(bson.D{{"name", url.Name}, {"promulgated", 1}}) } else { query = s.DB.BaseEntities().FindId(mongodoc.BaseURL(url)) } if fields != nil { query = query.Select(fields) } var baseEntity mongodoc.BaseEntity if err := query.One(&baseEntity); err != nil { if err == mgo.ErrNotFound { return nil, errgo.WithCausef(nil, params.ErrNotFound, "base entity not found") } return nil, errgo.Notef(err, "cannot find base entity %v", url) } return &baseEntity, nil } // FieldSelector returns a field selector that will select // the given fields, or all fields if none are specified. func FieldSelector(fields ...string) map[string]int { if len(fields) == 0 { return nil } sel := make(map[string]int, len(fields)) for _, field := range fields { sel[field] = 1 } return sel } // UpdateEntity applies the provided update to the entity described by // url. If there are no entries in update then no update is performed, // and no error is returned. func (s *Store) UpdateEntity(url *router.ResolvedURL, update bson.D) error { if len(update) == 0 { return nil } if err := s.DB.Entities().Update(bson.D{{"_id", &url.URL}}, update); err != nil { if err == mgo.ErrNotFound { return errgo.WithCausef(err, params.ErrNotFound, "cannot update %q", url) } return errgo.Notef(err, "cannot update %q", url) } return nil } // UpdateBaseEntity applies the provided update to the base entity of // url. If there are no entries in update then no update is performed, // and no error is returned. 
func (s *Store) UpdateBaseEntity(url *router.ResolvedURL, update bson.D) error {
	if len(update) == 0 {
		return nil
	}
	if err := s.DB.BaseEntities().Update(bson.D{{"_id", mongodoc.BaseURL(&url.URL)}}, update); err != nil {
		if err == mgo.ErrNotFound {
			return errgo.WithCausef(err, params.ErrNotFound, "cannot update base entity for %q", url)
		}
		return errgo.Notef(err, "cannot update base entity for %q", url)
	}
	return nil
}

// Publish assigns channels to the entity corresponding to the given URL.
// An error is returned if no channels are provided. For the time being,
// the only supported channels are "development" and "stable".
func (s *Store) Publish(url *router.ResolvedURL, channels ...params.Channel) error {
	var updateSearch bool
	// Validate channels. Unknown channels are silently dropped; only
	// stable triggers a search-index update.
	actual := make([]params.Channel, 0, len(channels))
	for _, c := range channels {
		switch c {
		case params.StableChannel:
			updateSearch = true
			fallthrough
		case params.DevelopmentChannel:
			actual = append(actual, c)
		}
	}
	numChannels := len(actual)
	if numChannels == 0 {
		return errgo.Newf("cannot update %q: no channels provided", url)
	}
	// Update the entity: set the per-channel boolean flags.
	update := make(bson.D, numChannels)
	for i, c := range actual {
		update[i] = bson.DocElem{string(c), true}
	}
	if err := s.UpdateEntity(url, bson.D{{"$set", update}}); err != nil {
		return errgo.Mask(err, errgo.Is(params.ErrNotFound))
	}
	// Update the base entity.
	entity, err := s.FindEntity(url, FieldSelector("series", "supportedseries"))
	if err != nil {
		return errgo.Mask(err, errgo.Is(params.ErrNotFound))
	}
	// Multi-series charms publish one channelentities entry per
	// supported series; single-series entities use their own series.
	series := entity.SupportedSeries
	numSeries := len(series)
	if numSeries == 0 {
		series = []string{entity.Series}
		numSeries = 1
	}
	update = make(bson.D, 0, numChannels*numSeries)
	for _, c := range actual {
		for _, s := range series {
			update = append(update, bson.DocElem{fmt.Sprintf("channelentities.%s.%s", c, s), entity.URL})
		}
	}
	if err := s.UpdateBaseEntity(url, bson.D{{"$set", update}}); err != nil {
		return errgo.Mask(err)
	}
	if !updateSearch {
		return nil
	}
	// Add entity to ElasticSearch.
	if err := s.UpdateSearch(url); err != nil {
		return errgo.Notef(err, "cannot index %s to ElasticSearch", url)
	}
	return nil
}

// SetPromulgated sets whether the base entity of url is promulgated, If
// promulgated is true it also unsets promulgated on any other base
// entity for entities with the same name. It also calculates the next
// promulgated URL for the entities owned by the new owner and sets those
// entities appropriately.
//
// Note: This code is known to have some unfortunate (but not dangerous)
// race conditions. It is possible that if one or more promulgations
// happens concurrently for the same entity name then it could result in
// more than one base entity being promulgated. If this happens then
// uploads to either user will get promulgated names, these names will
// never clash. This situation is easily remedied by setting the
// promulgated user for this charm again, even to one of the ones that is
// already promulgated. It can also result in the latest promulgated
// revision of the charm not being one created by the promulgated user.
// This will be remedied when a new charm is uploaded by the promulgated
// user. As promulgation is a rare operation, it is considered that the
// chances this will happen are slim.
func (s *Store) SetPromulgated(url *router.ResolvedURL, promulgate bool) error {
	baseEntities := s.DB.BaseEntities()
	base := mongodoc.BaseURL(&url.URL)
	if !promulgate {
		// Unpromulgating is simple: clear the flag and refresh search.
		err := baseEntities.UpdateId(
			base,
			bson.D{{"$set", bson.D{{"promulgated", mongodoc.IntBool(false)}}}},
		)
		if err != nil {
			if errgo.Cause(err) == mgo.ErrNotFound {
				return errgo.WithCausef(nil, params.ErrNotFound, "base entity %q not found", base)
			}
			return errgo.Notef(err, "cannot unpromulgate base entity %q", base)
		}
		if err := s.UpdateSearchBaseURL(base); err != nil {
			return errgo.Notef(err, "cannot update search entities for %q", base)
		}
		return nil
	}
	// Find any currently promulgated base entities for this charm name.
	// Under normal circumstances there should be a maximum of one of these,
	// but we should attempt to recover if there is an error condition.
	iter := baseEntities.Find(
		bson.D{
			{"_id", bson.D{{"$ne", base}}},
			{"name", base.Name},
			{"promulgated", mongodoc.IntBool(true)},
		},
	).Iter()
	defer iter.Close()
	var baseEntity mongodoc.BaseEntity
	for iter.Next(&baseEntity) {
		err := baseEntities.UpdateId(
			baseEntity.URL,
			bson.D{{"$set", bson.D{{"promulgated", mongodoc.IntBool(false)}}}},
		)
		if err != nil {
			return errgo.Notef(err, "cannot unpromulgate base entity %q", baseEntity.URL)
		}
		if err := s.UpdateSearchBaseURL(baseEntity.URL); err != nil {
			return errgo.Notef(err, "cannot update search entities for %q", baseEntity.URL)
		}
	}
	if err := iter.Close(); err != nil {
		return errgo.Notef(err, "cannot close mgo iterator")
	}

	// Set the promulgated flag on the base entity.
	err := s.DB.BaseEntities().UpdateId(base, bson.D{{"$set", bson.D{{"promulgated", mongodoc.IntBool(true)}}}})
	if err != nil {
		if errgo.Cause(err) == mgo.ErrNotFound {
			return errgo.WithCausef(nil, params.ErrNotFound, "base entity %q not found", base)
		}
		return errgo.Notef(err, "cannot promulgate base entity %q", base)
	}

	// result holds one aggregation row: the max revision seen per series.
	type result struct {
		Series   string `bson:"_id"`
		Revision int
	}

	// Find the latest revision in each series of entities with the promulgated base URL.
	var latestOwned []result
	err = s.DB.Entities().Pipe([]bson.D{
		{{"$match", bson.D{{"baseurl", base}}}},
		{{"$group", bson.D{{"_id", "$series"}, {"revision", bson.D{{"$max", "$revision"}}}}}},
	}).All(&latestOwned)
	if err != nil {
		return errgo.Notef(err, "cannot find latest revision for promulgated URL")
	}

	// Find the latest revision in each series of the promulgated entities
	// with the same name as the base entity. Note that this works because:
	// 1) promulgated URLs always have the same charm name as their
	// non-promulgated counterparts.
	// 2) bundles cannot have names that overlap with charms.
	// Because of 1), we are sure that selecting on the entity name will
	// select all entities with a matching promulgated URL name. Because of
	// 2) we are sure that we are only updating all charms or the single
	// bundle entity.
	latestPromulgated := make(map[string]int)
	iter = s.DB.Entities().Pipe([]bson.D{
		{{"$match", bson.D{{"name", base.Name}}}},
		{{"$group", bson.D{{"_id", "$series"}, {"revision", bson.D{{"$max", "$promulgated-revision"}}}}}},
	}).Iter()
	var res result
	for iter.Next(&res) {
		latestPromulgated[res.Series] = res.Revision
	}
	if err := iter.Close(); err != nil {
		return errgo.Notef(err, "cannot close mgo iterator")
	}

	// Update the newest entity in each series with a base URL that matches the newly promulgated
	// base entity to have a promulgated URL, if it does not already have one.
	for _, r := range latestOwned {
		id := *base
		id.Series = r.Series
		id.Revision = r.Revision
		// The next free promulgated revision for this series.
		pID := id
		pID.User = ""
		pID.Revision = latestPromulgated[r.Series] + 1
		err := s.DB.Entities().Update(
			bson.D{
				{"_id", &id},
				{"promulgated-revision", -1},
			},
			bson.D{
				{"$set", bson.D{
					{"promulgated-url", &pID},
					{"promulgated-revision", pID.Revision},
				}},
			},
		)
		if err != nil && err != mgo.ErrNotFound {
			// If we get NotFound it is most likely because the latest owned revision is
			// already promulgated, so carry on.
			return errgo.Notef(err, "cannot update promulgated URLs")
		}
	}

	// Update the search record for the newest entity.
	if err := s.UpdateSearchBaseURL(base); err != nil {
		return errgo.Notef(err, "cannot update search entities for %q", base)
	}
	return nil
}

// SetPerms sets the ACL specified by which for the base entity with the
// given id. The which parameter is in the form "[channel].operation",
// where channel, if specified, is one of "development" or "stable" and
// operation is one of "read" or "write". If which does not specify a
// channel then the unpublished ACL is updated. This is only provided for
// testing.
func (s *Store) SetPerms(id *charm.URL, which string, acl ...string) error {
	return s.DB.BaseEntities().UpdateId(mongodoc.BaseURL(id), bson.D{{"$set", bson.D{{"channelacls." + which, acl}},
	}})
}

// MatchingInterfacesQuery returns a mongo query
// that will find any charms that require any interfaces
// in the required slice or provide any interfaces in the
// provided slice.
func (s *Store) MatchingInterfacesQuery(required, provided []string) *mgo.Query {
	return s.DB.Entities().Find(bson.D{{
		"$or", []bson.D{{{
			"charmrequiredinterfaces", bson.D{{
				"$elemMatch", bson.D{{
					"$in", required,
				}},
			}},
		}}, {{
			"charmprovidedinterfaces", bson.D{{
				"$elemMatch", bson.D{{
					"$in", provided,
				}},
			}},
		}}},
	}})
}

// AddLog adds a log message to the database.
func (s *Store) AddLog(data *json.RawMessage, logLevel mongodoc.LogLevel, logType mongodoc.LogType, urls []*charm.URL) error { // Encode the JSON data. b, err := json.Marshal(data) if err != nil { return errgo.Notef(err, "cannot marshal log data") } // Add the base URLs to the list of references associated with the log. // Also remove duplicate URLs while maintaining the references' order. var allUrls []*charm.URL urlMap := make(map[string]bool) for _, url := range urls { urlStr := url.String() if ok, _ := urlMap[urlStr]; !ok { urlMap[urlStr] = true allUrls = append(allUrls, url) } base := mongodoc.BaseURL(url) urlStr = base.String() if ok, _ := urlMap[urlStr]; !ok { urlMap[urlStr] = true allUrls = append(allUrls, base) } } // Add the log to the database. log := &mongodoc.Log{ Data: b, Level: logLevel, Type: logType, URLs: allUrls, Time: time.Now(), } if err := s.DB.Logs().Insert(log); err != nil { return errgo.Mask(err) } return nil } func (s *Store) DeleteEntity(id *router.ResolvedURL) error { entity, err := s.FindEntity(id, FieldSelector("blobname", "blobhash", "prev5blobhash")) if err != nil { return errgo.Mask(err, errgo.Is(params.ErrNotFound)) } // Remove the entity. if err := s.DB.Entities().RemoveId(&id.URL); err != nil { if err == mgo.ErrNotFound { // Someone else got there first. err = params.ErrNotFound } return errgo.Mask(err, errgo.Is(params.ErrNotFound)) } // Remove the reference to the archive from the blob store. if err := s.BlobStore.Remove(entity.BlobName); err != nil { return errgo.Notef(err, "cannot remove blob %s", entity.BlobName) } if entity.BlobHash != entity.PreV5BlobHash { name := preV5CompatibilityBlobName(entity.BlobName) if err := s.BlobStore.Remove(name); err != nil { return errgo.Notef(err, "cannot remove compatibility blob %s", name) } } return nil } // StoreDatabase wraps an mgo.DB ands adds a few convenience methods. type StoreDatabase struct { *mgo.Database } // clone copies the StoreDatabase, cloning the underlying mgo session. 
func (s StoreDatabase) clone() StoreDatabase { return StoreDatabase{ &mgo.Database{ Name: s.Name, Session: s.Session.Clone(), }, } } // copy copies the StoreDatabase, copying the underlying mgo session. func (s StoreDatabase) copy() StoreDatabase { return StoreDatabase{ &mgo.Database{ Name: s.Name, Session: s.Session.Copy(), }, } } // Close closes the store database's underlying session. func (s StoreDatabase) Close() { s.Session.Close() } // Entities returns the mongo collection where entities are stored. func (s StoreDatabase) Entities() *mgo.Collection { return s.C("entities") } // BaseEntities returns the mongo collection where base entities are stored. func (s StoreDatabase) BaseEntities() *mgo.Collection { return s.C("base_entities") } // Logs returns the Mongo collection where charm store logs are stored. func (s StoreDatabase) Logs() *mgo.Collection { return s.C("logs") } // Migrations returns the Mongo collection where the migration info is stored. func (s StoreDatabase) Migrations() *mgo.Collection { return s.C("migrations") } func (s StoreDatabase) Macaroons() *mgo.Collection { return s.C("macaroons") } // allCollections holds for each collection used by the charm store a // function returns that collection. // The macaroons collection is omitted because it does // not exist until a macaroon is actually created. var allCollections = []func(StoreDatabase) *mgo.Collection{ StoreDatabase.StatCounters, StoreDatabase.StatTokens, StoreDatabase.Entities, StoreDatabase.BaseEntities, StoreDatabase.Logs, StoreDatabase.Migrations, } // Collections returns a slice of all the collections used // by the charm store. func (s StoreDatabase) Collections() []*mgo.Collection { cs := make([]*mgo.Collection, len(allCollections)) for i, f := range allCollections { cs[i] = f(s) } return cs } // readerAtSeeker adapts an io.ReadSeeker to an io.ReaderAt. type readerAtSeeker struct { r io.ReadSeeker off int64 } // ReadAt implemnts SizeReaderAt.ReadAt. 
func (r *readerAtSeeker) ReadAt(buf []byte, off int64) (n int, err error) {
	// Only seek when the requested offset differs from the cached one.
	if off != r.off {
		_, err = r.r.Seek(off, 0)
		if err != nil {
			return 0, err
		}
		r.off = off
	}
	n, err = io.ReadFull(r.r, buf)
	r.off += int64(n)
	return n, err
}

// ReaderAtSeeker adapts r so that it can be used as
// a ReaderAt. Note that, contrary to the io.ReaderAt
// contract, it is not OK to use concurrently.
func ReaderAtSeeker(r io.ReadSeeker) io.ReaderAt {
	return &readerAtSeeker{r, 0}
}

// Search searches the store for the given SearchParams.
// It returns a SearchResult containing the results of the search.
func (store *Store) Search(sp SearchParams) (SearchResult, error) {
	result, err := store.ES.search(sp)
	if err != nil {
		return SearchResult{}, errgo.Mask(err)
	}
	return result, nil
}

// listFilters maps list-API filter names to entity document fields.
var listFilters = map[string]string{
	"name":  "name",
	"owner": "user",
	// NOTE(review): "serties" looks like a typo for "series" —
	// confirm against consumers of this map before changing.
	"series":      "serties",
	"type":        "type",
	"promulgated": "promulgated-revision",
}

// prepareList validates the given search parameters for use in a list
// query and translates them into a mongo filter document and sort
// specification. Full-text search parameters (text, limit, skip,
// autocomplete) are not supported for listing and cause an error.
func prepareList(sp SearchParams) (filters map[string]interface{}, sort bson.D, err error) {
	if len(sp.Text) > 0 {
		return nil, nil, errgo.New("text not allowed")
	}
	if sp.Limit > 0 {
		return nil, nil, errgo.New("limit not allowed")
	}
	if sp.Skip > 0 {
		return nil, nil, errgo.New("skip not allowed")
	}
	if sp.AutoComplete {
		return nil, nil, errgo.New("autocomplete not allowed")
	}
	filters = make(map[string]interface{})
	for k, v := range sp.Filters {
		switch k {
		case "name":
			filters[k] = v[0]
		case "owner":
			filters["user"] = v[0]
		case "series":
			filters["series"] = v[0]
		case "type":
			// Bundles are stored with the pseudo-series "bundle".
			if v[0] == "bundle" {
				filters["series"] = "bundle"
			} else {
				filters["series"] = map[string]interface{}{"$ne": "bundle"}
			}
		case "promulgated":
			// A negative promulgated revision marks a non-promulgated entity.
			if v[0] != "0" {
				filters["promulgated-revision"] = map[string]interface{}{"$gte": 0}
			} else {
				filters["promulgated-revision"] = map[string]interface{}{"$lt": 0}
			}
		default:
			return nil, nil, errgo.Newf("filter %q not allowed", k)
		}
	}
	sort, err = createMongoSort(sp)
	if err != nil {
		return nil, nil, errgo.Newf("invalid parameters: %s", err)
	}
	return filters, sort, nil
}

// sortMongoFields contains a mapping from api fieldnames to the entity fields to search.
var sortMongoFields = map[string]string{
	"name":   "name",
	"owner":  "user",
	"series": "series",
}

// createMongoSort creates a sort query parameters for mongo out of a Sort parameter.
func createMongoSort(sp SearchParams) (bson.D, error) {
	if len(sp.sort) == 0 {
		// Default to sorting by _id for a stable order.
		return bson.D{{
			"_id", 1,
		}}, nil
	}
	sort := make(bson.D, len(sp.sort))
	for i, s := range sp.sort {
		field := sortMongoFields[s.Field]
		if field == "" {
			return nil, errgo.Newf("sort %q not allowed", s.Field)
		}
		order := 1
		if s.Order == sortDescending {
			order = -1
		}
		sort[i] = bson.DocElem{field, order}
	}
	return sort, nil
}

// ListQuery holds a list query from which an iterator
// can be created.
type ListQuery struct {
	store   *Store
	filters map[string]interface{}
	sort    bson.D
}

// ListQuery lists entities in the store that conform to the
// given search parameters. It returns a ListQuery
// that can be used to iterate through the list.
func (store *Store) ListQuery(sp SearchParams) (*ListQuery, error) {
	filters, sort, err := prepareList(sp)
	if err != nil {
		return nil, errgo.Mask(err)
	}
	return &ListQuery{
		store:   store,
		filters: filters,
		sort:    sort,
	}, nil
}

// Iter returns an iterator over the entities matched by the list
// query. The returned documents include the given fields in addition
// to a fixed set needed to resolve each entity's URL.
func (lq *ListQuery) Iter(fields map[string]int) *mgo.Iter {
	qfields := FieldSelector(
		"promulgated-url",
		"development",
		"name",
		"user",
		"series",
	)
	for f := range fields {
		qfields[f] = 1
	}
	// _id and url have special treatment.
delete(qfields, "_id") delete(qfields, "url") group := make(bson.D, 0, 2+len(qfields)) group = append(group, bson.DocElem{"_id", bson.D{{ "$concat", []interface{}{ "$baseurl", "$series", bson.D{{ "$cond", []string{"$development", "true", "false"}, }}, }, }}}) group = append(group, bson.DocElem{"url", bson.D{{"$last", "$_id"}}}) for field := range qfields { group = append(group, bson.DocElem{field, bson.D{{"$last", "$" + field}}}) } project := make(bson.D, 0, len(qfields)+1) project = append(project, bson.DocElem{"_id", "$url"}) for f := range qfields { project = append(project, bson.DocElem{f, "$" + f}) } q := []bson.D{ {{"$match", lq.filters}}, {{"$sort", bson.D{{"revision", 1}}}}, {{"$group", group}}, {{"$project", project}}, {{"$sort", lq.sort}}, } return lq.store.DB.Entities().Pipe(q).Iter() } // SynchroniseElasticsearch creates new indexes in elasticsearch // and populates them with the current data from the mongodb database. func (s *Store) SynchroniseElasticsearch() error { if err := s.ES.ensureIndexes(true); err != nil { return errgo.Notef(err, "cannot create indexes") } if err := s.syncSearch(); err != nil { return errgo.Notef(err, "cannot synchronise indexes") } return nil } // EntityResolvedURL returns the ResolvedURL for the entity. It requires // that the PromulgatedURL field has been filled out in the entity. func EntityResolvedURL(e *mongodoc.Entity) *router.ResolvedURL { rurl := &router.ResolvedURL{ URL: *e.URL, PromulgatedRevision: -1, } if e.PromulgatedURL != nil { rurl.PromulgatedRevision = e.PromulgatedURL.Revision } return rurl } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/search.go0000664000175000017500000006031612672604603027161 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( "crypto/sha1" "encoding/base64" "encoding/json" "fmt" "strings" "time" "github.com/juju/utils" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/mgo.v2/bson" "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/series" ) type SearchIndex struct { *elasticsearch.Database Index string } const typeName = "entity" // seriesBoost defines how much the results for each // series will be boosted. Series are currently ranked in // reverse order of LTS releases, followed by the latest // non-LTS release, followed by everything else. var seriesBoost = func() map[string]float64 { m := make(map[string]float64) for k, v := range series.Series { if !v.SearchIndex { continue } m[k] = v.SearchBoost } return m }() // SearchDoc is a mongodoc.Entity with additional fields useful for searching. // This is the document that is stored in the search index. type SearchDoc struct { *mongodoc.Entity TotalDownloads int64 ReadACLs []string Series []string // SingleSeries is true if the document referes to an entity that // describes a single series. This will either be a bundle, a // single-series charm or an expanded record for a multi-series // charm. SingleSeries bool // AllSeries is true if the document referes to an entity that // describes all series supported by the entity. This will either // be a bundle, a single-series charm or the canonical record for // a multi-series charm. AllSeries bool } // UpdateSearchAsync will update the search record for the entity // reference r in the backgroud. 
func (s *Store) UpdateSearchAsync(r *router.ResolvedURL) { s.Go(func(s *Store) { if err := s.UpdateSearch(r); err != nil { logger.Errorf("cannot update search record for %v: %s", r, err) } }) } // UpdateSearch updates the search record for the entity reference r. The // search index only includes the latest stable revision of each entity // so the latest stable revision of the charm specified by r will be // indexed. func (s *Store) UpdateSearch(r *router.ResolvedURL) error { if s.ES == nil || s.ES.Database == nil { return nil } // For multi-series charms update the whole base URL. if r.URL.Series == "" { return s.UpdateSearchBaseURL(&r.URL) } if !series.Series[r.URL.Series].SearchIndex { return nil } baseEntity, err := s.FindBaseEntity(&r.URL, nil) if err != nil { return errgo.NoteMask(err, fmt.Sprintf("cannot update search record for %q", &r.URL), errgo.Is(params.ErrNotFound)) } series := r.URL.Series entityURL := baseEntity.ChannelEntities[params.StableChannel][series] if entityURL == nil { // There is no stable version of the entity to index. return nil } entity, err := s.FindEntity(&router.ResolvedURL{URL: *entityURL}, nil) if err != nil { return errgo.Notef(err, "cannot update search record for %q", entityURL) } if err := s.updateSearchEntity(entity, baseEntity); err != nil { return errgo.Notef(err, "cannot update search record for %q", entityURL) } return nil } // UpdateSearchBaseURL updates the search record for all entities with // the specified base URL. It must be called whenever the entry for the // given URL in the BaseEntitites collection has changed. 
func (s *Store) UpdateSearchBaseURL(baseURL *charm.URL) error { if s.ES == nil || s.ES.Database == nil { return nil } baseEntity, err := s.FindBaseEntity(baseURL, nil) if err != nil { return errgo.NoteMask(err, fmt.Sprintf("cannot index %s", baseURL), errgo.Is(params.ErrNotFound)) } stableEntities := baseEntity.ChannelEntities[params.StableChannel] updated := make(map[string]bool, len(stableEntities)) for urlSeries, url := range stableEntities { if !series.Series[urlSeries].SearchIndex { continue } if updated[url.String()] { continue } updated[url.String()] = true entity, err := s.FindEntity(&router.ResolvedURL{URL: *url}, nil) if err != nil { return errgo.Notef(err, "cannot update search record for %q", url) } if err := s.updateSearchEntity(entity, baseEntity); err != nil { return errgo.Notef(err, "cannot update search record for %q", url) } } return nil } func (s *Store) updateSearchEntity(entity *mongodoc.Entity, baseEntity *mongodoc.BaseEntity) error { doc, err := s.searchDocFromEntity(entity, baseEntity) if err != nil { return errgo.Mask(err) } if err := s.ES.update(doc); err != nil { return errgo.Notef(err, "cannot update search index") } return nil } // UpdateSearchFields updates the search record for the entity reference r // with the updated values in fields. func (s *Store) UpdateSearchFields(r *router.ResolvedURL, fields map[string]interface{}) error { if s.ES == nil || s.ES.Database == nil { return nil } var needUpdate bool for k := range fields { // Add any additional fields here that should update the search index. if k == "extrainfo.legacy-download-stats" { needUpdate = true } } if !needUpdate { return nil } if err := s.UpdateSearch(r); err != nil { return errgo.Mask(err) } return nil } // searchDocFromEntity performs the processing required to convert a // mongodoc.Entity and the corresponding mongodoc.BaseEntity to an esDoc // for indexing. 
func (s *Store) searchDocFromEntity(e *mongodoc.Entity, be *mongodoc.BaseEntity) (*SearchDoc, error) { doc := SearchDoc{Entity: e} doc.ReadACLs = be.ChannelACLs[params.StableChannel].Read // There should only be one record for the promulgated entity, which // should be the latest promulgated revision. In the case that the base // entity is not promulgated assume that there is a later promulgated // entity. if !be.Promulgated { doc.Entity.PromulgatedURL = nil doc.Entity.PromulgatedRevision = -1 } _, allRevisions, err := s.ArchiveDownloadCounts(EntityResolvedURL(e).PreferredURL(), false) if err != nil { return nil, errgo.Mask(err) } doc.TotalDownloads = allRevisions.Total if doc.Entity.Series == "bundle" { doc.Series = []string{"bundle"} } else { doc.Series = doc.Entity.SupportedSeries } doc.AllSeries = true doc.SingleSeries = doc.Entity.Series != "" return &doc, nil } // update inserts an entity into elasticsearch if elasticsearch // is configured. The entity with id r is extracted from mongodb // and written into elasticsearch. func (si *SearchIndex) update(doc *SearchDoc) error { if si == nil || si.Database == nil { return nil } err := si.PutDocumentVersionWithType( si.Index, typeName, si.getID(doc.URL), int64(doc.URL.Revision), elasticsearch.ExternalGTE, doc) if err != nil && err != elasticsearch.ErrConflict { return errgo.Mask(err) } if doc.Entity.URL.Series != "" { return nil } // This document represents a multi-series charm. Expand the // document for each of the supported series. for _, series := range doc.Entity.SupportedSeries { u := *doc.Entity.URL u.Series = series doc.Entity.URL = &u if doc.PromulgatedURL != nil { u := *doc.Entity.PromulgatedURL u.Series = series doc.Entity.PromulgatedURL = &u } doc.Series = []string{series} doc.AllSeries = false doc.SingleSeries = true if err := si.update(doc); err != nil { return errgo.Mask(err) } } return nil } // getID returns an ID for the elasticsearch document based on the contents of the // mongoDB document. 
This is to allow elasticsearch documents to be replaced with // updated versions when charm data is changed. func (si *SearchIndex) getID(r *charm.URL) string { ref := *r ref.Revision = -1 b := sha1.Sum([]byte(ref.String())) s := base64.URLEncoding.EncodeToString(b[:]) // Cut off any trailing = as there is no need for them and they will get URL escaped. return strings.TrimRight(s, "=") } // Search searches for matching entities in the configured elasticsearch index. // If there is no elasticsearch index configured then it will return an empty // SearchResult, as if no results were found. func (si *SearchIndex) search(sp SearchParams) (SearchResult, error) { if si == nil || si.Database == nil { return SearchResult{}, nil } q := createSearchDSL(sp) q.Fields = append(q.Fields, "URL", "PromulgatedURL", "Series") esr, err := si.Search(si.Index, typeName, q) if err != nil { return SearchResult{}, errgo.Mask(err) } r := SearchResult{ SearchTime: time.Duration(esr.Took) * time.Millisecond, Total: esr.Hits.Total, Results: make([]*mongodoc.Entity, 0, len(esr.Hits.Hits)), } for _, h := range esr.Hits.Hits { urlStr := h.Fields.GetString("URL") url, err := charm.ParseURL(urlStr) if err != nil { return SearchResult{}, errgo.Notef(err, "invalid URL in result %q", urlStr) } e := &mongodoc.Entity{ URL: url, } if url.Series == "" { series := make([]string, len(h.Fields["Series"])) for i, s := range h.Fields["Series"] { series[i] = s.(string) } e.SupportedSeries = series } else if url.Series != "bundle" { e.SupportedSeries = []string{url.Series} } if purlStr := h.Fields.GetString("PromulgatedURL"); purlStr != "" { purl, err := charm.ParseURL(purlStr) if err != nil { return SearchResult{}, errgo.Notef(err, "invalid promulgated URL in result %q", purlStr) } e.PromulgatedURL = purl e.PromulgatedRevision = purl.Revision } else { e.PromulgatedURL = nil e.PromulgatedRevision = -1 } r.Results = append(r.Results, e) } return r, nil } // GetSearchDocument retrieves the current search record 
for the charm // reference id. func (si *SearchIndex) GetSearchDocument(id *charm.URL) (*SearchDoc, error) { if si == nil || si.Database == nil { return &SearchDoc{}, nil } var s SearchDoc err := si.GetDocument(si.Index, "entity", si.getID(id), &s) if err != nil { return nil, errgo.Notef(err, "cannot retrieve search document for %v", id) } return &s, nil } // version is a document that stores the structure information // in the elasticsearch database. type version struct { Version int64 Index string } const versionIndex = ".versions" const versionType = "version" // ensureIndexes makes sure that the required indexes exist and have the right // settings. If force is true then ensureIndexes will create new indexes irrespective // of the status of the current index. func (si *SearchIndex) ensureIndexes(force bool) error { if si == nil || si.Database == nil { return nil } old, dv, err := si.getCurrentVersion() if err != nil { return errgo.Notef(err, "cannot get current version") } if !force && old.Version >= esSettingsVersion { return nil } index, err := si.newIndex() if err != nil { return errgo.Notef(err, "cannot create index") } new := version{ Version: esSettingsVersion, Index: index, } updated, err := si.updateVersion(new, dv) if err != nil { return errgo.Notef(err, "cannot update version") } if !updated { // Update failed so delete the new index if err := si.DeleteIndex(index); err != nil { return errgo.Notef(err, "cannot delete index") } return nil } // Update succeeded - update the aliases if err := si.Alias(index, si.Index); err != nil { return errgo.Notef(err, "cannot create alias") } // Delete the old unused index if old.Index != "" { if err := si.DeleteIndex(old.Index); err != nil { return errgo.Notef(err, "cannot delete index") } } return nil } // getCurrentVersion gets the version of elasticsearch settings, if any // that are deployed to elasticsearch. 
func (si *SearchIndex) getCurrentVersion() (version, int64, error) { var v version d, err := si.GetESDocument(versionIndex, versionType, si.Index) if err != nil && err != elasticsearch.ErrNotFound { return version{}, 0, errgo.Notef(err, "cannot get settings version") } if d.Found { if err := json.Unmarshal(d.Source, &v); err != nil { return version{}, 0, errgo.Notef(err, "invalid version") } } return v, d.Version, nil } // newIndex creates a new index with current elasticsearch settings. // The new Index will have a randomized name based on si.Index. func (si *SearchIndex) newIndex() (string, error) { uuid, err := utils.NewUUID() if err != nil { return "", errgo.Notef(err, "cannot create index name") } index := si.Index + "-" + uuid.String() if err := si.PutIndex(index, esIndex); err != nil { return "", errgo.Notef(err, "cannot set index settings") } if err := si.PutMapping(index, "entity", esMapping); err != nil { return "", errgo.Notef(err, "cannot set index mapping") } return index, nil } // updateVersion attempts to atomically update the document specifying the version of // the elasticsearch settings. If it succeeds then err will be nil, if the update could not be // made atomically then err will be elasticsearch.ErrConflict, otherwise err is a non-nil // error. func (si *SearchIndex) updateVersion(v version, dv int64) (bool, error) { var err error if dv == 0 { err = si.CreateDocument(versionIndex, versionType, si.Index, v) } else { err = si.PutDocumentVersion(versionIndex, versionType, si.Index, dv, v) } if err != nil { if errgo.Cause(err) == elasticsearch.ErrConflict { return false, nil } return false, err } return true, nil } // syncSearch populates the SearchIndex with all the data currently stored in // mongodb. If the SearchIndex is not configured then this method returns a nil error. 
func (s *Store) syncSearch() error {
	if s.ES == nil || s.ES.Database == nil {
		return nil
	}
	var result mongodoc.Entity
	// Only get the IDs here, UpdateSearch will get the full document
	// if it is in a series that is indexed.
	iter := s.DB.Entities().Find(nil).Select(bson.M{"_id": 1, "promulgated-url": 1}).Iter()
	defer iter.Close() // Make sure we always close on error.
	for iter.Next(&result) {
		rurl := EntityResolvedURL(&result)
		if err := s.UpdateSearch(rurl); err != nil {
			return errgo.Notef(err, "cannot index %s", rurl)
		}
	}
	logger.Infof("finished sync search")
	if err := iter.Close(); err != nil {
		return err
	}
	return nil
}

// SearchParams represents the search parameters used to search the store.
type SearchParams struct {
	// The text to use in the full text search query.
	Text string
	// If autocomplete is specified, the search will return only charms and
	// bundles with a name that has text as a prefix.
	AutoComplete bool
	// Limit the search to items with attributes that match the specified filter value.
	Filters map[string][]string
	// Limit the number of returned items to the specified count.
	Limit int
	// Include the following metadata items in the search results.
	Include []string
	// Start the returned items at a specific offset.
	Skip int
	// ACL values to search in addition to everyone. ACL values may represent user names
	// or group names.
	Groups []string
	// Admin searches will not filter on the ACL and will show results for all matching
	// charms.
	Admin bool
	// Sort the returned items.
	sort []sortParam
	// ExpandedMultiSeries returns a number of entries for
	// multi-series charms, one for each entity.
	ExpandedMultiSeries bool
}

// allowedSortFields lists the field names accepted by ParseSortFields.
var allowedSortFields = map[string]bool{
	"name":      true,
	"owner":     true,
	"series":    true,
	"downloads": true,
}

// ParseSortFields parses the given sort specifications, each a
// comma-separated list of field names optionally prefixed with "-" for
// descending order, and appends them to sp.sort.
func (sp *SearchParams) ParseSortFields(f ...string) error {
	for _, s := range f {
		for _, s := range strings.Split(s, ",") {
			var sort sortParam
			if strings.HasPrefix(s, "-") {
				sort.Order = sortDescending
				s = s[1:]
			}
			if !allowedSortFields[s] {
				return errgo.Newf("unrecognized sort parameter %q", s)
			}
			sort.Field = s
			sp.sort = append(sp.sort, sort)
		}
	}
	return nil
}

// sortOrder defines the order in which a field should be sorted.
type sortOrder int

const (
	sortAscending sortOrder = iota
	sortDescending
)

// sortParam represents a field and direction on which results should be sorted.
type sortParam struct {
	Field string
	Order sortOrder
}

// SearchResult represents the result of performing a search. The entites
// in Results will have the following fields completed:
// - URL
// - SupportedSeries
// - PromulgatedURL
// - PromulgatedRevision
type SearchResult struct {
	SearchTime time.Duration
	Total      int
	Results    []*mongodoc.Entity
}

// ListResult represents the result of performing a list.
type ListResult struct {
	Results []*mongodoc.Entity
}

// queryFields provides a map of fields to weighting to use with the
// elasticsearch query.
func queryFields(sp SearchParams) map[string]float64 {
	fields := map[string]float64{
		"URL.ngrams":              8,
		"CharmMeta.Categories":    5,
		"CharmMeta.Tags":          5,
		"BundleData.Tags":         5,
		"Series.ngrams":           5,
		"CharmProvidedInterfaces": 3,
		"CharmRequiredInterfaces": 3,
		"CharmMeta.Description":   1,
		"BundleReadMe":            1,
	}
	// Autocomplete matches on name prefixes, so use the ngram analyzer.
	if sp.AutoComplete {
		fields["CharmMeta.Name.ngrams"] = 10
	} else {
		fields["CharmMeta.Name"] = 10
	}
	return fields
}

// encodeFields takes a map of field name to weight and builds a slice of strings
// representing those weighted fields for a MultiMatchQuery.
func encodeFields(fields map[string]float64) []string { fs := make([]string, 0, len(fields)) for k, v := range fields { fs = append(fs, elasticsearch.BoostField(k, v)) } return fs } // createSearchDSL builds an elasticsearch query from the query parameters. // http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html func createSearchDSL(sp SearchParams) elasticsearch.QueryDSL { qdsl := elasticsearch.QueryDSL{ From: sp.Skip, Size: sp.Limit, } // Full text search var q elasticsearch.Query if sp.Text == "" { q = elasticsearch.MatchAllQuery{} } else { q = elasticsearch.MultiMatchQuery{ Query: sp.Text, Fields: encodeFields(queryFields(sp)), } } // Boosting f := []elasticsearch.Function{ // TODO(mhilton) review this function in future if downloads get sufficiently // large that the order becomes undesirable. elasticsearch.FieldValueFactorFunction{ Field: "TotalDownloads", Factor: 0.000001, Modifier: "ln2p", }, elasticsearch.BoostFactorFunction{ Filter: promulgatedFilter("1"), BoostFactor: 1.25, }, } for k, v := range seriesBoost { f = append(f, elasticsearch.BoostFactorFunction{ Filter: seriesFilter(k), BoostFactor: v, }) } q = elasticsearch.FunctionScoreQuery{ Query: q, Functions: f, } // Filters qdsl.Query = elasticsearch.FilteredQuery{ Query: q, Filter: createFilters(sp), } // Sorting for _, s := range sp.sort { qdsl.Sort = append(qdsl.Sort, createElasticSort(s)) } return qdsl } // createFilters converts the filters requested with the search API into // filters in the elasticsearch query DSL. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-search // for details of how filters are specified in the API. For each key in f a // filter is created that matches any one of the set of values specified for // that key. The created filter will only match when at least one of the // requested values matches for all of the requested keys. 
Any filter names // that are not defined in the filters map will be silently skipped func createFilters(sp SearchParams) elasticsearch.Filter { af := make(elasticsearch.AndFilter, 1, len(sp.Filters)+2) if sp.ExpandedMultiSeries { af[0] = elasticsearch.TermFilter{ Field: "SingleSeries", Value: "true", } } else { af[0] = elasticsearch.TermFilter{ Field: "AllSeries", Value: "true", } } for k, vals := range sp.Filters { filter, ok := filters[k] if !ok { continue } of := make(elasticsearch.OrFilter, 0, len(vals)) for _, v := range vals { of = append(of, filter(v)) } af = append(af, of) } if sp.Admin { return af } gf := make(elasticsearch.OrFilter, 0, len(sp.Groups)+1) gf = append(gf, elasticsearch.TermFilter{ Field: "ReadACLs", Value: params.Everyone, }) for _, g := range sp.Groups { gf = append(gf, elasticsearch.TermFilter{ Field: "ReadACLs", Value: g, }) } af = append(af, gf) return af } // filters contains a mapping from a filter parameter in the API to a // function that will generate an elasticsearch query DSL filter for the // given value. var filters = map[string]func(string) elasticsearch.Filter{ "description": descriptionFilter, "name": nameFilter, "owner": ownerFilter, "promulgated": promulgatedFilter, "provides": termFilter("CharmProvidedInterfaces"), "requires": termFilter("CharmRequiredInterfaces"), "series": seriesFilter, "summary": summaryFilter, "tags": tagsFilter, "type": typeFilter, } // descriptionFilter generates a filter that will match against the // description field of the charm data. func descriptionFilter(value string) elasticsearch.Filter { return elasticsearch.QueryFilter{ Query: elasticsearch.MatchQuery{ Field: "CharmMeta.Description", Query: value, Type: "phrase", }, } } // nameFilter generates a filter that will match against the // name of the charm or bundle. 
func nameFilter(value string) elasticsearch.Filter { return elasticsearch.QueryFilter{ Query: elasticsearch.MatchQuery{ Field: "Name", Query: value, Type: "phrase", }, } } // ownerFilter generates a filter that will match against the // owner taken from the URL. func ownerFilter(value string) elasticsearch.Filter { if value == "" { return promulgatedFilter("1") } return elasticsearch.QueryFilter{ Query: elasticsearch.MatchQuery{ Field: "User", Query: value, Type: "phrase", }, } } // promulgatedFilter generates a filter that will match against the // existence of a promulgated URL. func promulgatedFilter(value string) elasticsearch.Filter { f := elasticsearch.ExistsFilter("PromulgatedURL") if value == "1" { return f } return elasticsearch.NotFilter{f} } // seriesFilter generates a filter that will match against the // series taken from the URL. func seriesFilter(value string) elasticsearch.Filter { return elasticsearch.QueryFilter{ Query: elasticsearch.MatchQuery{ Field: "Series", Query: value, Type: "phrase", }, } } // summaryFilter generates a filter that will match against the // summary field from the charm data. func summaryFilter(value string) elasticsearch.Filter { return elasticsearch.QueryFilter{ Query: elasticsearch.MatchQuery{ Field: "CharmMeta.Summary", Query: value, Type: "phrase", }, } } // tagsFilter generates a filter that will match against the "tags" field // in the data. For charms this is the Categories field and for bundles this // is the Tags field. 
func tagsFilter(value string) elasticsearch.Filter { tags := strings.Split(value, " ") af := make(elasticsearch.AndFilter, 0, len(tags)) for _, t := range tags { if t == "" { continue } af = append(af, elasticsearch.OrFilter{ elasticsearch.TermFilter{ Field: "CharmMeta.Categories", Value: t, }, elasticsearch.TermFilter{ Field: "CharmMeta.Tags", Value: t, }, elasticsearch.TermFilter{ Field: "BundleData.Tags", Value: t, }, }) } return af } // termFilter creates a function that generates a filter on the specified // document field. func termFilter(field string) func(string) elasticsearch.Filter { return func(value string) elasticsearch.Filter { terms := strings.Split(value, " ") af := make(elasticsearch.AndFilter, 0, len(terms)) for _, t := range terms { if t == "" { continue } af = append(af, elasticsearch.TermFilter{ Field: field, Value: t, }) } return af } } // bundleFilter is a filter that matches against bundles, based on // the URL. var bundleFilter = seriesFilter("bundle") // typeFilter generates a filter that is used to match either only charms, // or only bundles. func typeFilter(value string) elasticsearch.Filter { if value == "bundle" { return bundleFilter } return elasticsearch.NotFilter{bundleFilter} } // sortFields contains a mapping from api fieldnames to the entity fields to search. var sortESFields = map[string]string{ "name": "Name", "owner": "User", "series": "Series", "downloads": "TotalDownloads", } // createSort creates an elasticsearch.Sort query parameter out of a Sort parameter. func createElasticSort(s sortParam) elasticsearch.Sort { sort := elasticsearch.Sort{ Field: sortESFields[s.Field], Order: elasticsearch.Ascending, } if s.Order == sortDescending { sort.Order = elasticsearch.Descending } return sort } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/addentity.go0000664000175000017500000006553612672604603027712 0ustar marcomarco// Copyright 2016 Canonical Ltd. 
// Licensed under the AGPLv3, see LICENCE file for details.

package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"

import (
	"archive/zip"
	"bytes"
	"crypto/sha256"
	"crypto/sha512"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"sort"
	"time"

	jujuzip "github.com/juju/zip"
	"gopkg.in/errgo.v1"
	"gopkg.in/juju/charm.v6-unstable"
	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
	"gopkg.in/yaml.v2"

	"gopkg.in/juju/charmstore.v5-unstable/internal/blobstore"
	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
	"gopkg.in/juju/charmstore.v5-unstable/internal/series"
)

// addParams holds parameters held in common between the
// Store.addCharm and Store.addBundle methods.
type addParams struct {
	// url holds the id to be associated with the stored entity.
	// If URL.PromulgatedRevision is not -1, the entity will
	// be promulgated.
	url *router.ResolvedURL

	// blobName holds the name of the entity's archive blob.
	blobName string

	// blobHash holds the hash of the entity's archive blob.
	blobHash string

	// preV5BlobHash holds the hash of the entity's archive blob for
	// pre-v5 compatibility purposes.
	preV5BlobHash string

	// preV5BlobHash256 holds the SHA256 hash of the entity's archive blob for
	// pre-v5 compatibility purposes.
	preV5BlobHash256 string

	// preV5BlobSize holds the size of the entity's archive blob for
	// pre-v5 compatibility purposes.
	preV5BlobSize int64

	// blobHash256 holds the sha256 hash of the entity's archive blob.
	blobHash256 string

	// blobSize holds the size of the entity's archive blob.
	blobSize int64

	// chans holds the channels to associate with the entity.
	chans []params.Channel
}

// AddCharmWithArchive adds the given charm, which must
// be either a *charm.CharmDir or implement ArchiverTo,
// to the charmstore under the given URL.
//
// This method is provided for testing purposes only.
func (s *Store) AddCharmWithArchive(url *router.ResolvedURL, ch charm.Charm) error {
	return s.AddEntityWithArchive(url, ch)
}

// AddBundleWithArchive adds the given bundle, which must
// be either a *charm.BundleDir or implement ArchiverTo,
// to the charmstore under the given URL.
//
// This method is provided for testing purposes only.
func (s *Store) AddBundleWithArchive(url *router.ResolvedURL, b charm.Bundle) error {
	return s.AddEntityWithArchive(url, b)
}

// AddEntityWithArchive provides the implementation for
// both AddCharmWithArchive and AddBundleWithArchive.
// It accepts charm.Charm or charm.Bundle implementations
// defined in the charm package, and any that implement
// ArchiverTo.
func (s *Store) AddEntityWithArchive(url *router.ResolvedURL, archive interface{}) error {
	blob, err := getArchive(archive)
	if err != nil {
		return errgo.Notef(err, "cannot get archive")
	}
	defer blob.Close()
	// Hash the archive content while measuring its size, then rewind
	// so the same reader can be uploaded.
	h := blobstore.NewHash()
	size, err := io.Copy(h, blob)
	if err != nil {
		return errgo.Notef(err, "cannot copy archive")
	}
	if _, err := blob.Seek(0, 0); err != nil {
		return errgo.Notef(err, "cannot seek to start of archive")
	}
	sum := fmt.Sprintf("%x", h.Sum(nil))
	if err := s.UploadEntity(url, blob, sum, size, nil); err != nil {
		return errgo.Mask(err, errgo.Any)
	}
	return nil
}

// UploadEntity reads the given blob, which should have the given hash
// and size, and uploads it to the charm store, associating it with
// the given channels (without actually making it current in any of them).
//
// The following error causes may be returned:
// params.ErrDuplicateUpload if the URL duplicates an existing entity.
// params.ErrEntityIdNotAllowed if the id may not be created.
// params.ErrInvalidEntity if the provided blob is invalid.
func (s *Store) UploadEntity(url *router.ResolvedURL, blob io.Reader, blobHash string, size int64, chans []params.Channel) error {
	// Strictly speaking these tests are redundant, because a ResolvedURL should
	// always be canonical, but check just in case anyway, as this is
	// final gateway before a potentially invalid url might be stored
	// in the database.
	if url.URL.User == "" {
		return errgo.WithCausef(nil, params.ErrEntityIdNotAllowed, "entity id does not specify user")
	}
	if url.URL.Revision == -1 {
		return errgo.WithCausef(nil, params.ErrEntityIdNotAllowed, "entity id does not specify revision")
	}
	blobName, blobHash256, err := s.putArchive(blob, size, blobHash)
	if err != nil {
		return errgo.Mask(err)
	}
	// Re-open the stored blob so that the entity metadata can be
	// parsed from what was actually stored.
	r, _, err := s.BlobStore.Open(blobName)
	if err != nil {
		return errgo.Notef(err, "cannot open newly created blob")
	}
	defer r.Close()
	if err := s.addEntityFromReader(url, r, blobName, blobHash, blobHash256, size, chans); err != nil {
		// The entity could not be added; clean up the blob stored above.
		if err1 := s.BlobStore.Remove(blobName); err1 != nil {
			logger.Errorf("cannot remove blob %s after error: %v", blobName, err1)
		}
		return errgo.Mask(err,
			errgo.Is(params.ErrDuplicateUpload),
			errgo.Is(params.ErrEntityIdNotAllowed),
			errgo.Is(params.ErrInvalidEntity),
		)
	}
	return nil
}

// putArchive reads the charm or bundle archive from the given reader and
// puts into the blob store. The archiveSize and hash must hold the length
// of the blob content and its SHA384 hash respectively. It returns the
// generated blob name and the SHA256 hash of the content.
func (s *Store) putArchive(blob io.Reader, blobSize int64, hash string) (blobName, blobHash256 string, err error) {
	name := bson.NewObjectId().Hex()
	// Calculate the SHA256 hash while uploading the blob in the blob store.
	hash256 := sha256.New()
	blob = io.TeeReader(blob, hash256)
	// Upload the actual blob, and make sure that it is removed
	// if we fail later.
	err = s.BlobStore.PutUnchallenged(blob, name, blobSize, hash)
	if err != nil {
		// TODO return error with ErrInvalidEntity cause when
		// there's a hash or size mismatch.
		return "", "", errgo.Notef(err, "cannot put archive blob")
	}
	return name, fmt.Sprintf("%x", hash256.Sum(nil)), nil
}

// addEntityFromReader adds the entity represented by the contents
// of the given reader, associating it with the given id.
// For multi-series charms it also stores a pre-v5 compatibility blob
// (see addPreV5CompatibilityHackBlob) and records its hashes and size
// in the entity document.
func (s *Store) addEntityFromReader(id *router.ResolvedURL, r io.ReadSeeker, blobName, hash, hash256 string, blobSize int64, chans []params.Channel) error {
	// By default the pre-v5 fields mirror the main blob; they diverge
	// only when a compatibility blob is added below.
	p := addParams{
		url:              id,
		blobName:         blobName,
		blobHash:         hash,
		blobHash256:      hash256,
		blobSize:         blobSize,
		preV5BlobHash:    hash,
		preV5BlobHash256: hash256,
		preV5BlobSize:    blobSize,
		chans:            chans,
	}
	if id.URL.Series == "bundle" {
		b, err := s.newBundle(id, r, blobSize)
		if err != nil {
			return errgo.Mask(err, errgo.Is(params.ErrInvalidEntity), errgo.Is(params.ErrDuplicateUpload), errgo.Is(params.ErrEntityIdNotAllowed))
		}
		if err := s.addBundle(b, p); err != nil {
			return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload), errgo.Is(params.ErrEntityIdNotAllowed))
		}
		return nil
	}
	ch, err := s.newCharm(id, r, blobSize)
	if err != nil {
		return errgo.Mask(err, errgo.Is(params.ErrInvalidEntity), errgo.Is(params.ErrDuplicateUpload), errgo.Is(params.ErrEntityIdNotAllowed))
	}
	if len(ch.Meta().Series) > 0 {
		// Multi-series charm: build the compatibility blob from the
		// original archive, which requires rewinding the reader.
		if _, err := r.Seek(0, 0); err != nil {
			return errgo.Notef(err, "cannot seek to start of archive")
		}
		logger.Infof("adding pre-v5 compat blob for %#v", id)
		info, err := addPreV5CompatibilityHackBlob(s.BlobStore, r, p.blobName, p.blobSize)
		if err != nil {
			return errgo.Notef(err, "cannot add pre-v5 compatibility blob")
		}
		p.preV5BlobHash = info.hash
		p.preV5BlobHash256 = info.hash256
		p.preV5BlobSize = info.size
	}
	err = s.addCharm(ch, p)
	if err != nil && len(ch.Meta().Series) > 0 {
		// We added a compatibility blob so we need to remove it.
		compatBlobName := preV5CompatibilityBlobName(p.blobName)
		if err1 := s.BlobStore.Remove(compatBlobName); err1 != nil {
			logger.Errorf("cannot remove blob %s after error: %v", compatBlobName, err1)
		}
	}
	if err != nil {
		return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload), errgo.Is(params.ErrEntityIdNotAllowed))
	}
	return nil
}

// preV5CompatibilityHackBlobInfo holds the checksums and size of the
// combined (original + suffix) pre-v5 compatibility blob.
type preV5CompatibilityHackBlobInfo struct {
	// hash holds the SHA384 checksum of the combined blob.
	hash string
	// hash256 holds the SHA256 checksum of the combined blob.
	hash256 string
	// size holds the total size of the combined blob in bytes.
	size int64
}

// addPreV5CompatibilityHackBlob adds a second blob to the blob store that
// contains a suffix to the zipped charm archive file that updates the zip
// index to point to an updated version of metadata.yaml that does
// not have a series field. The original blob is held in r.
// It returns the hashes and size of the combined original+suffix content.
//
// We do this because earlier versions of the charm package have a version
// of the series field that holds a single string rather than a slice of string
// so will fail when reading the new slice-of-string form, and we
// don't want to change the field name from "series".
func addPreV5CompatibilityHackBlob(blobStore *blobstore.Store, r io.ReadSeeker, blobName string, blobSize int64) (*preV5CompatibilityHackBlobInfo, error) {
	readerAt := ReaderAtSeeker(r)
	z, err := jujuzip.NewReader(readerAt, blobSize)
	if err != nil {
		return nil, errgo.Notef(err, "cannot open charm archive")
	}
	// Locate the metadata.yaml entry in the archive index.
	var metadataf *jujuzip.File
	for _, f := range z.File {
		if f.Name == "metadata.yaml" {
			metadataf = f
			break
		}
	}
	if metadataf == nil {
		return nil, errgo.New("no metadata.yaml file found")
	}
	fr, err := metadataf.Open()
	if err != nil {
		return nil, errgo.Notef(err, "cannot open metadata.yaml from archive")
	}
	defer fr.Close()
	data, err := removeSeriesField(fr)
	if err != nil {
		return nil, errgo.Notef(err, "cannot remove series field from metadata")
	}
	// Append a replacement metadata.yaml entry; the resulting suffix
	// (new entry + rewritten central directory) is the compat blob.
	var appendedBlob bytes.Buffer
	zw := z.Append(&appendedBlob)
	updatedf := metadataf.FileHeader
	// Work around invalid duplicate FileHeader issue.
	zwf, err := zw.CreateHeader(&updatedf)
	if err != nil {
		return nil, errgo.Notef(err, "cannot create appended metadata entry")
	}
	if _, err := zwf.Write(data); err != nil {
		return nil, errgo.Notef(err, "cannot write appended metadata data")
	}
	if err := zw.Close(); err != nil {
		return nil, errgo.Notef(err, "cannot close zip file")
	}
	data = appendedBlob.Bytes()
	sha384sum := sha512.Sum384(data)
	err = blobStore.PutUnchallenged(&appendedBlob, preV5CompatibilityBlobName(blobName), int64(len(data)), fmt.Sprintf("%x", sha384sum[:]))
	if err != nil {
		return nil, errgo.Notef(err, "cannot put archive blob")
	}
	// Recompute the checksums over original-blob-followed-by-suffix,
	// which is what pre-v5 clients will download.
	sha384w := sha512.New384()
	sha256w := sha256.New()
	hashw := io.MultiWriter(sha384w, sha256w)
	if _, err := r.Seek(0, 0); err != nil {
		// NOTE(review): "cannnot" typo in this message.
		return nil, errgo.Notef(err, "cannnot seek to start of blob")
	}
	if _, err := io.Copy(hashw, r); err != nil {
		return nil, errgo.Notef(err, "cannot recalculate blob checksum")
	}
	// hash.Hash writers are documented never to return an error.
	hashw.Write(data)
	return &preV5CompatibilityHackBlobInfo{
		size:    blobSize + int64(len(data)),
		hash256: fmt.Sprintf("%x", sha256w.Sum(nil)),
		hash:    fmt.Sprintf("%x", sha384w.Sum(nil)),
	}, nil
}

// preV5CompatibilityBlobName returns the name of the zip file suffix used
// to overwrite the metadata.yaml file for pre-v5 compatibility purposes.
func preV5CompatibilityBlobName(blobName string) string {
	return blobName + ".pre-v5-suffix"
}

// removeSeriesField reads YAML charm metadata from r and returns it
// re-marshalled with any top-level "series" field removed.
func removeSeriesField(r io.Reader) ([]byte, error) {
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, errgo.Mask(err)
	}
	var meta map[string]interface{}
	if err := yaml.Unmarshal(data, &meta); err != nil {
		return nil, errgo.Notef(err, "cannot unmarshal metadata.yaml")
	}
	delete(meta, "series")
	data, err = yaml.Marshal(meta)
	if err != nil {
		return nil, errgo.Notef(err, "cannot re-marshal metadata.yaml")
	}
	return data, nil
}

// newCharm returns a new charm implementation from the archive blob
// read from r, that should have the given size and will
// be named with the given id.
//
// The charm is checked for validity before returning.
func (s *Store) newCharm(id *router.ResolvedURL, r io.ReadSeeker, blobSize int64) (charm.Charm, error) { readerAt := ReaderAtSeeker(r) ch, err := charm.ReadCharmArchiveFromReader(readerAt, blobSize) if err != nil { return nil, zipReadError(err, "cannot read charm archive") } if err := checkCharmIsValid(ch); err != nil { return nil, errgo.Mask(err, errgo.Is(params.ErrInvalidEntity)) } if err := checkIdAllowed(id, ch); err != nil { return nil, errgo.Mask(err, errgo.Is(params.ErrEntityIdNotAllowed)) } return ch, nil } func checkCharmIsValid(ch charm.Charm) error { m := ch.Meta() for _, rels := range []map[string]charm.Relation{m.Provides, m.Requires, m.Peers} { if err := checkRelationsAreValid(rels); err != nil { return errgo.Mask(err, errgo.Is(params.ErrInvalidEntity)) } } if err := checkConsistentSeries(m.Series); err != nil { return errgo.Mask(err, errgo.Is(params.ErrInvalidEntity)) } return nil } func checkRelationsAreValid(rels map[string]charm.Relation) error { for _, rel := range rels { if rel.Name == "relation-name" { return errgo.WithCausef(nil, params.ErrInvalidEntity, "relation %s has almost certainly not been changed from the template", rel.Name) } if rel.Interface == "interface-name" { return errgo.WithCausef(nil, params.ErrInvalidEntity, "interface %s in relation %s has almost certainly not been changed from the template", rel.Interface, rel.Name) } } return nil } // checkConsistentSeries ensures that all of the series listed in the // charm metadata come from the same distribution. If an error is // returned it will have a cause of params.ErrInvalidEntity. 
func checkConsistentSeries(metadataSeries []string) error {
	// dist tracks the distribution of the first recognized series;
	// all subsequent series must match it.
	var dist series.Distribution
	for _, s := range metadataSeries {
		d := series.Series[s].Distribution
		if d == "" {
			return errgo.WithCausef(nil, params.ErrInvalidEntity, "unrecognized series %q in metadata", s)
		}
		if dist == "" {
			dist = d
		} else if dist != d {
			return errgo.WithCausef(nil, params.ErrInvalidEntity, "cannot mix series from %s and %s in single charm", dist, d)
		}
	}
	return nil
}

// checkIdAllowed ensures that the given id may be used for the provided
// charm. If an error is returned it will have a cause of
// params.ErrEntityIdNotAllowed.
func checkIdAllowed(id *router.ResolvedURL, ch charm.Charm) error {
	m := ch.Meta()
	if id.URL.Series == "" && len(m.Series) == 0 {
		return errgo.WithCausef(nil, params.ErrEntityIdNotAllowed, "series not specified in url or charm metadata")
	} else if id.URL.Series == "" || len(m.Series) == 0 {
		// Series specified in exactly one place; nothing to reconcile.
		return nil
	}
	// if we get here we have series in both the id and metadata, ensure they agree.
	for _, s := range m.Series {
		if s == id.URL.Series {
			return nil
		}
	}
	return errgo.WithCausef(nil, params.ErrEntityIdNotAllowed, "%q series not listed in charm metadata", id.URL.Series)
}

// addCharm adds a charm to the entities collection with the given parameters.
// If p.URL cannot be used as a name for the charm then the returned
// error will have the cause params.ErrEntityIdNotAllowed. If the charm
// duplicates an existing charm then the returned error will have the
// cause params.ErrDuplicateUpload.
func (s *Store) addCharm(c charm.Charm, p addParams) (err error) {
	// Strictly speaking this test is redundant, because a ResolvedURL should
	// always be canonical, but check just in case anyway, as this is
	// final gateway before a potentially invalid url might be stored
	// in the database.
	id := p.url.URL
	logger.Infof("add charm url %s; promulgated rev %d", &id, p.url.PromulgatedRevision)
	entity := &mongodoc.Entity{
		URL:                     &id,
		PromulgatedURL:          p.url.PromulgatedURL(),
		BlobHash:                p.blobHash,
		BlobHash256:             p.blobHash256,
		BlobName:                p.blobName,
		PreV5BlobSize:           p.preV5BlobSize,
		PreV5BlobHash:           p.preV5BlobHash,
		PreV5BlobHash256:        p.preV5BlobHash256,
		Size:                    p.blobSize,
		UploadTime:              time.Now(),
		CharmMeta:               c.Meta(),
		CharmConfig:             c.Config(),
		CharmActions:            c.Actions(),
		CharmProvidedInterfaces: interfacesForRelations(c.Meta().Provides),
		CharmRequiredInterfaces: interfacesForRelations(c.Meta().Requires),
		SupportedSeries:         c.Meta().Series,
	}
	denormalizeEntity(entity)
	setEntityChannels(entity, p.chans)
	// Check that we're not going to create a charm that duplicates
	// the name of a bundle. This is racy, but it's the best we can
	// do. Also check that there isn't an existing multi-series charm
	// that would be replaced by this one.
	entities, err := s.FindEntities(entity.BaseURL, nil)
	if err != nil {
		return errgo.Notef(err, "cannot check for existing entities")
	}
	for _, entity := range entities {
		if entity.URL.Series == "bundle" {
			// NOTE(review): err is necessarily nil at this point (checked
			// above), so it carries no information as the WithCausef cause.
			return errgo.WithCausef(err, params.ErrEntityIdNotAllowed, "charm name duplicates bundle name %v", entity.URL)
		}
		if id.Series != "" && entity.URL.Series == "" {
			return errgo.WithCausef(err, params.ErrEntityIdNotAllowed, "charm name duplicates multi-series charm name %v", entity.URL)
		}
	}
	if err := s.addEntity(entity); err != nil {
		return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload))
	}
	return nil
}

// setEntityChannels associates the entity with the given channels, ignoring
// unknown channels.
func setEntityChannels(entity *mongodoc.Entity, chans []params.Channel) {
	for _, c := range chans {
		switch c {
		case params.DevelopmentChannel:
			entity.Development = true
		case params.StableChannel:
			entity.Stable = true
		}
	}
}

// addBundle adds a bundle to the entities collection with the given
// parameters. If p.URL cannot be used as a name for the bundle then the
// returned error will have the cause params.ErrEntityIdNotAllowed. If
// the bundle duplicates an existing bundle then the returned error will
// have the cause params.ErrDuplicateUpload.
func (s *Store) addBundle(b charm.Bundle, p addParams) error {
	bundleData := b.Data()
	urls, err := bundleCharms(bundleData)
	if err != nil {
		return errgo.Mask(err)
	}
	entity := &mongodoc.Entity{
		URL:                &p.url.URL,
		BlobHash:           p.blobHash,
		BlobHash256:        p.blobHash256,
		BlobName:           p.blobName,
		PreV5BlobSize:      p.preV5BlobSize,
		PreV5BlobHash:      p.preV5BlobHash,
		PreV5BlobHash256:   p.preV5BlobHash256,
		Size:               p.blobSize,
		UploadTime:         time.Now(),
		BundleData:         bundleData,
		BundleUnitCount:    newInt(bundleUnitCount(bundleData)),
		BundleMachineCount: newInt(bundleMachineCount(bundleData)),
		BundleReadMe:       b.ReadMe(),
		BundleCharms:       urls,
		PromulgatedURL:     p.url.PromulgatedURL(),
	}
	denormalizeEntity(entity)
	setEntityChannels(entity, p.chans)
	// Check that we're not going to create a bundle that duplicates
	// the name of a charm. This is racy, but it's the best we can do.
	entities, err := s.FindEntities(entity.BaseURL, nil)
	if err != nil {
		return errgo.Notef(err, "cannot check for existing entities")
	}
	for _, entity := range entities {
		if entity.URL.Series != "bundle" {
			// NOTE(review): as in addCharm, err is nil here.
			return errgo.WithCausef(err, params.ErrEntityIdNotAllowed, "bundle name duplicates charm name %s", entity.URL)
		}
	}
	if err := s.addEntity(entity); err != nil {
		return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload))
	}
	return nil
}

// addEntity actually adds the entity (and its base entity if required) to
// the database. It assumes that the blob associated with the
// entity has already been validated and stored.
func (s *Store) addEntity(entity *mongodoc.Entity) (err error) {
	// Add the base entity to the database.
	perms := []string{entity.User}
	acls := mongodoc.ACL{
		Read:  perms,
		Write: perms,
	}
	baseEntity := &mongodoc.BaseEntity{
		URL:  entity.BaseURL,
		User: entity.User,
		Name: entity.Name,
		ChannelACLs: map[params.Channel]mongodoc.ACL{
			params.UnpublishedChannel: acls,
			params.DevelopmentChannel: acls,
			params.StableChannel:      acls,
		},
		Promulgated: entity.PromulgatedURL != nil,
	}
	err = s.DB.BaseEntities().Insert(baseEntity)
	// A duplicate base entity is fine: it already exists from a
	// previous upload of the same charm/bundle name.
	if err != nil && !mgo.IsDup(err) {
		return errgo.Notef(err, "cannot insert base entity")
	}
	// Add the entity to the database.
	err = s.DB.Entities().Insert(entity)
	if mgo.IsDup(err) {
		return params.ErrDuplicateUpload
	}
	if err != nil {
		return errgo.Notef(err, "cannot insert entity")
	}
	return nil
}

// denormalizeEntity sets all denormalized fields in e
// from their associated canonical fields.
//
// It is the responsibility of the caller to set e.SupportedSeries
// if the entity URL does not contain a series. If the entity
// URL *does* contain a series, e.SupportedSeries will
// be overwritten.
func denormalizeEntity(e *mongodoc.Entity) {
	e.BaseURL = mongodoc.BaseURL(e.URL)
	e.Name = e.URL.Name
	e.User = e.URL.User
	e.Revision = e.URL.Revision
	e.Series = e.URL.Series
	if e.URL.Series != "" {
		if e.URL.Series == "bundle" {
			e.SupportedSeries = nil
		} else {
			e.SupportedSeries = []string{e.URL.Series}
		}
	}
	if e.PromulgatedURL == nil {
		e.PromulgatedRevision = -1
	} else {
		e.PromulgatedRevision = e.PromulgatedURL.Revision
	}
}

// newBundle returns a new bundle implementation from the archive blob
// read from r, that should have the given size and will
// be named with the given id.
//
// The bundle is checked for validity before returning.
func (s *Store) newBundle(id *router.ResolvedURL, r io.ReadSeeker, blobSize int64) (charm.Bundle, error) { readerAt := ReaderAtSeeker(r) b, err := charm.ReadBundleArchiveFromReader(readerAt, blobSize) if err != nil { return nil, zipReadError(err, "cannot read bundle archive") } bundleData := b.Data() charms, err := s.bundleCharms(bundleData.RequiredCharms()) if err != nil { return nil, errgo.Notef(err, "cannot retrieve bundle charms") } if err := bundleData.VerifyWithCharms(verifyConstraints, verifyStorage, charms); err != nil { // TODO frankban: use multiError (defined in internal/router). return nil, errgo.NoteMask(verificationError(err), "bundle verification failed", errgo.Is(params.ErrInvalidEntity)) } return b, nil } func (s *Store) bundleCharms(ids []string) (map[string]charm.Charm, error) { numIds := len(ids) urls := make([]*charm.URL, 0, numIds) idKeys := make([]string, 0, numIds) // TODO resolve ids concurrently. for _, id := range ids { url, err := charm.ParseURL(id) if err != nil { // Ignore this error. This will be caught in the bundle // verification process (see bundleData.VerifyWithCharms) and will // be returned to the user along with other bundle errors. continue } e, err := s.FindBestEntity(url, params.NoChannel, map[string]int{}) if err != nil { if errgo.Cause(err) == params.ErrNotFound { // Ignore this error too, for the same reasons // described above. continue } return nil, err } urls = append(urls, e.URL) idKeys = append(idKeys, id) } var entities []mongodoc.Entity if err := s.DB.Entities(). Find(bson.D{{"_id", bson.D{{"$in", urls}}}}). 
All(&entities); err != nil { return nil, err } entityCharms := make(map[charm.URL]charm.Charm, len(entities)) for i, entity := range entities { entityCharms[*entity.URL] = &entityCharm{entities[i]} } charms := make(map[string]charm.Charm, len(urls)) for i, url := range urls { if ch, ok := entityCharms[*url]; ok { charms[idKeys[i]] = ch } } return charms, nil } // bundleCharms returns all the charm URLs used by a bundle, // without duplicates. // TODO this seems to overlap slightly with Store.bundleCharms. func bundleCharms(data *charm.BundleData) ([]*charm.URL, error) { // Use a map to de-duplicate the URL list: a bundle can include services // deployed by the same charm. urlMap := make(map[string]*charm.URL) for _, service := range data.Services { url, err := charm.ParseURL(service.Charm) if err != nil { return nil, errgo.Mask(err) } urlMap[url.String()] = url // Also add the corresponding base URL. base := mongodoc.BaseURL(url) urlMap[base.String()] = base } urls := make([]*charm.URL, 0, len(urlMap)) for _, url := range urlMap { urls = append(urls, url) } return urls, nil } func newInt(x int) *int { return &x } // bundleUnitCount returns the number of units created by the bundle. func bundleUnitCount(b *charm.BundleData) int { count := 0 for _, service := range b.Services { count += service.NumUnits } return count } // bundleMachineCount returns the number of machines // that will be created or used by the bundle. func bundleMachineCount(b *charm.BundleData) int { count := len(b.Machines) for _, service := range b.Services { // The default placement is "new". placement := &charm.UnitPlacement{ Machine: "new", } // Check for "new" placements, which means a new machine // must be added. for _, location := range service.To { var err error placement, err = charm.ParsePlacement(location) if err != nil { // Ignore invalid placements - a bundle should always // be verified before adding to the charm store so this // should never happen in practice. 
continue } if placement.Machine == "new" { count++ } } // If there are less elements in To than NumUnits, the last placement // element is replicated. For this reason, if the last element is // "new", we need to add more machines. if placement != nil && placement.Machine == "new" { count += service.NumUnits - len(service.To) } } return count } func interfacesForRelations(rels map[string]charm.Relation) []string { // Eliminate duplicates by storing interface names into a map. interfaces := make(map[string]bool) for _, rel := range rels { interfaces[rel.Interface] = true } result := make([]string, 0, len(interfaces)) for iface := range interfaces { result = append(result, iface) } return result } // zipReadError creates an appropriate error for errors in reading an // uploaded archive. If the archive could not be read because the data // uploaded is invalid then an error with a cause of // params.ErrInvalidEntity will be returned. The given message will be // added as context. func zipReadError(err error, msg string) error { switch errgo.Cause(err) { case zip.ErrFormat, zip.ErrAlgorithm, zip.ErrChecksum: return errgo.WithCausef(err, params.ErrInvalidEntity, msg) } return errgo.Notef(err, msg) } func verifyConstraints(s string) error { // TODO(rog) provide some actual constraints checking here. return nil } func verifyStorage(s string) error { // TODO(frankban) provide some actual storage checking here. return nil } // verificationError returns an error whose string representation is a list of // all the verification error messages stored in err, in JSON format. // Note that err must be a *charm.VerificationError. func verificationError(err error) error { verr, ok := err.(*charm.VerificationError) if !ok { return err } messages := make([]string, len(verr.Errors)) for i, err := range verr.Errors { messages[i] = err.Error() } sort.Strings(messages) encodedMessages, err := json.Marshal(messages) if err != nil { // This should never happen. 
return err } return errgo.WithCausef(nil, params.ErrInvalidEntity, string(encodedMessages)) } // entityCharm implements charm.Charm. type entityCharm struct { mongodoc.Entity } func (e *entityCharm) Meta() *charm.Meta { return e.CharmMeta } func (e *entityCharm) Metrics() *charm.Metrics { return nil } func (e *entityCharm) Config() *charm.Config { return e.CharmConfig } func (e *entityCharm) Actions() *charm.Actions { return e.CharmActions } func (e *entityCharm) Revision() int { return e.URL.Revision } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/search_test.go0000664000175000017500000007076612672604603030232 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( "encoding/json" "sort" "strings" "sync" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" ) type StoreSearchSuite struct { storetesting.IsolatedMgoESSuite pool *Pool store *Store index SearchIndex } var _ = gc.Suite(&StoreSearchSuite{}) func (s *StoreSearchSuite) SetUpTest(c *gc.C) { s.IsolatedMgoESSuite.SetUpTest(c) // Temporarily set LegacyDownloadCountsEnabled to false, so that the real // code path can be reached by tests in this suite. // TODO (frankban): remove this block when removing the legacy counts // logic. 
original := LegacyDownloadCountsEnabled LegacyDownloadCountsEnabled = false s.AddCleanup(func(*gc.C) { LegacyDownloadCountsEnabled = original }) s.index = SearchIndex{s.ES, s.TestIndex} s.ES.RefreshIndex(".versions") pool, err := NewPool(s.Session.DB("foo"), &s.index, nil, ServerParams{}) c.Assert(err, gc.IsNil) s.pool = pool s.store = pool.Store() s.addCharmsToStore(c) c.Assert(err, gc.IsNil) } func (s *StoreSearchSuite) TearDownTest(c *gc.C) { s.store.Close() s.pool.Close() s.IsolatedMgoESSuite.TearDownTest(c) } func newEntity(id string, promulgatedRevision int, supportedSeries ...string) *mongodoc.Entity { url := charm.MustParseURL(id) var purl *charm.URL if promulgatedRevision > -1 { purl = new(charm.URL) *purl = *url purl.User = "" purl.Revision = promulgatedRevision } if url.Series == "bundle" { supportedSeries = nil } else if url.Series != "" { supportedSeries = []string{url.Series} } return &mongodoc.Entity{ URL: url, SupportedSeries: supportedSeries, PromulgatedURL: purl, PromulgatedRevision: promulgatedRevision, } } var exportTestCharms = map[string]*mongodoc.Entity{ "wordpress": newEntity("cs:~charmers/precise/wordpress-23", 23), "mysql": newEntity("cs:~openstack-charmers/trusty/mysql-7", 7), "varnish": newEntity("cs:~foo/trusty/varnish-1", -1), "riak": newEntity("cs:~charmers/trusty/riak-67", 67), } var exportTestBundles = map[string]*mongodoc.Entity{ "wordpress-simple": newEntity("cs:~charmers/bundle/wordpress-simple-4", 4), } var charmDownloadCounts = map[string]int{ "wordpress": 0, "wordpress-simple": 1, "mysql": 3, "varnish": 5, } func (s *StoreSearchSuite) TestSuccessfulExport(c *gc.C) { s.store.pool.statsCache.EvictAll() for name, ent := range exportTestCharms { entity, err := s.store.FindEntity(EntityResolvedURL(ent), nil) c.Assert(err, gc.IsNil) var actual json.RawMessage err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(entity.URL), &actual) c.Assert(err, gc.IsNil) readACLs := []string{ent.URL.User, params.Everyone} if 
ent.URL.Name == "riak" { readACLs = []string{ent.URL.User} } doc := SearchDoc{ Entity: entity, TotalDownloads: int64(charmDownloadCounts[name]), ReadACLs: readACLs, Series: entity.SupportedSeries, AllSeries: true, SingleSeries: true, } c.Assert(string(actual), jc.JSONEquals, doc) } } func (s *StoreSearchSuite) TestNoExportDeprecated(c *gc.C) { charmArchive := storetesting.Charms.CharmDir("mysql") url := router.MustNewResolvedURL("cs:~charmers/saucy/mysql-4", -1) addCharmForSearch( c, s.store, url, charmArchive, nil, 0, ) var entity *mongodoc.Entity err := s.store.DB.Entities().FindId("cs:~openstack-charmers/trusty/mysql-7").One(&entity) c.Assert(err, gc.IsNil) present, err := s.store.ES.HasDocument(s.TestIndex, typeName, s.store.ES.getID(entity.URL)) c.Assert(err, gc.IsNil) c.Assert(present, gc.Equals, true) err = s.store.DB.Entities().FindId("cs:~charmers/saucy/mysql-4").One(&entity) c.Assert(err, gc.IsNil) present, err = s.store.ES.HasDocument(s.TestIndex, typeName, s.store.ES.getID(entity.URL)) c.Assert(err, gc.IsNil) c.Assert(present, gc.Equals, false) } func (s *StoreSearchSuite) TestExportOnlyLatest(c *gc.C) { charmArchive := storetesting.Charms.CharmDir("wordpress") url := router.MustNewResolvedURL("cs:~charmers/precise/wordpress-24", -1) addCharmForSearch( c, s.store, url, charmArchive, []string{"charmers", params.Everyone}, 0, ) var expected, old *mongodoc.Entity var actual json.RawMessage err := s.store.DB.Entities().FindId("cs:~charmers/precise/wordpress-23").One(&old) c.Assert(err, gc.IsNil) err = s.store.DB.Entities().FindId("cs:~charmers/precise/wordpress-24").One(&expected) c.Assert(err, gc.IsNil) err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(old.URL), &actual) c.Assert(err, gc.IsNil) doc := SearchDoc{ Entity: expected, ReadACLs: []string{"charmers", params.Everyone}, Series: expected.SupportedSeries, SingleSeries: true, AllSeries: true, } c.Assert(string(actual), jc.JSONEquals, doc) } func (s *StoreSearchSuite) 
TestExportMultiSeriesCharmsCreateExpandedVersions(c *gc.C) { charmArchive := storetesting.Charms.CharmDir("wordpress") url := router.MustNewResolvedURL("cs:~charmers/trusty/juju-gui-24", -1) addCharmForSearch( c, s.store, url, charmArchive, []string{"charmers"}, 0, ) charmArchive = storetesting.Charms.CharmDir("multi-series") url = router.MustNewResolvedURL("cs:~charmers/juju-gui-25", -1) addCharmForSearch( c, s.store, url, charmArchive, []string{"charmers"}, 0, ) var expected, old *mongodoc.Entity var actual json.RawMessage err := s.store.DB.Entities().FindId("cs:~charmers/trusty/juju-gui-24").One(&old) c.Assert(err, gc.IsNil) err = s.store.DB.Entities().FindId("cs:~charmers/juju-gui-25").One(&expected) c.Assert(err, gc.IsNil) err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(expected.URL), &actual) c.Assert(err, gc.IsNil) doc := SearchDoc{ Entity: expected, ReadACLs: []string{"charmers"}, Series: expected.SupportedSeries, SingleSeries: false, AllSeries: true, } c.Assert(string(actual), jc.JSONEquals, doc) err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(old.URL), &actual) c.Assert(err, gc.IsNil) expected.URL.Series = old.URL.Series doc = SearchDoc{ Entity: expected, ReadACLs: []string{"charmers"}, Series: []string{old.URL.Series}, SingleSeries: true, AllSeries: false, } c.Assert(string(actual), jc.JSONEquals, doc) } func (s *StoreSearchSuite) TestExportSearchDocument(c *gc.C) { var entity *mongodoc.Entity var actual json.RawMessage err := s.store.DB.Entities().FindId("cs:~charmers/precise/wordpress-23").One(&entity) c.Assert(err, gc.IsNil) doc := SearchDoc{Entity: entity, TotalDownloads: 4000} err = s.store.ES.update(&doc) c.Assert(err, gc.IsNil) err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(entity.URL), &actual) c.Assert(err, gc.IsNil) c.Assert(string(actual), jc.JSONEquals, doc) } func (s *StoreSearchSuite) addCharmsToStore(c *gc.C) { for name, ent := range exportTestCharms { charmArchive := 
storetesting.Charms.CharmDir(name) cats := strings.Split(name, "-") charmArchive.Meta().Categories = cats tags := make([]string, len(cats)) for i, s := range cats { tags[i] = s + "TAG" } meta := charmArchive.Meta() meta.Tags = tags acl := []string{ent.URL.User} if ent.URL.Name != "riak" { acl = append(acl, params.Everyone) } addCharmForSearch( c, s.store, EntityResolvedURL(ent), storetesting.NewCharm(meta), acl, charmDownloadCounts[name], ) } for name, ent := range exportTestBundles { bundleArchive := storetesting.Charms.BundleDir(name) data := bundleArchive.Data() data.Tags = strings.Split(name, "-") addBundleForSearch( c, s.store, EntityResolvedURL(ent), storetesting.NewBundle(data), []string{ent.URL.User, params.Everyone}, charmDownloadCounts[name], ) } s.store.pool.statsCache.EvictAll() err := s.store.syncSearch() c.Assert(err, gc.IsNil) } var searchTests = []struct { about string sp SearchParams results []*mongodoc.Entity totalDiff int // len(results) + totalDiff = expected total }{ { about: "basic text search", sp: SearchParams{ Text: "wordpress", }, results: []*mongodoc.Entity{ exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "blank text search", sp: SearchParams{ Text: "", }, results: []*mongodoc.Entity{ exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], }, }, { about: "autocomplete search", sp: SearchParams{ Text: "word", AutoComplete: true, }, results: []*mongodoc.Entity{ exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "description filter search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "description": {"blog"}, }, }, results: []*mongodoc.Entity{ exportTestCharms["wordpress"], }, }, { about: "name filter search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "name": {"wordpress"}, }, }, results: []*mongodoc.Entity{ exportTestCharms["wordpress"], }, }, { about: "owner filter 
search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "owner": {"foo"}, }, }, results: []*mongodoc.Entity{ exportTestCharms["varnish"], }, }, { about: "provides filter search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "provides": {"mysql"}, }, }, results: []*mongodoc.Entity{ exportTestCharms["mysql"], }, }, { about: "requires filter search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "requires": {"mysql"}, }, }, results: []*mongodoc.Entity{ exportTestCharms["wordpress"], }, }, { about: "series filter search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "series": {"trusty"}, }, }, results: []*mongodoc.Entity{ exportTestCharms["mysql"], exportTestCharms["varnish"], }, }, { about: "summary filter search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "summary": {"Database engine"}, }, }, results: []*mongodoc.Entity{ exportTestCharms["mysql"], exportTestCharms["varnish"], }, }, { about: "tags filter search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "tags": {"wordpress"}, }, }, results: []*mongodoc.Entity{ exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "bundle type filter search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "type": {"bundle"}, }, }, results: []*mongodoc.Entity{ exportTestBundles["wordpress-simple"], }, }, { about: "charm type filter search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "type": {"charm"}, }, }, results: []*mongodoc.Entity{ exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], }, }, { about: "charm & bundle type filter search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "type": {"charm", "bundle"}, }, }, results: []*mongodoc.Entity{ exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], }, }, { about: "invalid filter search", sp: SearchParams{ Text: "", Filters: 
map[string][]string{ "no such filter": {"foo"}, }, }, results: []*mongodoc.Entity{ exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], }, }, { about: "valid & invalid filter search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "no such filter": {"foo"}, "type": {"charm"}, }, }, results: []*mongodoc.Entity{ exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], }, }, { about: "paginated search", sp: SearchParams{ Filters: map[string][]string{ "name": {"mysql"}, }, Skip: 1, }, totalDiff: +1, }, { about: "additional groups", sp: SearchParams{ Groups: []string{"charmers"}, }, results: []*mongodoc.Entity{ exportTestCharms["riak"], exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], }, }, { about: "admin search", sp: SearchParams{ Admin: true, }, results: []*mongodoc.Entity{ exportTestCharms["riak"], exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], }, }, { about: "charm tags filter search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "tags": {"wordpressTAG"}, }, }, results: []*mongodoc.Entity{ exportTestCharms["wordpress"], }, }, { about: "blank owner filter search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "owner": {""}, }, }, results: []*mongodoc.Entity{ exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestBundles["wordpress-simple"], }, }, { about: "promulgated search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "promulgated": {"1"}, }, }, results: []*mongodoc.Entity{ exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestBundles["wordpress-simple"], }, }, { about: "not promulgated search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "promulgated": {"0"}, }, }, results: []*mongodoc.Entity{ 
exportTestCharms["varnish"], }, }, { about: "owner and promulgated filter search", sp: SearchParams{ Text: "", Filters: map[string][]string{ "promulgated": {"1"}, "owner": {"openstack-charmers"}, }, }, results: []*mongodoc.Entity{ exportTestCharms["mysql"], }, }, } func (s *StoreSearchSuite) TestSearches(c *gc.C) { s.store.ES.Database.RefreshIndex(s.TestIndex) for i, test := range searchTests { c.Logf("test %d: %s", i, test.about) res, err := s.store.Search(test.sp) c.Assert(err, gc.IsNil) c.Logf("results: %v", res.Results) sort.Sort(resolvedURLsByString(res.Results)) sort.Sort(resolvedURLsByString(test.results)) c.Check(res.Results, jc.DeepEquals, test.results) c.Check(res.Total, gc.Equals, len(test.results)+test.totalDiff) } } type resolvedURLsByString []*mongodoc.Entity func (r resolvedURLsByString) Less(i, j int) bool { return r[i].URL.String() < r[j].URL.String() } func (r resolvedURLsByString) Swap(i, j int) { r[i], r[j] = r[j], r[i] } func (r resolvedURLsByString) Len() int { return len(r) } func (s *StoreSearchSuite) TestPaginatedSearch(c *gc.C) { err := s.store.ES.Database.RefreshIndex(s.TestIndex) c.Assert(err, gc.IsNil) sp := SearchParams{ Text: "wordpress", Skip: 1, } res, err := s.store.Search(sp) c.Assert(err, gc.IsNil) c.Assert(res.Results, gc.HasLen, 1) c.Assert(res.Total, gc.Equals, 2) } func (s *StoreSearchSuite) TestLimitTestSearch(c *gc.C) { err := s.store.ES.Database.RefreshIndex(s.TestIndex) c.Assert(err, gc.IsNil) sp := SearchParams{ Text: "wordpress", Limit: 1, } res, err := s.store.Search(sp) c.Assert(err, gc.IsNil) c.Assert(res.Results, gc.HasLen, 1) } func (s *StoreSearchSuite) TestPromulgatedRank(c *gc.C) { charmArchive := storetesting.Charms.CharmDir("varnish") ent := newEntity("cs:~charmers/trusty/varnish-1", 1) addCharmForSearch( c, s.store, EntityResolvedURL(ent), charmArchive, []string{ent.URL.User, params.Everyone}, 0, ) s.store.ES.Database.RefreshIndex(s.TestIndex) sp := SearchParams{ Filters: map[string][]string{ "name": 
{"varnish"}, }, } res, err := s.store.Search(sp) c.Assert(err, gc.IsNil) c.Logf("results: %#v", res.Results) c.Assert(res.Results, jc.DeepEquals, []*mongodoc.Entity{ ent, exportTestCharms["varnish"], }) } func (s *StoreSearchSuite) TestSorting(c *gc.C) { s.store.ES.Database.RefreshIndex(s.TestIndex) tests := []struct { about string sortQuery string results []*mongodoc.Entity }{{ about: "name ascending", sortQuery: "name", results: []*mongodoc.Entity{ exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "name descending", sortQuery: "-name", results: []*mongodoc.Entity{ exportTestBundles["wordpress-simple"], exportTestCharms["wordpress"], exportTestCharms["varnish"], exportTestCharms["mysql"], }, }, { about: "series ascending", sortQuery: "series,name", results: []*mongodoc.Entity{ exportTestBundles["wordpress-simple"], exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], }, }, { about: "series descending", sortQuery: "-series,name", results: []*mongodoc.Entity{ exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "owner ascending", sortQuery: "owner,name", results: []*mongodoc.Entity{ exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], exportTestCharms["varnish"], exportTestCharms["mysql"], }, }, { about: "owner descending", sortQuery: "-owner,name", results: []*mongodoc.Entity{ exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "downloads ascending", sortQuery: "downloads", results: []*mongodoc.Entity{ exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], exportTestCharms["mysql"], exportTestCharms["varnish"], }, }, { about: "downloads descending", sortQuery: "-downloads", results: []*mongodoc.Entity{ exportTestCharms["varnish"], 
exportTestCharms["mysql"], exportTestBundles["wordpress-simple"], exportTestCharms["wordpress"], }, }} for i, test := range tests { c.Logf("test %d. %s", i, test.about) var sp SearchParams err := sp.ParseSortFields(test.sortQuery) c.Assert(err, gc.IsNil) res, err := s.store.Search(sp) c.Assert(err, gc.IsNil) c.Assert(res.Results, jc.DeepEquals, test.results) c.Assert(res.Total, gc.Equals, len(test.results)) } } func (s *StoreSearchSuite) TestBoosting(c *gc.C) { s.store.ES.Database.RefreshIndex(s.TestIndex) var sp SearchParams res, err := s.store.Search(sp) c.Assert(err, gc.IsNil) c.Assert(res.Results, gc.HasLen, 4) c.Logf("results: %#v", res.Results) c.Assert(res.Results, jc.DeepEquals, []*mongodoc.Entity{ exportTestBundles["wordpress-simple"], exportTestCharms["mysql"], exportTestCharms["wordpress"], exportTestCharms["varnish"], }) } func (s *StoreSearchSuite) TestEnsureIndex(c *gc.C) { s.store.ES.Index = s.TestIndex + "-ensure-index" defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) indexes, err := s.ES.ListIndexesForAlias(s.store.ES.Index) c.Assert(err, gc.Equals, nil) c.Assert(indexes, gc.HasLen, 0) err = s.store.ES.ensureIndexes(false) c.Assert(err, gc.Equals, nil) indexes, err = s.ES.ListIndexesForAlias(s.store.ES.Index) c.Assert(err, gc.Equals, nil) c.Assert(indexes, gc.HasLen, 1) index := indexes[0] err = s.store.ES.ensureIndexes(false) c.Assert(err, gc.Equals, nil) indexes, err = s.ES.ListIndexesForAlias(s.store.ES.Index) c.Assert(err, gc.Equals, nil) c.Assert(indexes, gc.HasLen, 1) c.Assert(indexes[0], gc.Equals, index) } func (s *StoreSearchSuite) TestEnsureConcurrent(c *gc.C) { s.store.ES.Index = s.TestIndex + "-ensure-index-conc" defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) indexes, err := s.ES.ListIndexesForAlias(s.store.ES.Index) c.Assert(err, gc.Equals, nil) c.Assert(indexes, gc.HasLen, 0) var wg sync.WaitGroup wg.Add(1) go func() { err := s.store.ES.ensureIndexes(false) c.Check(err, gc.Equals, nil) wg.Done() 
}() err = s.store.ES.ensureIndexes(false) c.Assert(err, gc.Equals, nil) indexes, err = s.ES.ListIndexesForAlias(s.store.ES.Index) c.Assert(err, gc.Equals, nil) c.Assert(indexes, gc.HasLen, 1) wg.Wait() } func (s *StoreSearchSuite) TestEnsureIndexForce(c *gc.C) { s.store.ES.Index = s.TestIndex + "-ensure-index-force" defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) indexes, err := s.ES.ListIndexesForAlias(s.store.ES.Index) c.Assert(err, gc.Equals, nil) c.Assert(indexes, gc.HasLen, 0) err = s.store.ES.ensureIndexes(false) c.Assert(err, gc.Equals, nil) indexes, err = s.ES.ListIndexesForAlias(s.store.ES.Index) c.Assert(err, gc.Equals, nil) c.Assert(indexes, gc.HasLen, 1) index := indexes[0] err = s.store.ES.ensureIndexes(true) c.Assert(err, gc.Equals, nil) indexes, err = s.ES.ListIndexesForAlias(s.store.ES.Index) c.Assert(err, gc.Equals, nil) c.Assert(indexes, gc.HasLen, 1) c.Assert(indexes[0], gc.Not(gc.Equals), index) } func (s *StoreSearchSuite) TestGetCurrentVersionNoVersion(c *gc.C) { s.store.ES.Index = s.TestIndex + "-current-version" defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) v, dv, err := s.store.ES.getCurrentVersion() c.Assert(err, gc.Equals, nil) c.Assert(v, gc.Equals, version{}) c.Assert(dv, gc.Equals, int64(0)) } func (s *StoreSearchSuite) TestGetCurrentVersionWithVersion(c *gc.C) { s.store.ES.Index = s.TestIndex + "-current-version" defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) index, err := s.store.ES.newIndex() c.Assert(err, gc.Equals, nil) updated, err := s.store.ES.updateVersion(version{1, index}, 0) c.Assert(err, gc.Equals, nil) c.Assert(updated, gc.Equals, true) v, dv, err := s.store.ES.getCurrentVersion() c.Assert(err, gc.Equals, nil) c.Assert(v, gc.Equals, version{1, index}) c.Assert(dv, gc.Equals, int64(1)) } func (s *StoreSearchSuite) TestUpdateVersionNew(c *gc.C) { s.store.ES.Index = s.TestIndex + "-update-version" defer s.ES.DeleteDocument(".versions", "version", 
s.store.ES.Index) index, err := s.store.ES.newIndex() c.Assert(err, gc.Equals, nil) updated, err := s.store.ES.updateVersion(version{1, index}, 0) c.Assert(err, gc.Equals, nil) c.Assert(updated, gc.Equals, true) } func (s *StoreSearchSuite) TestUpdateVersionUpdate(c *gc.C) { s.store.ES.Index = s.TestIndex + "-update-version" defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) index, err := s.store.ES.newIndex() c.Assert(err, gc.Equals, nil) updated, err := s.store.ES.updateVersion(version{1, index}, 0) c.Assert(err, gc.Equals, nil) c.Assert(updated, gc.Equals, true) index, err = s.store.ES.newIndex() c.Assert(err, gc.Equals, nil) updated, err = s.store.ES.updateVersion(version{2, index}, 1) c.Assert(err, gc.Equals, nil) c.Assert(updated, gc.Equals, true) } func (s *StoreSearchSuite) TestUpdateCreateConflict(c *gc.C) { s.store.ES.Index = s.TestIndex + "-update-version" defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) index, err := s.store.ES.newIndex() c.Assert(err, gc.Equals, nil) updated, err := s.store.ES.updateVersion(version{1, index}, 0) c.Assert(err, gc.Equals, nil) c.Assert(updated, gc.Equals, true) index, err = s.store.ES.newIndex() c.Assert(err, gc.Equals, nil) updated, err = s.store.ES.updateVersion(version{1, index}, 0) c.Assert(err, gc.Equals, nil) c.Assert(updated, gc.Equals, false) } func (s *StoreSearchSuite) TestUpdateConflict(c *gc.C) { s.store.ES.Index = s.TestIndex + "-update-version" defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) index, err := s.store.ES.newIndex() c.Assert(err, gc.Equals, nil) updated, err := s.store.ES.updateVersion(version{1, index}, 0) c.Assert(err, gc.Equals, nil) c.Assert(updated, gc.Equals, true) index, err = s.store.ES.newIndex() c.Assert(err, gc.Equals, nil) updated, err = s.store.ES.updateVersion(version{1, index}, 3) c.Assert(err, gc.Equals, nil) c.Assert(updated, gc.Equals, false) } func (s *StoreSearchSuite) TestMultiSeriesCharmFiltersSeriesCorrectly(c *gc.C) { 
charmArchive := storetesting.Charms.CharmDir("multi-series") url := router.MustNewResolvedURL("cs:~charmers/juju-gui-25", -1) addCharmForSearch( c, s.store, url, charmArchive, []string{url.URL.User, params.Everyone}, 0, ) s.store.ES.Database.RefreshIndex(s.TestIndex) filterTests := []struct { series string notFound bool }{{ series: "trusty", }, { series: "vivid", }, { series: "sauch", notFound: true, }} for i, test := range filterTests { c.Logf("%d. %s", i, test.series) res, err := s.store.Search(SearchParams{ Filters: map[string][]string{ "name": []string{"juju-gui"}, "series": []string{test.series}, }, }) c.Assert(err, gc.IsNil) if test.notFound { c.Assert(res.Results, gc.HasLen, 0) continue } c.Assert(res.Results, gc.HasLen, 1) c.Assert(res.Results[0].URL.String(), gc.Equals, url.String()) } } func (s *StoreSearchSuite) TestMultiSeriesCharmSortsSeriesCorrectly(c *gc.C) { charmArchive := storetesting.Charms.CharmDir("multi-series") url := router.MustNewResolvedURL("cs:~charmers/juju-gui-25", -1) addCharmForSearch( c, s.store, url, charmArchive, []string{url.URL.User, params.Everyone}, 0, ) s.store.ES.Database.RefreshIndex(s.TestIndex) var sp SearchParams sp.ParseSortFields("-series", "owner") res, err := s.store.Search(sp) c.Assert(err, gc.IsNil) c.Assert(res.Results, jc.DeepEquals, []*mongodoc.Entity{ newEntity("cs:~charmers/juju-gui-25", -1, "trusty", "utopic", "vivid", "wily"), newEntity("cs:~foo/trusty/varnish-1", -1), newEntity("cs:~openstack-charmers/trusty/mysql-7", 7), newEntity("cs:~charmers/precise/wordpress-23", 23), newEntity("cs:~charmers/bundle/wordpress-simple-4", 4), }) } func (s *StoreSearchSuite) TestOnlyIndexStableCharms(c *gc.C) { ch := storetesting.NewCharm(&charm.Meta{ Name: "test", }) id := router.MustNewResolvedURL("~test/trusty/test-0", -1) err := s.store.AddCharmWithArchive(id, ch) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&id.URL, "read", "test", params.Everyone) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&id.URL, 
"development.read", "test", params.Everyone) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&id.URL, "stable.read", "test", params.Everyone) c.Assert(err, gc.IsNil) var actual json.RawMessage err = s.store.UpdateSearch(id) c.Assert(err, gc.IsNil) err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(&id.URL), &actual) c.Assert(err, gc.ErrorMatches, "elasticsearch document not found") err = s.store.Publish(id, params.DevelopmentChannel) c.Assert(err, gc.IsNil) err = s.store.UpdateSearch(id) c.Assert(err, gc.IsNil) err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(&id.URL), &actual) c.Assert(err, gc.ErrorMatches, "elasticsearch document not found") err = s.store.Publish(id, params.StableChannel) c.Assert(err, gc.IsNil) err = s.store.UpdateSearch(id) c.Assert(err, gc.IsNil) err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(&id.URL), &actual) c.Assert(err, gc.IsNil) entity, err := s.store.FindEntity(id, nil) c.Assert(err, gc.IsNil) doc := SearchDoc{ Entity: entity, ReadACLs: []string{"test", params.Everyone}, Series: []string{"trusty"}, AllSeries: true, SingleSeries: true, } c.Assert(string(actual), jc.JSONEquals, doc) } // addCharmForSearch adds a charm to the specified store such that it // will be indexed in search. In order that it is indexed it is // automatically published on the stable channel. func addCharmForSearch(c *gc.C, s *Store, id *router.ResolvedURL, ch charm.Charm, acl []string, downloads int) { err := s.AddCharmWithArchive(id, ch) c.Assert(err, gc.IsNil) for i := 0; i < downloads; i++ { err := s.IncrementDownloadCounts(id) c.Assert(err, gc.IsNil) } err = s.SetPerms(&id.URL, "stable.read", acl...) c.Assert(err, gc.IsNil) err = s.Publish(id, params.StableChannel) c.Assert(err, gc.IsNil) } // addBundleForSearch adds a bundle to the specified store such that it // will be indexed in search. In order that it is indexed it is // automatically published on the stable channel. 
func addBundleForSearch(c *gc.C, s *Store, id *router.ResolvedURL, b charm.Bundle, acl []string, downloads int) { err := s.AddBundleWithArchive(id, b) c.Assert(err, gc.IsNil) for i := 0; i < downloads; i++ { err := s.IncrementDownloadCounts(id) c.Assert(err, gc.IsNil) } err = s.SetPerms(&id.URL, "stable.read", acl...) c.Assert(err, gc.IsNil) err = s.Publish(id, params.StableChannel) c.Assert(err, gc.IsNil) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/archive.go0000664000175000017500000001753212672604603027337 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( "archive/zip" "bytes" "io" "os" "path" "strings" "github.com/juju/utils" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/mgo.v2/bson" "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" "gopkg.in/juju/charmstore.v5-unstable/internal/router" ) // Blob represents a blob of data from the charm store. type Blob struct { blobstore.ReadSeekCloser // Size holds the total size of the blob. Size int64 // Hash holds the hash checksum of the blob. Hash string } var preV5ArchiveFields = []string{ "size", "blobhash", "blobname", "prev5blobhash", "prev5blobsize", } // OpenBlob returns the blob associated with the given URL. func (s *Store) OpenBlob(id *router.ResolvedURL) (*Blob, error) { return s.openBlob(id, false) } // OpenBlob returns the blob associated with the given URL. // As required by pre-v5 versions of the API, it will return a blob // with a hacked-up metadata.yaml that elides the Series field. 
func (s *Store) OpenBlobPreV5(id *router.ResolvedURL) (*Blob, error) { return s.openBlob(id, true) } func (s *Store) openBlob(id *router.ResolvedURL, preV5 bool) (*Blob, error) { entity, err := s.FindEntity(id, FieldSelector(preV5ArchiveFields...)) if err != nil { return nil, errgo.Mask(err, errgo.Is(params.ErrNotFound)) } r, size, err := s.BlobStore.Open(entity.BlobName) if err != nil { return nil, errgo.Notef(err, "cannot open archive data for %s", id) } hash := entity.BlobHash if entity.PreV5BlobHash != entity.BlobHash && preV5 { // The v5 blob is different so we open the blob suffix that // contains the metadata hack. r2, size2, err := s.BlobStore.Open(preV5CompatibilityBlobName(entity.BlobName)) if err != nil { r.Close() return nil, errgo.Notef(err, "cannot find pre-v5 hack blob") } r = newMultiReadSeekCloser(r, r2) size += size2 hash = entity.PreV5BlobHash } return &Blob{ ReadSeekCloser: r, Size: size, Hash: hash, }, nil } type multiReadSeekCloser struct { readers []blobstore.ReadSeekCloser io.ReadSeeker } func newMultiReadSeekCloser(readers ...blobstore.ReadSeekCloser) blobstore.ReadSeekCloser { br := make([]io.ReadSeeker, len(readers)) for i, r := range readers { br[i] = r } return &multiReadSeekCloser{ readers: readers, ReadSeeker: utils.NewMultiReaderSeeker(br...), } } func (r *multiReadSeekCloser) Close() error { for _, r := range r.readers { r.Close() } return nil } // OpenBlobFile opens the file with the given path from the // given blob and returns a reader for its contents, // and its size. // // If no such file was found, it returns an error // with a params.ErrNotFound cause. // // If the file is actually a directory in the blob, it returns // an error with a params.ErrForbidden cause. 
func (s *Store) OpenBlobFile(blob *Blob, filePath string) (io.ReadCloser, int64, error) { zipReader, err := zip.NewReader(ReaderAtSeeker(blob), blob.Size) if err != nil { return nil, 0, errgo.Notef(err, "cannot read archive data") } filePath = strings.TrimPrefix(path.Clean(filePath), "/") for _, file := range zipReader.File { if path.Clean(file.Name) != filePath { continue } // The file is found. fileInfo := file.FileInfo() if fileInfo.IsDir() { return nil, 0, errgo.WithCausef(nil, params.ErrForbidden, "directory listing not allowed") } content, err := file.Open() if err != nil { return nil, 0, errgo.Notef(err, "unable to read file %q", filePath) } return content, fileInfo.Size(), nil } return nil, 0, errgo.WithCausef(nil, params.ErrNotFound, "file %q not found in the archive", filePath) } // OpenCachedBlobFile opens a file from the given entity's archive blob. // The file is identified by the provided fileId. If the file has not // previously been opened on this entity, the isFile function will be // used to determine which file in the zip file to use. The result will // be cached for the next time. // // When retrieving the entity, at least the BlobName and // Contents fields must be populated. func (s *Store) OpenCachedBlobFile( entity *mongodoc.Entity, fileId mongodoc.FileId, isFile func(f *zip.File) bool, ) (_ io.ReadCloser, err error) { if entity.BlobName == "" { // We'd like to check that the Contents field was populated // here but we can't because it doesn't necessarily // exist in the entity. 
return nil, errgo.New("provided entity does not have required fields") } zipf, ok := entity.Contents[fileId] if ok && !zipf.IsValid() { return nil, errgo.WithCausef(nil, params.ErrNotFound, "") } blob, size, err := s.BlobStore.Open(entity.BlobName) if err != nil { return nil, errgo.Notef(err, "cannot open archive blob") } defer func() { // When there's an error, we want to close // the blob, otherwise we need to keep the blob // open because it's used by the returned Reader. if err != nil { blob.Close() } }() if !ok { // We haven't already searched the archive for the icon, // so find its archive now. zipf, err = s.findZipFile(blob, size, isFile) if err != nil && errgo.Cause(err) != params.ErrNotFound { return nil, errgo.Mask(err) } } // We update the content entry regardless of whether we've // found a file, so that the next time that serveIcon is called // it can know that we've already looked. err = s.DB.Entities().UpdateId( entity.URL, bson.D{{"$set", bson.D{{"contents." + string(fileId), zipf}}, }}, ) if err != nil { return nil, errgo.Notef(err, "cannot update %q", entity.URL) } if !zipf.IsValid() { // We searched for the file and didn't find it. return nil, errgo.WithCausef(nil, params.ErrNotFound, "") } // We know where the icon is stored. Now serve it up. r, err := ZipFileReader(blob, zipf) if err != nil { return nil, errgo.Notef(err, "cannot make zip file reader") } // We return a ReadCloser that reads from the newly created // zip file reader, but when closed, will close the originally // opened blob. 
return struct { io.Reader io.Closer }{r, blob}, nil } func (s *Store) findZipFile(blob io.ReadSeeker, size int64, isFile func(f *zip.File) bool) (mongodoc.ZipFile, error) { zipReader, err := zip.NewReader(&readerAtSeeker{r: blob}, size) if err != nil { return mongodoc.ZipFile{}, errgo.Notef(err, "cannot read archive data") } for _, f := range zipReader.File { if isFile(f) { return NewZipFile(f) } } return mongodoc.ZipFile{}, params.ErrNotFound } // ArchiverTo can be used to archive a charm or bundle's // contents to a writer. It is implemented by *charm.CharmArchive // and *charm.BundleArchive. type ArchiverTo interface { ArchiveTo(io.Writer) error } // getArchive is used to turn the current charm and bundle implementations // into ReadSeekClosers for their corresponding archive. func getArchive(c interface{}) (blobstore.ReadSeekCloser, error) { var path string switch c := c.(type) { case ArchiverTo: // For example: charm.CharmDir or charm.BundleDir. var buffer bytes.Buffer if err := c.ArchiveTo(&buffer); err != nil { return nil, errgo.Mask(err) } return nopCloser(bytes.NewReader(buffer.Bytes())), nil case *charm.BundleArchive: path = c.Path case *charm.CharmArchive: path = c.Path default: return nil, errgo.Newf("cannot get the archive for charm type %T", c) } file, err := os.Open(path) if err != nil { return nil, errgo.Mask(err) } return file, nil } type nopCloserReadSeeker struct { io.ReadSeeker } func (nopCloserReadSeeker) Close() error { return nil } // nopCloser returns a blobstore.ReadSeekCloser with a no-op Close method // wrapping the provided ReadSeeker r. 
func nopCloser(r io.ReadSeeker) blobstore.ReadSeekCloser { return nopCloserReadSeeker{r} } ././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations_integration_test.gocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations_integration_test0000664000175000017500000004327412672604603033132 0ustar marcomarcopackage charmstore import ( "flag" "net/http" jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" ) type migrationsIntegrationSuite struct { commonSuite } var _ = gc.Suite(&migrationsIntegrationSuite{}) const earliestDeployedVersion = "4.4.3" var dumpMigrationHistoryFlag = flag.Bool("dump-migration-history", false, "dump migration history to file") func (s *migrationsIntegrationSuite) SetUpSuite(c *gc.C) { if *dumpMigrationHistoryFlag { s.dump(c) } s.commonSuite.SetUpSuite(c) } func (s *migrationsIntegrationSuite) dump(c *gc.C) { // We can't use the usual s.Session because we're using // commonSuite which uses IsolationSuite which hides the // environment variables which are needed for // dumpMigrationHistory to run. 
session, err := jujutesting.MgoServer.Dial() c.Assert(err, gc.IsNil) defer session.Close() err = dumpMigrationHistory(session, earliestDeployedVersion, migrationHistory) c.Assert(err, gc.IsNil) } var migrationHistory = []versionSpec{{ version: "4.1.5", update: func(db *mgo.Database, csv *charmStoreVersion) error { err := csv.Upload("v4", []uploadSpec{{ id: "~charmers/precise/promulgated-0", promulgatedId: "precise/promulgated-0", entity: storetesting.NewCharm(nil), }, { id: "~bob/trusty/nonpromulgated-0", entity: storetesting.NewCharm(nil), }, { id: "~charmers/bundle/promulgatedbundle-0", promulgatedId: "bundle/promulgatedbundle-0", entity: storetesting.NewBundle(&charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "promulgated": { Charm: "promulgated", }, }, }), }, { id: "~charmers/bundle/nonpromulgatedbundle-0", entity: storetesting.NewBundle(&charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "promulgated": { Charm: "promulgated", }, }, }), }}) if err != nil { return errgo.Mask(err) } if err := csv.Put("/v4/~charmers/precise/promulgated/meta/perm", params.PermRequest{ Read: []string{"everyone"}, Write: []string{"alice", "bob", "charmers"}, }); err != nil { return errgo.Mask(err) } if err := csv.Put("/v4/~bob/trusty/nonpromulgated/meta/perm", params.PermRequest{ Read: []string{"bobgroup"}, Write: []string{"bob", "someoneelse"}, }); err != nil { return errgo.Mask(err) } return nil }, }, { // Multi-series charms. // Development channel + ACLs version: "4.3.0", update: func(db *mgo.Database, csv *charmStoreVersion) error { err := csv.Upload("v4", []uploadSpec{{ // Uploads to ~charmers/multiseries-0 id: "~charmers/multiseries", // Note: PUT doesn't work on multi-series. usePost: true, entity: storetesting.NewCharm(&charm.Meta{ Series: []string{"precise", "trusty", "utopic"}, }), }, { // This triggers the bug where we created a base // entity with a bogus "development" channel in the URL. 
// Uploads to ~charmers/precise/promulgated-1 id: "~charmers/development/precise/promulgated", usePost: true, entity: storetesting.NewCharm(&charm.Meta{ Name: "different", }), }}) if err != nil { return errgo.Mask(err) } // Sanity check that we really did trigger the bug. err = db.C("entities").Find(bson.D{{ "promulgated-url", "cs:development/precise/promulgated-1", }}).One(new(interface{})) if err != nil { return errgo.Notef(err, "we don't seem to have triggered the bug") } if err := csv.Put("/v4/development/promulgated/meta/perm", params.PermRequest{ Read: []string{"charmers"}, Write: []string{"charmers"}, }); err != nil { return errgo.Mask(err) } return nil }, }, { // V5 API. // Fix bogus promulgated URL. // V4 multi-series compatibility (this didn't work). version: "4.4.3", update: func(db *mgo.Database, csv *charmStoreVersion) error { err := csv.Upload("v5", []uploadSpec{{ // Uploads to ~charmers/multiseries-1 id: "~charmers/multiseries", usePost: true, entity: storetesting.NewCharm(&charm.Meta{ Series: []string{"precise", "trusty", "wily"}, }), }, { id: "~someone/precise/southerncharm-0", entity: storetesting.NewCharm(nil), }, { id: "~someone/development/precise/southerncharm-3", entity: storetesting.NewCharm(nil), }, { id: "~someone/development/trusty/southerncharm-5", entity: storetesting.NewCharm(nil), }, { id: "~someone/trusty/southerncharm-6", entity: storetesting.NewCharm(nil), }}) if err != nil { return errgo.Mask(err) } return nil }, }} var migrationFromDumpEntityTests = []struct { id string checkers []entityChecker }{{ id: "~charmers/precise/promulgated-0", checkers: []entityChecker{ hasPromulgatedRevision(0), hasCompatibilityBlob(false), isDevelopment(true), isStable(true), }, }, { id: "~charmers/precise/promulgated-1", checkers: []entityChecker{ hasPromulgatedRevision(1), hasCompatibilityBlob(false), isDevelopment(true), isStable(false), }, }, { id: "~bob/trusty/nonpromulgated-0", checkers: []entityChecker{ hasPromulgatedRevision(-1), 
hasCompatibilityBlob(false), isDevelopment(true), isStable(true), }, }, { id: "~charmers/bundle/promulgatedbundle-0", checkers: []entityChecker{ hasPromulgatedRevision(0), hasCompatibilityBlob(false), isDevelopment(true), isStable(true), }, }, { id: "~charmers/bundle/nonpromulgatedbundle-0", checkers: []entityChecker{ hasPromulgatedRevision(-1), hasCompatibilityBlob(false), isDevelopment(true), isStable(true), }, }, { id: "~charmers/multiseries-0", checkers: []entityChecker{ hasPromulgatedRevision(-1), hasCompatibilityBlob(true), isDevelopment(true), isStable(true), }, }, { id: "~charmers/multiseries-1", checkers: []entityChecker{ hasPromulgatedRevision(-1), hasCompatibilityBlob(true), isDevelopment(true), isStable(true), }, }, { id: "~someone/precise/southerncharm-0", checkers: []entityChecker{ hasPromulgatedRevision(-1), hasCompatibilityBlob(false), isDevelopment(true), isStable(true), }, }, { id: "~someone/precise/southerncharm-3", checkers: []entityChecker{ hasPromulgatedRevision(-1), hasCompatibilityBlob(false), isDevelopment(true), isStable(false), }, }, { id: "~someone/trusty/southerncharm-5", checkers: []entityChecker{ hasPromulgatedRevision(-1), hasCompatibilityBlob(false), isDevelopment(true), isStable(false), }, }, { id: "~someone/trusty/southerncharm-6", checkers: []entityChecker{ hasPromulgatedRevision(-1), hasCompatibilityBlob(false), isDevelopment(true), isStable(true), }, }} var migrationFromDumpBaseEntityTests = []struct { id string checkers []baseEntityChecker }{{ id: "cs:~charmers/promulgated", checkers: []baseEntityChecker{ isPromulgated(true), hasACLs(map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, params.DevelopmentChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"everyone"}, Write: []string{"alice", "bob", "charmers"}, }, }), hasChannelEntities(map[params.Channel]map[string]*charm.URL{ 
params.DevelopmentChannel: { "precise": charm.MustParseURL("~charmers/precise/promulgated-1"), }, params.StableChannel: { "precise": charm.MustParseURL("~charmers/precise/promulgated-0"), }, }), }, }, { id: "cs:~bob/nonpromulgated", checkers: []baseEntityChecker{ isPromulgated(false), hasACLs(map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bobgroup"}, Write: []string{"bob", "someoneelse"}, }, params.DevelopmentChannel: { Read: []string{"bobgroup"}, Write: []string{"bob", "someoneelse"}, }, params.StableChannel: { Read: []string{"bobgroup"}, Write: []string{"bob", "someoneelse"}, }, }), hasChannelEntities(map[params.Channel]map[string]*charm.URL{ params.DevelopmentChannel: { "trusty": charm.MustParseURL("~bob/trusty/nonpromulgated-0"), }, params.StableChannel: { "trusty": charm.MustParseURL("~bob/trusty/nonpromulgated-0"), }, }), }, }, { id: "~charmers/promulgatedbundle", checkers: []baseEntityChecker{ isPromulgated(true), hasAllACLs("charmers"), hasChannelEntities(map[params.Channel]map[string]*charm.URL{ params.DevelopmentChannel: { "bundle": charm.MustParseURL("~charmers/bundle/promulgatedbundle-0"), }, params.StableChannel: { "bundle": charm.MustParseURL("~charmers/bundle/promulgatedbundle-0"), }, }), }, }, { id: "cs:~charmers/nonpromulgatedbundle", checkers: []baseEntityChecker{ isPromulgated(false), hasAllACLs("charmers"), hasChannelEntities(map[params.Channel]map[string]*charm.URL{ params.DevelopmentChannel: { "bundle": charm.MustParseURL("~charmers/bundle/nonpromulgatedbundle-0"), }, params.StableChannel: { "bundle": charm.MustParseURL("~charmers/bundle/nonpromulgatedbundle-0"), }, }), }, }, { id: "cs:~charmers/multiseries", checkers: []baseEntityChecker{ isPromulgated(false), hasAllACLs("charmers"), hasChannelEntities(map[params.Channel]map[string]*charm.URL{ params.DevelopmentChannel: { "precise": charm.MustParseURL("~charmers/multiseries-1"), "trusty": charm.MustParseURL("~charmers/multiseries-1"), "utopic": 
charm.MustParseURL("~charmers/multiseries-0"), "wily": charm.MustParseURL("~charmers/multiseries-1"), }, params.StableChannel: { "precise": charm.MustParseURL("~charmers/multiseries-1"), "trusty": charm.MustParseURL("~charmers/multiseries-1"), "utopic": charm.MustParseURL("~charmers/multiseries-0"), "wily": charm.MustParseURL("~charmers/multiseries-1"), }, }), }, }, { id: "cs:~someone/southerncharm", checkers: []baseEntityChecker{ isPromulgated(false), hasAllACLs("someone"), hasChannelEntities(map[params.Channel]map[string]*charm.URL{ params.DevelopmentChannel: { "precise": charm.MustParseURL("~someone/precise/southerncharm-3"), "trusty": charm.MustParseURL("~someone/trusty/southerncharm-6"), }, params.StableChannel: { "precise": charm.MustParseURL("~someone/precise/southerncharm-0"), "trusty": charm.MustParseURL("~someone/trusty/southerncharm-6"), }, }), }, }} func (s *migrationsIntegrationSuite) TestMigrationFromDump(c *gc.C) { db := s.Session.DB("juju_test") err := createDatabaseAtVersion(db, migrationHistory[len(migrationHistory)-1].version) c.Assert(err, gc.IsNil) err = s.runMigrations(db) c.Assert(err, gc.IsNil) store := s.newStore(c, false) defer store.Close() checkAllEntityInvariants(c, store) for i, test := range migrationFromDumpEntityTests { c.Logf("test %d: entity %v", i, test.id) e, err := store.FindEntity(MustParseResolvedURL(test.id), nil) c.Assert(err, gc.IsNil) for j, check := range test.checkers { c.Logf("test %d: entity %v; check %d", i, test.id, j) check(c, e) } } for i, test := range migrationFromDumpBaseEntityTests { c.Logf("test %d: base entity %v", i, test.id) e, err := store.FindBaseEntity(charm.MustParseURL(test.id), nil) c.Assert(err, gc.IsNil) for j, check := range test.checkers { c.Logf("test %d: base entity %v; check %d", i, test.id, j) check(c, e) } } } func checkAllEntityInvariants(c *gc.C, store *Store) { var entities []*mongodoc.Entity err := store.DB.Entities().Find(nil).All(&entities) c.Assert(err, gc.IsNil) for _, e := range 
entities { c.Logf("check entity invariants %v", e.URL) checkEntityInvariants(c, e, store) } var baseEntities []*mongodoc.BaseEntity err = store.DB.BaseEntities().Find(nil).All(&baseEntities) c.Assert(err, gc.IsNil) for _, e := range baseEntities { c.Logf("check base entity invariants %v", e.URL) checkBaseEntityInvariants(c, e, store) } } func checkEntityInvariants(c *gc.C, e *mongodoc.Entity, store *Store) { // Basic "this must have some non-zero value" checks. c.Assert(e.URL.Name, gc.Not(gc.Equals), "") c.Assert(e.URL.Revision, gc.Not(gc.Equals), -1) c.Assert(e.URL.User, gc.Not(gc.Equals), "") c.Assert(e.PreV5BlobHash, gc.Not(gc.Equals), "") c.Assert(e.PreV5BlobHash256, gc.Not(gc.Equals), "") c.Assert(e.BlobHash, gc.Not(gc.Equals), "") c.Assert(e.BlobHash256, gc.Not(gc.Equals), "") c.Assert(e.Size, gc.Not(gc.Equals), 0) c.Assert(e.BlobName, gc.Not(gc.Equals), "") if e.UploadTime.IsZero() { c.Fatalf("zero upload time") } // URL denormalization checks. c.Assert(e.BaseURL, jc.DeepEquals, mongodoc.BaseURL(e.URL)) c.Assert(e.URL.Name, gc.Equals, e.Name) c.Assert(e.URL.User, gc.Equals, e.User) c.Assert(e.URL.Revision, gc.Equals, e.Revision) c.Assert(e.URL.Series, gc.Equals, e.Series) if e.PromulgatedRevision != -1 { expect := *e.URL expect.User = "" expect.Revision = e.PromulgatedRevision c.Assert(e.PromulgatedURL, jc.DeepEquals, &expect) } else { c.Assert(e.PromulgatedURL, gc.IsNil) } // Multi-series vs single-series vs bundle checks. 
if e.URL.Series == "bundle" { c.Assert(e.BundleData, gc.NotNil) c.Assert(e.BundleCharms, gc.NotNil) c.Assert(e.BundleMachineCount, gc.NotNil) c.Assert(e.BundleUnitCount, gc.NotNil) c.Assert(e.SupportedSeries, gc.HasLen, 0) c.Assert(e.BlobHash, gc.Equals, e.PreV5BlobHash) c.Assert(e.Size, gc.Equals, e.PreV5BlobSize) c.Assert(e.BlobHash256, gc.Equals, e.PreV5BlobHash256) } else { c.Assert(e.CharmMeta, gc.NotNil) if e.URL.Series == "" { c.Assert(e.SupportedSeries, jc.DeepEquals, e.CharmMeta.Series) c.Assert(e.BlobHash, gc.Not(gc.Equals), e.PreV5BlobHash) c.Assert(e.Size, gc.Not(gc.Equals), e.PreV5BlobSize) c.Assert(e.BlobHash256, gc.Not(gc.Equals), e.PreV5BlobHash256) } else { c.Assert(e.SupportedSeries, jc.DeepEquals, []string{e.URL.Series}) c.Assert(e.BlobHash, gc.Equals, e.PreV5BlobHash) c.Assert(e.Size, gc.Equals, e.PreV5BlobSize) c.Assert(e.BlobHash256, gc.Equals, e.PreV5BlobHash256) } } // Check that the blobs exist. r, err := store.OpenBlob(EntityResolvedURL(e)) c.Assert(err, gc.IsNil) r.Close() r, err = store.OpenBlobPreV5(EntityResolvedURL(e)) c.Assert(err, gc.IsNil) r.Close() // Check that the base entity exists. _, err = store.FindBaseEntity(e.URL, nil) c.Assert(err, gc.IsNil) } func stringInSlice(s string, ss []string) bool { for _, t := range ss { if s == t { return true } } return false } func checkBaseEntityInvariants(c *gc.C, e *mongodoc.BaseEntity, store *Store) { c.Assert(e.URL.Name, gc.Not(gc.Equals), "") c.Assert(e.URL.User, gc.Not(gc.Equals), "") c.Assert(e.URL, jc.DeepEquals, mongodoc.BaseURL(e.URL)) c.Assert(e.User, gc.Equals, e.URL.User) c.Assert(e.Name, gc.Equals, e.URL.Name) // Check that each entity mentioned in ChannelEntities exists and has the // correct channel. 
for ch, seriesEntities := range e.ChannelEntities { c.Assert(ch, gc.Not(gc.Equals), params.UnpublishedChannel) for series, url := range seriesEntities { if url.Series != "" { c.Assert(url.Series, gc.Equals, series) } ce, err := store.FindEntity(MustParseResolvedURL(url.String()), nil) c.Assert(err, gc.IsNil) switch ch { case params.DevelopmentChannel: c.Assert(ce.Development, gc.Equals, true) case params.StableChannel: c.Assert(ce.Stable, gc.Equals, true) default: c.Fatalf("unknown channel %q found", ch) } if series != "bundle" && !stringInSlice(series, ce.SupportedSeries) { c.Fatalf("series %q not found in supported series %q", series, ce.SupportedSeries) } } } } // runMigrations starts a new server which will cause all migrations // to be triggered. func (s *migrationsIntegrationSuite) runMigrations(db *mgo.Database) error { apiHandler := func(p *Pool, config ServerParams, _ string) HTTPCloseHandler { return nopCloseHandler{http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {})} } srv, err := NewServer(db, nil, serverParams, map[string]NewAPIHandlerFunc{ "version1": apiHandler, }) if err == nil { srv.Close() } return err } type entityChecker func(c *gc.C, entity *mongodoc.Entity) func hasPromulgatedRevision(rev int) entityChecker { return func(c *gc.C, entity *mongodoc.Entity) { c.Assert(entity.PromulgatedRevision, gc.Equals, rev) } } func hasCompatibilityBlob(hasBlob bool) entityChecker { return func(c *gc.C, entity *mongodoc.Entity) { if hasBlob { c.Assert(entity.PreV5BlobHash, gc.Not(gc.Equals), entity.BlobHash) } else { c.Assert(entity.PreV5BlobHash, gc.Equals, entity.BlobHash) } } } func isDevelopment(isDev bool) entityChecker { return func(c *gc.C, entity *mongodoc.Entity) { c.Assert(entity.Development, gc.Equals, isDev) } } func isStable(isStable bool) entityChecker { return func(c *gc.C, entity *mongodoc.Entity) { c.Assert(entity.Stable, gc.Equals, isStable) } } type baseEntityChecker func(c *gc.C, entity *mongodoc.BaseEntity) func 
isPromulgated(isProm bool) baseEntityChecker { return func(c *gc.C, entity *mongodoc.BaseEntity) { c.Assert(entity.Promulgated, gc.Equals, mongodoc.IntBool(isProm)) } } func hasACLs(acls map[params.Channel]mongodoc.ACL) baseEntityChecker { return func(c *gc.C, entity *mongodoc.BaseEntity) { c.Assert(entity.ChannelACLs, jc.DeepEquals, acls) } } func hasAllACLs(user string) baseEntityChecker { userACL := mongodoc.ACL{ Read: []string{user}, Write: []string{user}, } return hasACLs(map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: userACL, params.DevelopmentChannel: userACL, params.StableChannel: userACL, }) } func hasChannelEntities(ce map[params.Channel]map[string]*charm.URL) baseEntityChecker { return func(c *gc.C, entity *mongodoc.BaseEntity) { c.Assert(entity.ChannelEntities, jc.DeepEquals, ce) } } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/debug_test.go0000664000175000017500000000506312672604603030037 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( "errors" "net/http" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable/internal/router" appver "gopkg.in/juju/charmstore.v5-unstable/version" ) type debugSuite struct{} var _ = gc.Suite(&debugSuite{}) var debugCheckTests = []struct { about string checks map[string]func() error expectStatus int expectBody interface{} }{{ about: "no checks", expectStatus: http.StatusOK, expectBody: map[string]string{}, }, { about: "passing check", checks: map[string]func() error{ "pass": func() error { return nil }, }, expectStatus: http.StatusOK, expectBody: map[string]string{ "pass": "OK", }, }, { about: "failing check", checks: map[string]func() error{ "fail": func() error { return errors.New("test fail") }, }, expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Message: "check failure: [fail: test fail]", }, }, { about: "many pass", checks: map[string]func() error{ "pass1": func() error { return nil }, "pass2": func() error { return nil }, }, expectStatus: http.StatusOK, expectBody: map[string]string{ "pass1": "OK", "pass2": "OK", }, }, { about: "many fail", checks: map[string]func() error{ "fail1": func() error { return errors.New("test fail1") }, "fail2": func() error { return errors.New("test fail2") }, }, expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Message: "check failure: [fail1: test fail1] [fail2: test fail2]", }, }, { about: "pass and fail", checks: map[string]func() error{ "pass": func() error { return nil }, "fail": func() error { return errors.New("test fail") }, }, expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Message: "check failure: [fail: test fail] [pass: OK]", }, }} func (s *debugSuite) TestDebugCheck(c *gc.C) { for i, test := range debugCheckTests { c.Logf("%d. 
%s", i, test.about) hnd := debugCheck(test.checks) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: hnd, ExpectStatus: test.expectStatus, ExpectBody: test.expectBody, }) } } func (s *debugSuite) TestDebugInfo(c *gc.C) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: router.HandleJSON(serveDebugInfo), ExpectStatus: http.StatusOK, ExpectBody: appver.VersionInfo, }) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/store_test.go0000664000175000017500000036522112672604603030112 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( "archive/zip" "bytes" "crypto/sha256" "crypto/sha512" "encoding/json" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "sort" "strconv" "strings" "time" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/mgo.v2/bson" "gopkg.in/natefinch/lumberjack.v2" "gopkg.in/juju/charmstore.v5-unstable/audit" "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" ) type StoreSuite struct { commonSuite } var _ = gc.Suite(&StoreSuite{}) var urlFindingTests = []struct { inStore []string expand string expect []string }{{ inStore: []string{"23 cs:~charmers/precise/wordpress-23"}, expand: "wordpress", expect: []string{"23 cs:~charmers/precise/wordpress-23"}, }, { inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/precise/wordpress-24", "25 cs:~charmers/precise/wordpress-25"}, expand: "wordpress", expect: []string{"23 cs:~charmers/precise/wordpress-23", "24 
cs:~charmers/precise/wordpress-24", "25 cs:~charmers/precise/wordpress-25"}, }, { inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/precise/wordpress-24", "25 cs:~charmers/precise/wordpress-25"}, expand: "~charmers/precise/wordpress-24", expect: []string{"24 cs:~charmers/precise/wordpress-24"}, }, { inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/precise/wordpress-24", "25 cs:~charmers/precise/wordpress-25"}, expand: "~charmers/precise/wordpress-25", expect: []string{"25 cs:~charmers/precise/wordpress-25"}, }, { inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/trusty/wordpress-24", "25 cs:~charmers/precise/wordpress-25"}, expand: "precise/wordpress", expect: []string{"23 cs:~charmers/precise/wordpress-23", "25 cs:~charmers/precise/wordpress-25"}, }, { inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/trusty/wordpress-24", "434 cs:~charmers/foo/varnish-434"}, expand: "wordpress", expect: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/trusty/wordpress-24"}, }, { inStore: []string{"23 cs:~charmers/precise/wordpress-23", "23 cs:~charmers/trusty/wordpress-23", "24 cs:~charmers/trusty/wordpress-24"}, expand: "wordpress-23", expect: []string{}, }, { inStore: []string{"cs:~user/precise/wordpress-23", "cs:~user/trusty/wordpress-23"}, expand: "~user/precise/wordpress", expect: []string{"cs:~user/precise/wordpress-23"}, }, { inStore: []string{"cs:~user/precise/wordpress-23", "cs:~user/trusty/wordpress-23"}, expand: "~user/wordpress", expect: []string{"cs:~user/precise/wordpress-23", "cs:~user/trusty/wordpress-23"}, }, { inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/trusty/wordpress-24", "434 cs:~charmers/foo/varnish-434"}, expand: "precise/wordpress-23", expect: []string{"23 cs:~charmers/precise/wordpress-23"}, }, { inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/trusty/wordpress-24", "434 
cs:~charmers/foo/varnish-434"}, expand: "arble", expect: []string{}, }, { inStore: []string{"23 cs:~charmers/multi-series-23", "24 cs:~charmers/multi-series-24"}, expand: "multi-series", expect: []string{"23 cs:~charmers/multi-series-23", "24 cs:~charmers/multi-series-24"}, }, { inStore: []string{"23 cs:~charmers/multi-series-23", "24 cs:~charmers/multi-series-24"}, expand: "trusty/multi-series", expect: []string{"23 cs:~charmers/multi-series-23", "24 cs:~charmers/multi-series-24"}, }, { inStore: []string{"23 cs:~charmers/multi-series-23", "24 cs:~charmers/multi-series-24"}, expand: "multi-series-24", expect: []string{"24 cs:~charmers/multi-series-24"}, }, { inStore: []string{"23 cs:~charmers/multi-series-23", "24 cs:~charmers/multi-series-24"}, expand: "trusty/multi-series-24", expect: []string{"24 cs:~charmers/multi-series-24"}, }, { inStore: []string{"1 cs:~charmers/multi-series-23", "2 cs:~charmers/multi-series-24"}, expand: "trusty/multi-series-1", expect: []string{"1 cs:~charmers/multi-series-23"}, }, { inStore: []string{"1 cs:~charmers/multi-series-23", "2 cs:~charmers/multi-series-24"}, expand: "multi-series-23", expect: []string{}, }, { inStore: []string{"1 cs:~charmers/multi-series-23", "2 cs:~charmers/multi-series-24"}, expand: "cs:~charmers/utopic/multi-series-23", expect: []string{"1 cs:~charmers/multi-series-23"}, }, { inStore: []string{}, expand: "precise/wordpress-23", expect: []string{}, }} func (s *StoreSuite) testURLFinding(c *gc.C, check func(store *Store, expand *charm.URL, expect []*router.ResolvedURL)) { charms := make(map[string]*charm.CharmDir) store := s.newStore(c, false) defer store.Close() for i, test := range urlFindingTests { c.Logf("test %d: %q from %q", i, test.expand, test.inStore) _, err := store.DB.Entities().RemoveAll(nil) c.Assert(err, gc.IsNil) urls := MustParseResolvedURLs(test.inStore) for _, url := range urls { name := url.URL.Name if charms[name] == nil { charms[name] = storetesting.Charms.CharmDir(name) } err := 
store.AddCharmWithArchive(url, charms[name]) c.Assert(err, gc.IsNil) } check(store, charm.MustParseURL(test.expand), MustParseResolvedURLs(test.expect)) } } func (s *StoreSuite) TestRequestStore(c *gc.C) { config := ServerParams{ HTTPRequestWaitDuration: time.Millisecond, MaxMgoSessions: 1, } p, err := NewPool(s.Session.DB("juju_test"), nil, nil, config) c.Assert(err, gc.IsNil) defer p.Close() // Instances within the limit can be acquired // instantly without error. store, err := p.RequestStore() c.Assert(err, gc.IsNil) store.Close() // Check that when we get another instance, // we reuse the original. store1, err := p.RequestStore() c.Assert(err, gc.IsNil) defer store1.Close() c.Assert(store1, gc.Equals, store) // If we try to exceed the limit, we'll wait for a while, // then return an error. t0 := time.Now() store2, err := p.RequestStore() c.Assert(err, gc.ErrorMatches, "too many mongo sessions in use") c.Assert(errgo.Cause(err), gc.Equals, ErrTooManySessions) c.Assert(store2, gc.IsNil) if d := time.Since(t0); d < config.HTTPRequestWaitDuration { c.Errorf("got wait of %v; want at least %v", d, config.HTTPRequestWaitDuration) } } func (s *StoreSuite) TestRequestStoreSatisfiedWithinTimeout(c *gc.C) { config := ServerParams{ HTTPRequestWaitDuration: 5 * time.Second, MaxMgoSessions: 1, } p, err := NewPool(s.Session.DB("juju_test"), nil, nil, config) c.Assert(err, gc.IsNil) defer p.Close() store, err := p.RequestStore() c.Assert(err, gc.IsNil) // Start a goroutine that will close the Store after a short period. 
go func() { time.Sleep(time.Millisecond) store.Close() }() store1, err := p.RequestStore() c.Assert(err, gc.IsNil) c.Assert(store1, gc.Equals, store) store1.Close() } func (s *StoreSuite) TestRequestStoreLimitCanBeExceeded(c *gc.C) { config := ServerParams{ HTTPRequestWaitDuration: 5 * time.Second, MaxMgoSessions: 1, } p, err := NewPool(s.Session.DB("juju_test"), nil, nil, config) c.Assert(err, gc.IsNil) defer p.Close() store, err := p.RequestStore() c.Assert(err, gc.IsNil) defer store.Close() store1 := store.Copy() defer store1.Close() c.Assert(store1.Pool(), gc.Equals, store.Pool()) store2 := p.Store() defer store2.Close() c.Assert(store2.Pool(), gc.Equals, store.Pool()) } func (s *StoreSuite) TestRequestStoreFailsWhenPoolIsClosed(c *gc.C) { config := ServerParams{ HTTPRequestWaitDuration: 5 * time.Second, MaxMgoSessions: 1, } p, err := NewPool(s.Session.DB("juju_test"), nil, nil, config) c.Assert(err, gc.IsNil) p.Close() store, err := p.RequestStore() c.Assert(err, gc.ErrorMatches, "charm store has been closed") c.Assert(store, gc.IsNil) } func (s *StoreSuite) TestRequestStoreLimitMaintained(c *gc.C) { config := ServerParams{ HTTPRequestWaitDuration: time.Millisecond, MaxMgoSessions: 1, } p, err := NewPool(s.Session.DB("juju_test"), nil, nil, config) c.Assert(err, gc.IsNil) defer p.Close() // Acquire an instance. store, err := p.RequestStore() c.Assert(err, gc.IsNil) defer store.Close() // Acquire another instance, exceeding the limit, // and put it back. store1 := p.Store() c.Assert(err, gc.IsNil) store1.Close() // We should still be unable to acquire another // store for a request because we're still // at the request limit. _, err = p.RequestStore() c.Assert(errgo.Cause(err), gc.Equals, ErrTooManySessions) } func (s *StoreSuite) TestPoolDoubleClose(c *gc.C) { p, err := NewPool(s.Session.DB("juju_test"), nil, nil, ServerParams{}) c.Assert(err, gc.IsNil) p.Close() p.Close() // Close a third time to ensure that the lock has properly // been released. 
p.Close() } func (s *StoreSuite) TestFindEntities(c *gc.C) { s.testURLFinding(c, func(store *Store, expand *charm.URL, expect []*router.ResolvedURL) { // Check FindEntities works when just retrieving the id and promulgated id. gotEntities, err := store.FindEntities(expand, FieldSelector("_id", "promulgated-url")) c.Assert(err, gc.IsNil) if expand.User == "" { sort.Sort(entitiesByPromulgatedURL(gotEntities)) } else { sort.Sort(entitiesByURL(gotEntities)) } c.Assert(gotEntities, gc.HasLen, len(expect)) for i, url := range expect { c.Assert(gotEntities[i], jc.DeepEquals, &mongodoc.Entity{ URL: &url.URL, PromulgatedURL: url.PromulgatedURL(), }, gc.Commentf("index %d", i)) } // check FindEntities works when retrieving all fields. gotEntities, err = store.FindEntities(expand, nil) c.Assert(err, gc.IsNil) if expand.User == "" { sort.Sort(entitiesByPromulgatedURL(gotEntities)) } else { sort.Sort(entitiesByURL(gotEntities)) } c.Assert(gotEntities, gc.HasLen, len(expect)) for i, url := range expect { var entity mongodoc.Entity err := store.DB.Entities().FindId(&url.URL).One(&entity) c.Assert(err, gc.IsNil) c.Assert(gotEntities[i], jc.DeepEquals, &entity) } }) } func (s *StoreSuite) TestFindEntity(c *gc.C) { store := s.newStore(c, false) defer store.Close() rurl := MustParseResolvedURL("cs:~charmers/precise/wordpress-5") err := store.AddCharmWithArchive(rurl, storetesting.Charms.CharmDir("wordpress")) c.Assert(err, gc.IsNil) entity0, err := store.FindEntity(rurl, nil) c.Assert(err, gc.IsNil) c.Assert(entity0, gc.NotNil) c.Assert(entity0.Size, gc.Not(gc.Equals), 0) // Check that the field selector works. 
entity2, err := store.FindEntity(rurl, FieldSelector("blobhash")) c.Assert(err, gc.IsNil) c.Assert(entity2.BlobHash, gc.Equals, entity0.BlobHash) c.Assert(entity2.Size, gc.Equals, int64(0)) rurl.URL.Name = "another" entity3, err := store.FindEntity(rurl, nil) c.Assert(err, gc.ErrorMatches, "entity not found") c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) c.Assert(entity3, gc.IsNil) } var findBaseEntityTests = []struct { about string stored []string url string fields []string expect *mongodoc.BaseEntity }{{ about: "entity found, base url, all fields", stored: []string{"42 cs:~charmers/utopic/mysql-42"}, url: "mysql", expect: storetesting.NormalizeBaseEntity(&mongodoc.BaseEntity{ URL: charm.MustParseURL("~charmers/mysql"), User: "charmers", Name: "mysql", Promulgated: true, ChannelACLs: map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, params.DevelopmentChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, }, }), }, { about: "entity found, fully qualified url, few fields", stored: []string{"42 cs:~charmers/utopic/mysql-42", "~who/precise/mysql-47"}, url: "~who/precise/mysql-0", fields: []string{"user"}, expect: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/mysql"), User: "who", }, }, { about: "entity found, partial url, only the ACLs", stored: []string{"42 cs:~charmers/utopic/mysql-42", "~who/trusty/mysql-47"}, url: "~who/mysql-42", fields: []string{"channelacls"}, expect: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/mysql"), ChannelACLs: map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"who"}, Write: []string{"who"}, }, params.DevelopmentChannel: { Read: []string{"who"}, Write: []string{"who"}, }, params.StableChannel: { Read: []string{"who"}, Write: []string{"who"}, }, }, }, }, { about: "entity not found, charm name", stored: []string{"42 
cs:~charmers/utopic/mysql-42", "~who/trusty/mysql-47"}, url: "rails", }, { about: "entity not found, user", stored: []string{"42 cs:~charmers/utopic/mysql-42", "~who/trusty/mysql-47"}, url: "~dalek/mysql", fields: []string{"channelacls"}, }} func (s *StoreSuite) TestFindBaseEntity(c *gc.C) { ch := storetesting.Charms.CharmDir("wordpress") store := s.newStore(c, false) defer store.Close() for i, test := range findBaseEntityTests { c.Logf("test %d: %s", i, test.about) // Add initial charms to the store. for _, url := range MustParseResolvedURLs(test.stored) { err := store.AddCharmWithArchive(url, ch) c.Assert(err, gc.IsNil) } // Find the entity. id := charm.MustParseURL(test.url) baseEntity, err := store.FindBaseEntity(id, FieldSelector(test.fields...)) if test.expect == nil { // We don't expect the entity to be found. c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) c.Assert(baseEntity, gc.IsNil) } else { c.Assert(err, gc.IsNil) c.Assert(storetesting.NormalizeBaseEntity(baseEntity), jc.DeepEquals, storetesting.NormalizeBaseEntity(test.expect)) } // Remove all the entities from the store. _, err = store.DB.Entities().RemoveAll(nil) c.Assert(err, gc.IsNil) _, err = store.DB.BaseEntities().RemoveAll(nil) c.Assert(err, gc.IsNil) } } func (s *StoreSuite) TestAddCharmsWithTheSameBaseEntity(c *gc.C) { store := s.newStore(c, false) defer store.Close() // Add a charm to the database. ch := storetesting.Charms.CharmDir("wordpress") url := router.MustNewResolvedURL("~charmers/trusty/wordpress-12", 12) err := store.AddCharmWithArchive(url, ch) c.Assert(err, gc.IsNil) // Add a second charm to the database, sharing the same base URL. err = store.AddCharmWithArchive(router.MustNewResolvedURL("~charmers/utopic/wordpress-13", -1), ch) c.Assert(err, gc.IsNil) // Ensure a single base entity has been created. 
num, err := store.DB.BaseEntities().Count() c.Assert(err, gc.IsNil) c.Assert(num, gc.Equals, 1) } type entitiesByURL []*mongodoc.Entity func (s entitiesByURL) Len() int { return len(s) } func (s entitiesByURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s entitiesByURL) Less(i, j int) bool { return s[i].URL.String() < s[j].URL.String() } type entitiesByPromulgatedURL []*mongodoc.Entity func (s entitiesByPromulgatedURL) Len() int { return len(s) } func (s entitiesByPromulgatedURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s entitiesByPromulgatedURL) Less(i, j int) bool { return s[i].PromulgatedURL.String() < s[j].PromulgatedURL.String() } var bundleUnitCountTests = []struct { about string data *charm.BundleData expectUnits int }{{ about: "no units", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "wordpress": { Charm: "cs:utopic/wordpress-0", NumUnits: 0, }, "mysql": { Charm: "cs:trusty/mysql-0", NumUnits: 0, }, }, }, }, { about: "a single unit", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "wordpress": { Charm: "cs:trusty/wordpress-42", NumUnits: 1, }, "mysql": { Charm: "cs:trusty/mysql-47", NumUnits: 0, }, }, }, expectUnits: 1, }, { about: "multiple units", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "wordpress": { Charm: "cs:utopic/wordpress-1", NumUnits: 1, }, "mysql": { Charm: "cs:utopic/mysql-2", NumUnits: 2, }, "riak": { Charm: "cs:utopic/riak-3", NumUnits: 5, }, }, }, expectUnits: 8, }} func (s *StoreSuite) TestBundleUnitCount(c *gc.C) { store := s.newStore(c, false) defer store.Close() entities := store.DB.Entities() for i, test := range bundleUnitCountTests { c.Logf("test %d: %s", i, test.about) url := router.MustNewResolvedURL("cs:~charmers/bundle/wordpress-simple-0", -1) url.URL.Revision = i url.PromulgatedRevision = i // Add the bundle used for this test. 
b := storetesting.NewBundle(test.data) s.addRequiredCharms(c, b) err := store.AddBundleWithArchive(url, b) c.Assert(err, gc.IsNil) // Retrieve the bundle from the database. var doc mongodoc.Entity err = entities.FindId(&url.URL).One(&doc) c.Assert(err, gc.IsNil) c.Assert(*doc.BundleUnitCount, gc.Equals, test.expectUnits) } } var bundleMachineCountTests = []struct { about string data *charm.BundleData expectMachines int }{{ about: "no machines", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "mysql": { Charm: "cs:utopic/mysql-0", NumUnits: 0, }, "wordpress": { Charm: "cs:trusty/wordpress-0", NumUnits: 0, }, }, }, }, { about: "a single machine (no placement)", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "mysql": { Charm: "cs:trusty/mysql-42", NumUnits: 1, }, "wordpress": { Charm: "cs:trusty/wordpress-47", NumUnits: 0, }, }, }, expectMachines: 1, }, { about: "a single machine (machine placement)", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "mysql": { Charm: "cs:trusty/mysql-42", NumUnits: 1, To: []string{"1"}, }, }, Machines: map[string]*charm.MachineSpec{ "1": nil, }, }, expectMachines: 1, }, { about: "a single machine (hulk smash)", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "mysql": { Charm: "cs:trusty/mysql-42", NumUnits: 1, To: []string{"1"}, }, "wordpress": { Charm: "cs:trusty/wordpress-47", NumUnits: 1, To: []string{"1"}, }, }, Machines: map[string]*charm.MachineSpec{ "1": nil, }, }, expectMachines: 1, }, { about: "a single machine (co-location)", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "mysql": { Charm: "cs:trusty/mysql-42", NumUnits: 1, }, "wordpress": { Charm: "cs:trusty/wordpress-47", NumUnits: 1, To: []string{"mysql/0"}, }, }, }, expectMachines: 1, }, { about: "a single machine (containerization)", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "mysql": { Charm: "cs:trusty/mysql-42", NumUnits: 1, To: []string{"1"}, }, 
"wordpress": { Charm: "cs:trusty/wordpress-47", NumUnits: 1, To: []string{"lxc:1"}, }, "riak": { Charm: "cs:utopic/riak-3", NumUnits: 2, To: []string{"kvm:1"}, }, }, Machines: map[string]*charm.MachineSpec{ "1": nil, }, }, expectMachines: 1, }, { about: "multiple machines (no placement)", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "mysql": { Charm: "cs:utopic/mysql-1", NumUnits: 1, }, "wordpress": { Charm: "cs:utopic/wordpress-2", NumUnits: 2, }, "riak": { Charm: "cs:utopic/riak-3", NumUnits: 5, }, }, }, expectMachines: 1 + 2 + 5, }, { about: "multiple machines (machine placement)", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "mysql": { Charm: "cs:utopic/mysql-1", NumUnits: 2, To: []string{"1", "3"}, }, "wordpress": { Charm: "cs:utopic/wordpress-2", NumUnits: 1, To: []string{"2"}, }, }, Machines: map[string]*charm.MachineSpec{ "1": nil, "2": nil, "3": nil, }, }, expectMachines: 2 + 1, }, { about: "multiple machines (hulk smash)", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "mysql": { Charm: "cs:trusty/mysql-42", NumUnits: 1, To: []string{"1"}, }, "wordpress": { Charm: "cs:trusty/wordpress-47", NumUnits: 1, To: []string{"2"}, }, "riak": { Charm: "cs:utopic/riak-3", NumUnits: 2, To: []string{"1", "2"}, }, }, Machines: map[string]*charm.MachineSpec{ "1": nil, "2": nil, }, }, expectMachines: 1 + 1 + 0, }, { about: "multiple machines (co-location)", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "mysql": { Charm: "cs:trusty/mysql-42", NumUnits: 2, }, "wordpress": { Charm: "cs:trusty/wordpress-47", NumUnits: 3, To: []string{"mysql/0", "mysql/1", "new"}, }, }, }, expectMachines: 2 + 1, }, { about: "multiple machines (containerization)", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "mysql": { Charm: "cs:trusty/mysql-42", NumUnits: 2, To: []string{"1", "2"}, }, "wordpress": { Charm: "cs:trusty/wordpress-47", NumUnits: 4, To: []string{"lxc:1", "lxc:2", "lxc:3", 
"lxc:3"}, }, "riak": { Charm: "cs:utopic/riak-3", NumUnits: 1, To: []string{"kvm:2"}, }, }, Machines: map[string]*charm.MachineSpec{ "1": nil, "2": nil, "3": nil, }, }, expectMachines: 2 + 1 + 0, }, { about: "multiple machines (partial placement in a container)", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "mysql": { Charm: "cs:trusty/mysql-42", NumUnits: 1, To: []string{"1"}, }, "wordpress": { Charm: "cs:trusty/wordpress-47", NumUnits: 10, To: []string{"lxc:1", "lxc:2"}, }, }, Machines: map[string]*charm.MachineSpec{ "1": nil, "2": nil, }, }, expectMachines: 1 + 1, }, { about: "multiple machines (partial placement in a new machine)", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "mysql": { Charm: "cs:trusty/mysql-42", NumUnits: 1, To: []string{"1"}, }, "wordpress": { Charm: "cs:trusty/wordpress-47", NumUnits: 10, To: []string{"lxc:1", "1", "new"}, }, }, Machines: map[string]*charm.MachineSpec{ "1": nil, }, }, expectMachines: 1 + 8, }, { about: "multiple machines (partial placement with new machines)", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "mysql": { Charm: "cs:trusty/mysql-42", NumUnits: 3, }, "wordpress": { Charm: "cs:trusty/wordpress-47", NumUnits: 6, To: []string{"new", "1", "lxc:1", "new"}, }, "riak": { Charm: "cs:utopic/riak-3", NumUnits: 10, To: []string{"kvm:2", "lxc:mysql/1", "new", "new", "kvm:2"}, }, }, Machines: map[string]*charm.MachineSpec{ "1": nil, "2": nil, }, }, expectMachines: 3 + 5 + 3, }, { about: "placement into container on new machine", data: &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "wordpress": { Charm: "cs:trusty/wordpress-47", NumUnits: 6, To: []string{"lxc:new", "1", "lxc:1", "kvm:new"}, }, }, Machines: map[string]*charm.MachineSpec{ "1": nil, }, }, expectMachines: 5, }} func (s *StoreSuite) TestBundleMachineCount(c *gc.C) { store := s.newStore(c, false) defer store.Close() entities := store.DB.Entities() for i, test := range bundleMachineCountTests 
{
		c.Logf("test %d: %s", i, test.about)
		url := router.MustNewResolvedURL("cs:~charmers/bundle/testbundle-0", -1)
		url.URL.Revision = i
		url.PromulgatedRevision = i
		err := test.data.Verify(nil, nil)
		c.Assert(err, gc.IsNil)
		// Add the bundle used for this test.
		b := storetesting.NewBundle(test.data)
		s.addRequiredCharms(c, b)
		err = store.AddBundleWithArchive(url, b)
		c.Assert(err, gc.IsNil)
		// Retrieve the bundle from the database.
		var doc mongodoc.Entity
		err = entities.FindId(&url.URL).One(&doc)
		c.Assert(err, gc.IsNil)
		c.Assert(*doc.BundleMachineCount, gc.Equals, test.expectMachines)
	}
}

// urlStrings returns the string form of each of the given charm URLs,
// preserving order.
func urlStrings(urls []*charm.URL) []string {
	urlStrs := make([]string, len(urls))
	for i, url := range urls {
		urlStrs[i] = url.String()
	}
	return urlStrs
}

// MustParseResolvedURL parses a resolved URL in string form, with
// the optional promulgated revision preceding the entity URL
// separated by a space.
// It panics if the string is malformed, if the entity URL has no user,
// or if it has no revision.
func MustParseResolvedURL(urlStr string) *router.ResolvedURL {
	s := strings.Fields(urlStr)
	// promRev is the promulgated revision; -1 means "not promulgated".
	promRev := -1
	switch len(s) {
	default:
		panic(fmt.Errorf("invalid resolved URL string %q", urlStr))
	case 2:
		var err error
		promRev, err = strconv.Atoi(s[0])
		if err != nil || promRev < 0 {
			panic(fmt.Errorf("invalid resolved URL string %q", urlStr))
		}
	case 1:
		// Entity URL only; no promulgated revision.
	}
	url := charm.MustParseURL(s[len(s)-1])
	if url.User == "" {
		panic("resolved URL with no user")
	}
	if url.Revision == -1 {
		panic("resolved URL with no revision")
	}
	return &router.ResolvedURL{
		URL:                 *url.WithChannel(""),
		PromulgatedRevision: promRev,
	}
}

// MustParseResolvedURLs parses each string with MustParseResolvedURL,
// panicking on the first invalid entry.
func MustParseResolvedURLs(urlStrs []string) []*router.ResolvedURL {
	urls := make([]*router.ResolvedURL, len(urlStrs))
	for i, u := range urlStrs {
		urls[i] = MustParseResolvedURL(u)
	}
	return urls
}

// TestOpenBlob checks that OpenBlob streams back exactly the archive that
// was uploaded, with matching hash and size metadata.
func (s *StoreSuite) TestOpenBlob(c *gc.C) {
	charmArchive := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress")
	store := s.newStore(c, false)
	defer store.Close()
	url := router.MustNewResolvedURL("cs:~charmers/precise/wordpress-23", 23)
	err := store.AddCharmWithArchive(url, charmArchive)
c.Assert(err, gc.IsNil)

	f, err := os.Open(charmArchive.Path)
	c.Assert(err, gc.IsNil)
	defer f.Close()
	expectHash := hashOfReader(c, f)

	blob, err := store.OpenBlob(url)
	c.Assert(err, gc.IsNil)
	defer blob.Close()

	c.Assert(hashOfReader(c, blob), gc.Equals, expectHash)
	c.Assert(blob.Hash, gc.Equals, expectHash)

	info, err := f.Stat()
	c.Assert(err, gc.IsNil)
	c.Assert(blob.Size, gc.Equals, info.Size())
}

// TestOpenBlobPreV5 checks that OpenBlobPreV5 returns a rewritten archive
// with the multi-series metadata stripped (Meta().Series empty), and that
// the pre-v5 hash/size recorded on the entity match the returned data.
func (s *StoreSuite) TestOpenBlobPreV5(c *gc.C) {
	store := s.newStore(c, false)
	defer store.Close()
	ch := storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise"))
	url := router.MustNewResolvedURL("cs:~charmers/multi-series-23", 23)
	err := store.AddCharmWithArchive(url, ch)
	c.Assert(err, gc.IsNil)

	blob, err := store.OpenBlobPreV5(url)
	c.Assert(err, gc.IsNil)
	defer blob.Close()
	data, err := ioutil.ReadAll(blob)
	c.Assert(err, gc.IsNil)
	preV5Ch, err := charm.ReadCharmArchiveBytes(data)
	c.Assert(err, gc.IsNil)

	// Check that the hashes and sizes are consistent with the data
	// we've read.
	c.Assert(blob.Hash, gc.Equals, fmt.Sprintf("%x", sha512.Sum384(data)))
	c.Assert(blob.Size, gc.Equals, int64(len(data)))
	entity, err := store.FindEntity(url, nil)
	c.Assert(err, gc.IsNil)
	c.Assert(entity.PreV5BlobHash, gc.Equals, blob.Hash)
	c.Assert(entity.PreV5BlobHash256, gc.Equals, fmt.Sprintf("%x", sha256.Sum256(data)))
	c.Assert(entity.PreV5BlobSize, gc.Equals, blob.Size)

	c.Assert(preV5Ch.Meta().Series, gc.HasLen, 0)

	// Sanity check that the series really are in the post-v5 blob.
blob, err = store.OpenBlob(url) c.Assert(err, gc.IsNil) defer blob.Close() data, err = ioutil.ReadAll(blob) c.Assert(err, gc.IsNil) postV5Ch, err := charm.ReadCharmArchiveBytes(data) c.Assert(err, gc.IsNil) c.Assert(postV5Ch.Meta().Series, jc.DeepEquals, []string{"trusty", "precise"}) } func (s *StoreSuite) TestOpenBlobPreV5WithMultiSeriesCharmInSingleSeriesId(c *gc.C) { store := s.newStore(c, false) defer store.Close() ch := storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise")) url := router.MustNewResolvedURL("cs:~charmers/precise/multi-series-23", 23) err := store.AddCharmWithArchive(url, ch) c.Assert(err, gc.IsNil) blob, err := store.OpenBlobPreV5(url) c.Assert(err, gc.IsNil) defer blob.Close() data, err := ioutil.ReadAll(blob) c.Assert(err, gc.IsNil) preV5Ch, err := charm.ReadCharmArchiveBytes(data) c.Assert(err, gc.IsNil) c.Assert(preV5Ch.Meta().Series, gc.HasLen, 0) } func (s *StoreSuite) TestAddLog(c *gc.C) { store := s.newStore(c, false) defer store.Close() urls := []*charm.URL{ charm.MustParseURL("cs:mysql"), charm.MustParseURL("cs:rails"), } infoData := json.RawMessage([]byte(`"info data"`)) errorData := json.RawMessage([]byte(`"error data"`)) // Add logs to the store. beforeAdding := time.Now().Add(-time.Second) err := store.AddLog(&infoData, mongodoc.InfoLevel, mongodoc.IngestionType, nil) c.Assert(err, gc.IsNil) err = store.AddLog(&errorData, mongodoc.ErrorLevel, mongodoc.IngestionType, urls) c.Assert(err, gc.IsNil) afterAdding := time.Now().Add(time.Second) // Retrieve the logs from the store. var docs []mongodoc.Log err = store.DB.Logs().Find(nil).Sort("_id").All(&docs) c.Assert(err, gc.IsNil) c.Assert(docs, gc.HasLen, 2) // The docs have been correctly added to the Mongo collection. 
infoDoc, errorDoc := docs[0], docs[1]
	c.Assert(infoDoc.Time, jc.TimeBetween(beforeAdding, afterAdding))
	c.Assert(errorDoc.Time, jc.TimeBetween(beforeAdding, afterAdding))
	// Zero the timestamps so the remaining fields can be compared exactly.
	infoDoc.Time = time.Time{}
	errorDoc.Time = time.Time{}
	c.Assert(infoDoc, jc.DeepEquals, mongodoc.Log{
		Data:  []byte(infoData),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.IngestionType,
		URLs:  nil,
	})
	c.Assert(errorDoc, jc.DeepEquals, mongodoc.Log{
		Data:  []byte(errorData),
		Level: mongodoc.ErrorLevel,
		Type:  mongodoc.IngestionType,
		URLs:  urls,
	})
}

// TestAddLogDataError checks that AddLog reports a marshalling error when
// the log payload is not valid JSON.
func (s *StoreSuite) TestAddLogDataError(c *gc.C) {
	store := s.newStore(c, false)
	defer store.Close()
	data := json.RawMessage([]byte("!"))

	// Try to add the invalid log message to the store.
	err := store.AddLog(&data, mongodoc.InfoLevel, mongodoc.IngestionType, nil)
	c.Assert(err, gc.ErrorMatches, "cannot marshal log data: json: error calling MarshalJSON .*")
}

// TestAddLogBaseURLs checks that AddLog augments the associated URL list
// with the base URL (series/revision stripped) of each given URL.
func (s *StoreSuite) TestAddLogBaseURLs(c *gc.C) {
	store := s.newStore(c, false)
	defer store.Close()

	// Add the log to the store with associated URLs.
	data := json.RawMessage([]byte(`"info data"`))
	err := store.AddLog(&data, mongodoc.WarningLevel, mongodoc.IngestionType, []*charm.URL{
		charm.MustParseURL("trusty/mysql-42"),
		charm.MustParseURL("~who/utopic/wordpress"),
	})
	c.Assert(err, gc.IsNil)

	// Retrieve the log from the store.
	var doc mongodoc.Log
	err = store.DB.Logs().Find(nil).One(&doc)
	c.Assert(err, gc.IsNil)

	// The log includes the base URLs.
	c.Assert(doc.URLs, jc.DeepEquals, []*charm.URL{
		charm.MustParseURL("trusty/mysql-42"),
		charm.MustParseURL("mysql"),
		charm.MustParseURL("~who/utopic/wordpress"),
		charm.MustParseURL("~who/wordpress"),
	})
}

// TestAddLogDuplicateURLs checks that AddLog de-duplicates the associated
// URL list before storing it.
func (s *StoreSuite) TestAddLogDuplicateURLs(c *gc.C) {
	store := s.newStore(c, false)
	defer store.Close()

	// Add the log to the store with associated URLs.
data := json.RawMessage([]byte(`"info data"`)) err := store.AddLog(&data, mongodoc.WarningLevel, mongodoc.IngestionType, []*charm.URL{ charm.MustParseURL("trusty/mysql-42"), charm.MustParseURL("mysql"), charm.MustParseURL("trusty/mysql-42"), charm.MustParseURL("mysql"), }) c.Assert(err, gc.IsNil) // Retrieve the log from the store. var doc mongodoc.Log err = store.DB.Logs().Find(nil).One(&doc) c.Assert(err, gc.IsNil) // The log excludes duplicate URLs. c.Assert(doc.URLs, jc.DeepEquals, []*charm.URL{ charm.MustParseURL("trusty/mysql-42"), charm.MustParseURL("mysql"), }) } func (s *StoreSuite) TestCollections(c *gc.C) { store := s.newStore(c, false) defer store.Close() colls := store.DB.Collections() names, err := store.DB.CollectionNames() c.Assert(err, gc.IsNil) // Some collections don't have indexes so they are created only when used. createdOnUse := map[string]bool{ "migrations": true, "macaroons": true, } // Check that all collections mentioned by Collections are actually created. for _, coll := range colls { found := false for _, name := range names { if name == coll.Name || createdOnUse[coll.Name] { found = true } } if !found { c.Errorf("collection %q not created", coll.Name) } } // Check that all created collections are mentioned in Collections. 
for _, name := range names { if name == "system.indexes" || name == "managedStoredResources" || name == "entitystore.files" { continue } found := false for _, coll := range colls { if coll.Name == name { found = true } } if !found { c.Errorf("extra collection %q found", name) } } } func (s *StoreSuite) TestOpenCachedBlobFileWithInvalidEntity(c *gc.C) { store := s.newStore(c, false) defer store.Close() wordpress := storetesting.Charms.CharmDir("wordpress") url := router.MustNewResolvedURL("cs:~charmers/precise/wordpress-23", 23) err := store.AddCharmWithArchive(url, wordpress) c.Assert(err, gc.IsNil) entity, err := store.FindEntity(url, FieldSelector("charmmeta")) c.Assert(err, gc.IsNil) r, err := store.OpenCachedBlobFile(entity, "", nil) c.Assert(err, gc.ErrorMatches, "provided entity does not have required fields") c.Assert(r, gc.Equals, nil) } func (s *StoreSuite) TestOpenCachedBlobFileWithFoundContent(c *gc.C) { store := s.newStore(c, false) defer store.Close() wordpress := storetesting.Charms.CharmDir("wordpress") url := router.MustNewResolvedURL("cs:~charmers/precise/wordpress-23", 23) err := store.AddCharmWithArchive(url, wordpress) c.Assert(err, gc.IsNil) // Get our expected content. data, err := ioutil.ReadFile(filepath.Join(wordpress.Path, "metadata.yaml")) c.Assert(err, gc.IsNil) expectContent := string(data) entity, err := store.FindEntity(url, FieldSelector("blobname", "contents")) c.Assert(err, gc.IsNil) // Check that, when we open the file for the first time, // we see the expected content. r, err := store.OpenCachedBlobFile(entity, mongodoc.FileIcon, func(f *zip.File) bool { return path.Clean(f.Name) == "metadata.yaml" }) c.Assert(err, gc.IsNil) defer r.Close() data, err = ioutil.ReadAll(r) c.Assert(err, gc.IsNil) c.Assert(string(data), gc.Equals, expectContent) // When retrieving the entity again, check that the Contents // map has been set appropriately... 
entity, err = store.FindEntity(url, FieldSelector("blobname", "contents")) c.Assert(err, gc.IsNil) c.Assert(entity.Contents, gc.HasLen, 1) c.Assert(entity.Contents[mongodoc.FileIcon].IsValid(), gc.Equals, true) // ... and that OpenCachedBlobFile still returns a reader with the // same data, without making use of the isFile callback. r, err = store.OpenCachedBlobFile(entity, mongodoc.FileIcon, func(f *zip.File) bool { c.Errorf("isFile called unexpectedly") return false }) c.Assert(err, gc.IsNil) defer r.Close() data, err = ioutil.ReadAll(r) c.Assert(err, gc.IsNil) c.Assert(string(data), gc.Equals, expectContent) } func (s *StoreSuite) TestOpenCachedBlobFileWithNotFoundContent(c *gc.C) { store := s.newStore(c, false) defer store.Close() wordpress := storetesting.Charms.CharmDir("wordpress") url := router.MustNewResolvedURL("cs:~charmers/precise/wordpress-23", 23) err := store.AddCharmWithArchive(url, wordpress) c.Assert(err, gc.IsNil) entity, err := store.FindEntity(url, FieldSelector("blobname", "contents")) c.Assert(err, gc.IsNil) // Check that, when we open the file for the first time, // we get a NotFound error. r, err := store.OpenCachedBlobFile(entity, mongodoc.FileIcon, func(f *zip.File) bool { return false }) c.Assert(err, gc.ErrorMatches, "not found") c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) c.Assert(r, gc.Equals, nil) // When retrieving the entity again, check that the Contents // map has been set appropriately... entity, err = store.FindEntity(url, FieldSelector("blobname", "contents")) c.Assert(err, gc.IsNil) c.Assert(entity.Contents, gc.DeepEquals, map[mongodoc.FileId]mongodoc.ZipFile{ mongodoc.FileIcon: {}, }) // ... and that OpenCachedBlobFile still returns a NotFound // error, without making use of the isFile callback. 
r, err = store.OpenCachedBlobFile(entity, mongodoc.FileIcon, func(f *zip.File) bool {
		c.Errorf("isFile called unexpectedly")
		return false
	})
	c.Assert(err, gc.ErrorMatches, "not found")
	c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound)
	c.Assert(r, gc.Equals, nil)
}

// hashOfReader returns the hex-encoded SHA-384 digest of everything read
// from r, failing the test on any read error.
func hashOfReader(c *gc.C, r io.Reader) string {
	hash := sha512.New384()
	_, err := io.Copy(hash, r)
	c.Assert(err, gc.IsNil)
	return fmt.Sprintf("%x", hash.Sum(nil))
}

// getSizeAndHashes returns the archive size, the blobstore hash and the
// hex-encoded SHA-256 hash of the given charm or bundle. The argument may
// be an ArchiverTo (archived into memory) or a *charm.BundleArchive /
// *charm.CharmArchive (read from its Path on disk); any other type, or
// any error while reading, panics. Note: files opened here are not
// explicitly closed — acceptable for test helpers only.
func getSizeAndHashes(c interface{}) (int64, string, string) {
	var r io.ReadWriter
	var err error
	switch c := c.(type) {
	case ArchiverTo:
		r = new(bytes.Buffer)
		err = c.ArchiveTo(r)
	case *charm.BundleArchive:
		r, err = os.Open(c.Path)
	case *charm.CharmArchive:
		r, err = os.Open(c.Path)
	default:
		panic(fmt.Sprintf("unable to get size and hash for type %T", c))
	}
	if err != nil {
		panic(err)
	}
	hash := blobstore.NewHash()
	hash256 := sha256.New()
	size, err := io.Copy(io.MultiWriter(hash, hash256), r)
	if err != nil {
		panic(err)
	}
	return size, fmt.Sprintf("%x", hash.Sum(nil)), fmt.Sprintf("%x", hash256.Sum(nil))
}

// testingBundle implements charm.Bundle, allowing tests
// to create a bundle with custom data.
type testingBundle struct {
	data *charm.BundleData
}

// Data returns the bundle data supplied at construction time.
func (b *testingBundle) Data() *charm.BundleData {
	return b.data
}

// ReadMe implements charm.Bundle.ReadMe.
func (b *testingBundle) ReadMe() string {
	// For the purposes of this implementation, the charm readme is not
	// relevant.
	return ""
}

// Define fake blob attributes to be used in tests.
var fakeBlobSize, fakeBlobHash = func() (int64, string) { b := []byte("fake content") h := blobstore.NewHash() h.Write(b) return int64(len(b)), fmt.Sprintf("%x", h.Sum(nil)) }() func (s *StoreSuite) TestSESPutDoesNotErrorWithNoESConfigured(c *gc.C) { store := s.newStore(c, false) defer store.Close() err := store.UpdateSearch(nil) c.Assert(err, gc.IsNil) } var findBestEntityCharms = []struct { id *router.ResolvedURL charm charm.Charm development bool stable bool }{{ id: router.MustNewResolvedURL("~charmers/trusty/wordpress-0", 0), charm: storetesting.NewCharm(nil), development: true, stable: true, }, { id: router.MustNewResolvedURL("~charmers/trusty/wordpress-1", 1), charm: storetesting.NewCharm(nil), development: true, stable: true, }, { id: router.MustNewResolvedURL("~charmers/trusty/wordpress-2", 2), charm: storetesting.NewCharm(nil), development: true, stable: false, }, { id: router.MustNewResolvedURL("~charmers/trusty/wordpress-3", 3), charm: storetesting.NewCharm(nil), development: false, stable: false, }, { id: router.MustNewResolvedURL("~charmers/precise/wordpress-4", 4), charm: storetesting.NewCharm(nil), development: true, stable: true, }, { id: router.MustNewResolvedURL("~charmers/precise/wordpress-5", 5), charm: storetesting.NewCharm(nil), development: true, stable: true, }, { id: router.MustNewResolvedURL("~charmers/precise/wordpress-6", 6), charm: storetesting.NewCharm(nil), development: true, stable: false, }, { id: router.MustNewResolvedURL("~charmers/precise/wordpress-7", 7), charm: storetesting.NewCharm(nil), development: false, stable: false, }, { id: router.MustNewResolvedURL("~charmers/mysql-0", 0), charm: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise")), development: true, stable: true, }, { id: router.MustNewResolvedURL("~charmers/mysql-1", 1), charm: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise")), development: true, stable: true, }, { id: 
router.MustNewResolvedURL("~charmers/mysql-2", 2), charm: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise")), development: true, stable: false, }, { id: router.MustNewResolvedURL("~charmers/mysql-3", 3), charm: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise")), development: false, stable: false, }, { id: router.MustNewResolvedURL("~charmers/trusty/mongodb-0", -1), charm: storetesting.NewCharm(nil), development: true, stable: true, }, { id: router.MustNewResolvedURL("~charmers/trusty/mongodb-1", -1), charm: storetesting.NewCharm(nil), development: true, stable: true, }, { id: router.MustNewResolvedURL("~charmers/trusty/mongodb-2", -1), charm: storetesting.NewCharm(nil), development: true, stable: false, }, { id: router.MustNewResolvedURL("~charmers/trusty/mongodb-3", -1), charm: storetesting.NewCharm(nil), development: false, stable: false, }, { id: router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), charm: storetesting.NewCharm(nil), development: true, stable: false, }, { id: router.MustNewResolvedURL("~charmers/trusty/nginx-0", 0), charm: storetesting.NewCharm(nil), development: false, stable: false, }, { id: router.MustNewResolvedURL("~charmers/trusty/postgresql-0", 0), charm: storetesting.NewCharm(nil), development: true, stable: true, }, { id: router.MustNewResolvedURL("~charmers/precise/postgresql-0", 0), charm: storetesting.NewCharm(nil), development: true, stable: true, }, { id: router.MustNewResolvedURL("~charmers/postgresql-1", 1), charm: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise")), development: true, stable: true, }, { id: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), charm: storetesting.NewCharm(nil), development: true, stable: true, }, { id: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1), charm: storetesting.NewCharm(nil), development: true, stable: false, }} var findBestEntityBundles = []struct { id 
*router.ResolvedURL bundle charm.Bundle development bool stable bool }{{ id: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), bundle: storetesting.NewBundle(&charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "wordpress": &charm.ServiceSpec{ Charm: "cs:wordpress", }, "mysql": &charm.ServiceSpec{ Charm: "cs:mysql", }, }, }), development: true, stable: true, }, { id: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-1", 1), bundle: storetesting.NewBundle(&charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "wordpress": &charm.ServiceSpec{ Charm: "cs:wordpress", }, "mysql": &charm.ServiceSpec{ Charm: "cs:mysql", }, }, }), development: true, stable: true, }, { id: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-2", 2), bundle: storetesting.NewBundle(&charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "wordpress": &charm.ServiceSpec{ Charm: "cs:wordpress", }, "mysql": &charm.ServiceSpec{ Charm: "cs:mysql", }, }, }), development: true, stable: false, }, { id: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-3", 3), bundle: storetesting.NewBundle(&charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "wordpress": &charm.ServiceSpec{ Charm: "cs:wordpress", }, "mysql": &charm.ServiceSpec{ Charm: "cs:mysql", }, }, }), development: false, stable: false, }} var findBestEntityTests = []struct { url string channel params.Channel expectID *router.ResolvedURL expectError string expectErrorCause error }{{ url: "~charmers/trusty/wordpress-0", expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-0", 0), }, { url: "~charmers/trusty/wordpress-0", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-0", 0), }, { url: "~charmers/trusty/wordpress-0", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-0", 0), }, { url: "~charmers/trusty/wordpress-0", channel: params.UnpublishedChannel, expectID: 
router.MustNewResolvedURL("~charmers/trusty/wordpress-0", 0), }, { url: "~charmers/trusty/wordpress-3", expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-3", 3), }, { url: "~charmers/trusty/wordpress-3", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-3", 3), }, { url: "~charmers/trusty/wordpress-3", channel: params.DevelopmentChannel, expectError: "cs:~charmers/trusty/wordpress-3 not found in development channel", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/trusty/wordpress-2", channel: params.StableChannel, expectError: "cs:~charmers/trusty/wordpress-2 not found in stable channel", expectErrorCause: params.ErrNotFound, }, { url: "trusty/wordpress-0", expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-0", 0), }, { url: "trusty/wordpress-0", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-0", 0), }, { url: "trusty/wordpress-0", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-0", 0), }, { url: "trusty/wordpress-0", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-0", 0), }, { url: "trusty/wordpress-3", expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-3", 3), }, { url: "trusty/wordpress-3", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-3", 3), }, { url: "trusty/wordpress-3", channel: params.DevelopmentChannel, expectError: "cs:trusty/wordpress-3 not found in development channel", expectErrorCause: params.ErrNotFound, }, { url: "trusty/wordpress-2", channel: params.StableChannel, expectError: "cs:trusty/wordpress-2 not found in stable channel", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/trusty/wordpress", expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-1", 1), }, { url: "~charmers/trusty/wordpress", channel: params.StableChannel, 
expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-1", 1), }, { url: "~charmers/trusty/wordpress", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-2", 2), }, { url: "~charmers/trusty/wordpress", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-3", 3), }, { url: "trusty/wordpress", expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-1", 1), }, { url: "trusty/wordpress", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-1", 1), }, { url: "trusty/wordpress", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-2", 2), }, { url: "trusty/wordpress", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-3", 3), }, { url: "precise/wordpress", expectID: router.MustNewResolvedURL("~charmers/precise/wordpress-5", 5), }, { url: "precise/wordpress", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/precise/wordpress-5", 5), }, { url: "precise/wordpress", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/precise/wordpress-6", 6), }, { url: "precise/wordpress", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/precise/wordpress-7", 7), }, { url: "wordpress", expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-1", 1), }, { url: "wordpress", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-1", 1), }, { url: "wordpress", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-2", 2), }, { url: "wordpress", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-3", 3), }, { url: "~charmers/wordpress", expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-1", 1), }, { url: 
"~charmers/wordpress", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-1", 1), }, { url: "~charmers/wordpress", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-2", 2), }, { url: "~charmers/wordpress", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-3", 3), }, { url: "~charmers/wordpress-0", expectError: "no matching charm or bundle for cs:~charmers/wordpress-0", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/wordpress-0", channel: params.StableChannel, expectError: "no matching charm or bundle for cs:~charmers/wordpress-0", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/wordpress-0", channel: params.DevelopmentChannel, expectError: "no matching charm or bundle for cs:~charmers/wordpress-0", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/wordpress-0", channel: params.UnpublishedChannel, expectError: "no matching charm or bundle for cs:~charmers/wordpress-0", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/mysql-0", expectID: router.MustNewResolvedURL("~charmers/mysql-0", 0), }, { url: "~charmers/mysql-0", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-0", 0), }, { url: "~charmers/mysql-0", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-0", 0), }, { url: "~charmers/mysql-0", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-0", 0), }, { url: "~charmers/mysql-3", expectID: router.MustNewResolvedURL("~charmers/mysql-3", 3), }, { url: "~charmers/mysql-3", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-3", 3), }, { url: "~charmers/mysql-3", channel: params.DevelopmentChannel, expectError: "cs:~charmers/mysql-3 not found in development channel", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/mysql-2", 
channel: params.StableChannel, expectError: "cs:~charmers/mysql-2 not found in stable channel", expectErrorCause: params.ErrNotFound, }, { url: "mysql-0", expectID: findBestEntityCharms[8].id, }, { url: "mysql-0", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-0", 0), }, { url: "mysql-0", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-0", 0), }, { url: "mysql-0", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-0", 0), }, { url: "mysql-3", expectID: router.MustNewResolvedURL("~charmers/mysql-3", 3), }, { url: "mysql-3", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-3", 3), }, { url: "mysql-3", channel: params.DevelopmentChannel, expectError: "cs:mysql-3 not found in development channel", expectErrorCause: params.ErrNotFound, }, { url: "mysql-2", channel: params.StableChannel, expectError: "cs:mysql-2 not found in stable channel", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/mysql", expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "~charmers/mysql", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "~charmers/mysql", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-2", 2), }, { url: "~charmers/mysql", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-3", 3), }, { url: "mysql", expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "mysql", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "mysql", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-2", 2), }, { url: "mysql", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-3", 3), }, { url: "~charmers/precise/mysql", expectID: 
router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "~charmers/precise/mysql", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "~charmers/precise/mysql", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-2", 2), }, { url: "~charmers/precise/mysql", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-3", 3), }, { url: "precise/mysql", expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "precise/mysql", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "precise/mysql", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-2", 2), }, { url: "precise/mysql", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-3", 3), }, { url: "~charmers/trusty/mysql", expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "~charmers/trusty/mysql", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "~charmers/trusty/mysql", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-2", 2), }, { url: "~charmers/trusty/mysql", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-3", 3), }, { url: "trusty/mysql", expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "trusty/mysql", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "trusty/mysql", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-2", 2), }, { url: "trusty/mysql", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-3", 3), }, { url: "~charmers/trusty/mongodb-0", expectID: router.MustNewResolvedURL("~charmers/trusty/mongodb-0", -1), }, { url: "~charmers/trusty/mongodb-0", 
channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/mongodb-0", -1), }, { url: "~charmers/trusty/mongodb-0", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/mongodb-0", -1), }, { url: "~charmers/trusty/mongodb-0", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/mongodb-0", -1), }, { url: "~charmers/trusty/mongodb-3", expectID: router.MustNewResolvedURL("~charmers/trusty/mongodb-3", -1), }, { url: "~charmers/trusty/mongodb-3", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/mongodb-3", -1), }, { url: "~charmers/trusty/mongodb-3", channel: params.DevelopmentChannel, expectError: "cs:~charmers/trusty/mongodb-3 not found in development channel", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/trusty/mongodb-2", channel: params.StableChannel, expectError: "cs:~charmers/trusty/mongodb-2 not found in stable channel", expectErrorCause: params.ErrNotFound, }, { url: "trusty/mongodb-0", expectError: "no matching charm or bundle for cs:trusty/mongodb-0", expectErrorCause: params.ErrNotFound, }, { url: "trusty/mongodb-0", channel: params.StableChannel, expectError: "no matching charm or bundle for cs:trusty/mongodb-0", expectErrorCause: params.ErrNotFound, }, { url: "trusty/mongodb-0", channel: params.DevelopmentChannel, expectError: "no matching charm or bundle for cs:trusty/mongodb-0", expectErrorCause: params.ErrNotFound, }, { url: "trusty/mongodb-0", channel: params.UnpublishedChannel, expectError: "no matching charm or bundle for cs:trusty/mongodb-0", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/trusty/mongodb", expectID: router.MustNewResolvedURL("~charmers/trusty/mongodb-1", -1), }, { url: "~charmers/trusty/mongodb", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/mongodb-1", -1), }, { url: "~charmers/trusty/mongodb", channel: params.DevelopmentChannel, 
expectID: router.MustNewResolvedURL("~charmers/trusty/mongodb-2", -1), }, { url: "~charmers/trusty/mongodb", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/mongodb-3", -1), }, { url: "trusty/mongodb", expectError: "no matching charm or bundle for cs:trusty/mongodb", expectErrorCause: params.ErrNotFound, }, { url: "trusty/mongodb", channel: params.StableChannel, expectError: "no matching charm or bundle for cs:trusty/mongodb", expectErrorCause: params.ErrNotFound, }, { url: "trusty/mongodb", channel: params.DevelopmentChannel, expectError: "no matching charm or bundle for cs:trusty/mongodb", expectErrorCause: params.ErrNotFound, }, { url: "trusty/mongodb", channel: params.UnpublishedChannel, expectError: "no matching charm or bundle for cs:trusty/mongodb", expectErrorCause: params.ErrNotFound, }, { url: "mongodb", expectError: "no matching charm or bundle for cs:mongodb", expectErrorCause: params.ErrNotFound, }, { url: "mongodb", channel: params.StableChannel, expectError: "no matching charm or bundle for cs:mongodb", expectErrorCause: params.ErrNotFound, }, { url: "mongodb", channel: params.DevelopmentChannel, expectError: "no matching charm or bundle for cs:mongodb", expectErrorCause: params.ErrNotFound, }, { url: "mongodb", channel: params.UnpublishedChannel, expectError: "no matching charm or bundle for cs:mongodb", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/trusty/apache", expectError: "no matching charm or bundle for cs:~charmers/trusty/apache", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/trusty/apache", channel: params.StableChannel, expectError: "no matching charm or bundle for cs:~charmers/trusty/apache", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/trusty/apache", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), }, { url: "~charmers/trusty/apache", channel: params.UnpublishedChannel, expectID: 
router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), }, { url: "~charmers/trusty/apache-0", expectID: router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), }, { url: "~charmers/trusty/apache-0", channel: params.StableChannel, expectError: "cs:~charmers/trusty/apache-0 not found in stable channel", }, { url: "~charmers/trusty/apache-0", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), }, { url: "~charmers/trusty/apache-0", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), }, { url: "trusty/apache", expectError: "no matching charm or bundle for cs:trusty/apache", expectErrorCause: params.ErrNotFound, }, { url: "trusty/apache", channel: params.StableChannel, expectError: "no matching charm or bundle for cs:trusty/apache", expectErrorCause: params.ErrNotFound, }, { url: "trusty/apache", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), }, { url: "trusty/apache", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), }, { url: "trusty/apache-0", expectID: router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), }, { url: "trusty/apache-0", channel: params.StableChannel, expectError: "cs:trusty/apache-0 not found in stable channel", }, { url: "trusty/apache-0", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), }, { url: "trusty/apache-0", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), }, { url: "~charmers/trusty/nginx", expectError: "no matching charm or bundle for cs:~charmers/trusty/nginx", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/trusty/nginx", channel: params.StableChannel, expectError: "no matching charm or bundle for cs:~charmers/trusty/nginx", expectErrorCause: params.ErrNotFound, }, { url: 
"~charmers/trusty/nginx", channel: params.DevelopmentChannel, expectError: "no matching charm or bundle for cs:~charmers/trusty/nginx", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/trusty/nginx", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/nginx-0", 0), }, { url: "~charmers/trusty/nginx-0", expectID: router.MustNewResolvedURL("~charmers/trusty/nginx-0", 0), }, { url: "~charmers/trusty/nginx-0", channel: params.StableChannel, expectError: "cs:~charmers/trusty/nginx-0 not found in stable channel", }, { url: "~charmers/trusty/nginx-0", channel: params.DevelopmentChannel, expectError: "cs:~charmers/trusty/nginx-0 not found in development channel", }, { url: "~charmers/trusty/nginx-0", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/nginx-0", 0), }, { url: "trusty/nginx", expectError: "no matching charm or bundle for cs:trusty/nginx", expectErrorCause: params.ErrNotFound, }, { url: "trusty/nginx", channel: params.StableChannel, expectError: "no matching charm or bundle for cs:trusty/nginx", expectErrorCause: params.ErrNotFound, }, { url: "trusty/nginx", channel: params.DevelopmentChannel, expectError: "no matching charm or bundle for cs:trusty/nginx", expectErrorCause: params.ErrNotFound, }, { url: "trusty/nginx", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/nginx-0", 0), }, { url: "trusty/nginx-0", expectID: router.MustNewResolvedURL("~charmers/trusty/nginx-0", 0), }, { url: "trusty/nginx-0", channel: params.StableChannel, expectError: "cs:trusty/nginx-0 not found in stable channel", }, { url: "trusty/nginx-0", channel: params.DevelopmentChannel, expectError: "cs:trusty/nginx-0 not found in development channel", }, { url: "trusty/nginx-0", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/nginx-0", 0), }, { url: "~charmers/bundle/wordpress-simple-0", expectID: 
router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "~charmers/bundle/wordpress-simple-0", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "~charmers/bundle/wordpress-simple-0", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "~charmers/bundle/wordpress-simple-0", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "~charmers/bundle/wordpress-simple-3", expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-3", 3), }, { url: "~charmers/bundle/wordpress-simple-3", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-3", 3), }, { url: "~charmers/bundle/wordpress-simple-3", channel: params.DevelopmentChannel, expectError: "cs:~charmers/bundle/wordpress-simple-3 not found in development channel", }, { url: "~charmers/bundle/wordpress-simple-3", channel: params.StableChannel, expectError: "cs:~charmers/bundle/wordpress-simple-3 not found in stable channel", }, { url: "bundle/wordpress-simple-0", expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "bundle/wordpress-simple-0", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "bundle/wordpress-simple-0", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "bundle/wordpress-simple-0", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "bundle/wordpress-simple-3", expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-3", 3), }, { url: "bundle/wordpress-simple-3", channel: params.UnpublishedChannel, expectID: 
router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-3", 3), }, { url: "bundle/wordpress-simple-3", channel: params.DevelopmentChannel, expectError: "cs:bundle/wordpress-simple-3 not found in development channel", }, { url: "bundle/wordpress-simple-2", channel: params.StableChannel, expectError: "cs:bundle/wordpress-simple-2 not found in stable channel", }, { url: "~charmers/bundle/wordpress-simple", expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-1", 1), }, { url: "~charmers/bundle/wordpress-simple", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-1", 1), }, { url: "~charmers/bundle/wordpress-simple", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-2", 2), }, { url: "~charmers/bundle/wordpress-simple", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-3", 3), }, { url: "bundle/wordpress-simple", expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-1", 1), }, { url: "bundle/wordpress-simple", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-1", 1), }, { url: "bundle/wordpress-simple", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-2", 2), }, { url: "bundle/wordpress-simple", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-3", 3), }, { url: "wordpress-simple", expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-1", 1), }, { url: "wordpress-simple", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-1", 1), }, { url: "wordpress-simple", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-2", 2), }, { url: "wordpress-simple", channel: params.UnpublishedChannel, 
expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-3", 3), }, { url: "~charmers/wordpress-simple", expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-1", 1), }, { url: "~charmers/wordpress-simple", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-1", 1), }, { url: "~charmers/wordpress-simple", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-2", 2), }, { url: "~charmers/wordpress-simple", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-3", 3), }, { url: "~charmers/wordpress-simple-0", expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "~charmers/wordpress-simple-0", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "~charmers/wordpress-simple-0", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "~charmers/wordpress-simple-0", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "~charmers/trusty/wordpress", channel: "no-such-channel", expectError: "no matching charm or bundle for cs:~charmers/trusty/wordpress", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/trusty/postgresql-0", expectID: router.MustNewResolvedURL("~charmers/trusty/postgresql-0", 0), }, { url: "~charmers/trusty/postgresql-0", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/postgresql-0", 0), }, { url: "~charmers/trusty/postgresql-0", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/postgresql-0", 0), }, { url: "~charmers/trusty/postgresql-0", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/postgresql-0", 0), }, 
{ url: "~charmers/precise/postgresql-0", expectID: router.MustNewResolvedURL("~charmers/precise/postgresql-0", 0), }, { url: "~charmers/precise/postgresql-0", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/precise/postgresql-0", 0), }, { url: "~charmers/precise/postgresql-0", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/precise/postgresql-0", 0), }, { url: "~charmers/precise/postgresql-0", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/precise/postgresql-0", 0), }, { url: "~charmers/trusty/postgresql-1", expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/trusty/postgresql-1", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/trusty/postgresql-1", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/trusty/postgresql-1", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/precise/postgresql-1", expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/precise/postgresql-1", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/precise/postgresql-1", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/precise/postgresql-1", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/trusty/postgresql", expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/trusty/postgresql", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/trusty/postgresql", channel: params.DevelopmentChannel, expectID: 
router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/trusty/postgresql", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/precise/postgresql", expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/precise/postgresql", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/precise/postgresql", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/precise/postgresql", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "trusty/postgresql", expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "trusty/postgresql", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "trusty/postgresql", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "trusty/postgresql", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "precise/postgresql", expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "precise/postgresql", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "precise/postgresql", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "precise/postgresql", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "postgresql", expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "postgresql", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "postgresql", channel: params.DevelopmentChannel, 
expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "postgresql", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "postgresql-1", expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "postgresql-1", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "postgresql-1", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "postgresql-1", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "postgresql-0", expectError: "no matching charm or bundle for cs:postgresql-0", expectErrorCause: params.ErrNotFound, }, { url: "postgresql-0", channel: params.StableChannel, expectError: "no matching charm or bundle for cs:postgresql-0", expectErrorCause: params.ErrNotFound, }, { url: "postgresql-0", channel: params.DevelopmentChannel, expectError: "no matching charm or bundle for cs:postgresql-0", expectErrorCause: params.ErrNotFound, }, { url: "postgresql-0", channel: params.UnpublishedChannel, expectError: "no matching charm or bundle for cs:postgresql-0", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/trusty/ceph-0", expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "~charmers/trusty/ceph-0", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "~charmers/trusty/ceph-0", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "~charmers/trusty/ceph-0", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "~charmers/trusty/ceph", expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "~charmers/trusty/ceph", channel: params.StableChannel, expectID: 
router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "~charmers/trusty/ceph", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "~charmers/trusty/ceph", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "~openstack-charmers/trusty/ceph-0", expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1), }, { url: "~openstack-charmers/trusty/ceph-0", channel: params.StableChannel, expectError: "cs:~openstack-charmers/trusty/ceph-0 not found in stable channel", }, { url: "~openstack-charmers/trusty/ceph-0", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1), }, { url: "~openstack-charmers/trusty/ceph-0", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1), }, { url: "~openstack-charmers/trusty/ceph", expectError: "no matching charm or bundle for cs:~openstack-charmers/trusty/ceph", expectErrorCause: params.ErrNotFound, }, { url: "~openstack-charmers/trusty/ceph", channel: params.StableChannel, expectError: "no matching charm or bundle for cs:~openstack-charmers/trusty/ceph", expectErrorCause: params.ErrNotFound, }, { url: "~openstack-charmers/trusty/ceph", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1), }, { url: "~openstack-charmers/trusty/ceph", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1), }, { url: "trusty/ceph-0", expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "trusty/ceph-0", channel: params.StableChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "trusty/ceph-0", channel: params.DevelopmentChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "trusty/ceph-0", channel: 
params.UnpublishedChannel,
	expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0),
}, {
	url:      "trusty/ceph-1",
	expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1),
}, {
	url:         "trusty/ceph-1",
	channel:     params.StableChannel,
	expectError: "cs:trusty/ceph-1 not found in stable channel",
}, {
	url:      "trusty/ceph-1",
	channel:  params.DevelopmentChannel,
	expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1),
}, {
	url:      "trusty/ceph-1",
	channel:  params.UnpublishedChannel,
	expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1),
}, {
	url:              "trusty/ceph",
	expectError:      "no matching charm or bundle for cs:trusty/ceph",
	expectErrorCause: params.ErrNotFound,
}, {
	url:              "trusty/ceph",
	channel:          params.StableChannel,
	expectError:      "no matching charm or bundle for cs:trusty/ceph",
	expectErrorCause: params.ErrNotFound,
}, {
	url:      "trusty/ceph",
	channel:  params.DevelopmentChannel,
	expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1),
}, {
	url:      "trusty/ceph",
	channel:  params.UnpublishedChannel,
	expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1),
}, {
	url:              "ceph",
	expectError:      "no matching charm or bundle for cs:ceph",
	expectErrorCause: params.ErrNotFound,
}, {
	url:              "ceph",
	channel:          params.StableChannel,
	expectError:      "no matching charm or bundle for cs:ceph",
	expectErrorCause: params.ErrNotFound,
}, {
	url:      "ceph",
	channel:  params.DevelopmentChannel,
	expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1),
}, {
	url:      "ceph",
	channel:  params.UnpublishedChannel,
	expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1),
}}

// TestFindBestEntity populates a fresh store with the findBestEntityCharms
// and findBestEntityBundles fixtures — promulgating each one whose id has a
// promulgated revision and publishing it to the development and/or stable
// channels as its flags request — and then runs every findBestEntityTests
// case, checking that FindBestEntity either resolves the URL/channel pair to
// the expected resolved URL or fails with the expected error (and cause,
// when one is specified).
func (s *StoreSuite) TestFindBestEntity(c *gc.C) {
	store := s.newStore(c, false)
	defer store.Close()
	// Upload and publish the charm fixtures.
	for _, ch := range findBestEntityCharms {
		err := store.AddCharmWithArchive(ch.id, ch.charm)
		c.Assert(err, gc.IsNil)
		// A fixture is promulgated exactly when its id carries a
		// promulgated revision (i.e. it is not -1).
		err = store.SetPromulgated(ch.id, ch.id.PromulgatedRevision != -1)
		c.Assert(err, gc.IsNil)
		if ch.development {
			err := store.Publish(ch.id, params.DevelopmentChannel)
			c.Assert(err, gc.IsNil)
		}
		if ch.stable {
			err := store.Publish(ch.id, params.StableChannel)
			c.Assert(err, gc.IsNil)
		}
	}
	// Upload and publish the bundle fixtures the same way.
	for _, b := range findBestEntityBundles {
		err := store.AddBundleWithArchive(b.id, b.bundle)
		c.Assert(err, gc.IsNil)
		err = store.SetPromulgated(b.id, b.id.PromulgatedRevision != -1)
		c.Assert(err, gc.IsNil)
		if b.development {
			err := store.Publish(b.id, params.DevelopmentChannel)
			c.Assert(err, gc.IsNil)
		}
		if b.stable {
			err := store.Publish(b.id, params.StableChannel)
			c.Assert(err, gc.IsNil)
		}
	}
	// Run the table of resolution tests.
	for i, test := range findBestEntityTests {
		c.Logf("test %d: %s (%s)", i, test.url, test.channel)
		entity, err := store.FindBestEntity(charm.MustParseURL(test.url), test.channel, nil)
		if test.expectError != "" {
			c.Assert(err, gc.ErrorMatches, test.expectError)
			// The error cause is only checked when the test case
			// specifies one.
			if test.expectErrorCause != nil {
				c.Assert(errgo.Cause(err), gc.Equals, test.expectErrorCause)
			}
			continue
		}
		c.Assert(err, gc.IsNil)
		c.Assert(EntityResolvedURL(entity), jc.DeepEquals, test.expectID)
	}
}

// matchingInterfacesQueryTests holds the cases for TestMatchingInterfacesQuery.
// Each case gives the required and provided interface names to query for and
// the (sorted) promulgated charm URLs expected to match. A case with neither
// required nor provided interfaces expects an empty result.
var matchingInterfacesQueryTests = []struct {
	required []string
	provided []string
	expect   []string
}{{
	provided: []string{"a"},
	expect: []string{
		"cs:~charmers/trusty/wordpress-1",
		"cs:~charmers/trusty/wordpress-2",
	},
}, {
	provided: []string{"a", "b", "d"},
	required: []string{"b", "c", "e"},
	expect: []string{
		"cs:~charmers/trusty/mysql-1",
		"cs:~charmers/trusty/wordpress-1",
		"cs:~charmers/trusty/wordpress-2",
	},
}, {
	required: []string{"x"},
	expect: []string{
		"cs:~charmers/trusty/mysql-1",
		"cs:~charmers/trusty/wordpress-2",
	},
}, {
	expect: []string{},
}}

// TestMatchingInterfacesQuery inserts three charm entities with known
// provided/required interface sets directly into the entities collection,
// then checks that MatchingInterfacesQuery returns exactly the expected
// entities for each matchingInterfacesQueryTests case. Results are sorted
// by URL before comparison because query order is not significant.
func (s *StoreSuite) TestMatchingInterfacesQuery(c *gc.C) {
	store := s.newStore(c, false)
	defer store.Close()
	entities := []*mongodoc.Entity{{
		URL:                     charm.MustParseURL("~charmers/trusty/wordpress-1"),
		PromulgatedURL:          charm.MustParseURL("trusty/wordpress-1"),
		CharmProvidedInterfaces: []string{"a", "b"},
		CharmRequiredInterfaces: []string{"b", "c"},
	}, {
		URL:                     charm.MustParseURL("~charmers/trusty/wordpress-2"),
		PromulgatedURL:          charm.MustParseURL("trusty/wordpress-2"),
		CharmProvidedInterfaces: []string{"a", "b"},
		CharmRequiredInterfaces: []string{"b", "c", "x"},
	}, {
		URL:                     charm.MustParseURL("~charmers/trusty/mysql-1"),
		PromulgatedURL:          charm.MustParseURL("trusty/mysql-1"),
		CharmProvidedInterfaces: []string{"d", "b"},
		CharmRequiredInterfaces: []string{"e", "x"},
	}}
	for _, e := range entities {
		err := store.DB.Entities().Insert(denormalizedEntity(e))
		c.Assert(err, gc.IsNil)
	}
	for i, test := range matchingInterfacesQueryTests {
		c.Logf("test %d: req %v; prov %v", i, test.required, test.provided)
		var entities []*mongodoc.Entity
		err := store.MatchingInterfacesQuery(test.required, test.provided).All(&entities)
		c.Assert(err, gc.IsNil)
		var got []string
		for _, e := range entities {
			got = append(got, e.URL.String())
		}
		sort.Strings(got)
		c.Assert(got, jc.DeepEquals, test.expect)
	}
}

// updateEntityTests holds the cases for TestUpdateEntity: the URL to update
// and, for the failure case, the expected error.
var updateEntityTests = []struct {
	url       string
	expectErr string
}{{
	url: "~charmers/trusty/wordpress-10",
}, {
	url:       "~charmers/precise/wordpress-10",
	expectErr: `cannot update "cs:precise/wordpress-10": not found`,
}}

// TestUpdateEntity checks UpdateEntity. For each case it resets the entities
// collection, inserts a single ~charmers/trusty/wordpress-10 entity
// (promulgated as trusty/wordpress-4), and applies a $set update writing
// "PASS" into extrainfo.test via the test URL. Updating the existing entity
// must succeed and be visible through FindEntity; updating a URL that does
// not match must fail with the expected error.
func (s *StoreSuite) TestUpdateEntity(c *gc.C) {
	store := s.newStore(c, false)
	defer store.Close()
	for i, test := range updateEntityTests {
		c.Logf("test %d. %s", i, test.url)
		url := router.MustNewResolvedURL(test.url, 10)
		// Start each case from a clean entities collection.
		_, err := store.DB.Entities().RemoveAll(nil)
		c.Assert(err, gc.IsNil)
		err = store.DB.Entities().Insert(denormalizedEntity(&mongodoc.Entity{
			URL:            charm.MustParseURL("~charmers/trusty/wordpress-10"),
			PromulgatedURL: charm.MustParseURL("trusty/wordpress-4"),
		}))
		c.Assert(err, gc.IsNil)
		err = store.UpdateEntity(url, bson.D{{"$set", bson.D{{"extrainfo.test", []byte("PASS")}}}})
		if test.expectErr != "" {
			c.Assert(err, gc.ErrorMatches, test.expectErr)
		} else {
			c.Assert(err, gc.IsNil)
			entity, err := store.FindEntity(url, nil)
			c.Assert(err, gc.IsNil)
			c.Assert(string(entity.ExtraInfo["test"]), gc.Equals, "PASS")
		}
	}
}

// updateBaseEntityTests holds the cases for TestUpdateBaseEntity: the URL to
// update and, for the failure case, the expected error.
var updateBaseEntityTests = []struct {
	url       string
	expectErr string
}{{
	url: "~charmers/trusty/wordpress-10",
}, {
	url:       "~charmers/precise/mysql-10",
	expectErr: `cannot update base entity for "cs:precise/mysql-10": not found`,
}}

// TestUpdateBaseEntity checks UpdateBaseEntity. For each case it resets the
// base-entities collection, inserts a single ~charmers/wordpress base entity,
// and applies a $set update to channelacls.unpublished granting read access
// to "test". Updating the existing base entity must succeed and the new ACL
// must be visible through FindBaseEntity; updating a URL with no base entity
// must fail with the expected error.
func (s *StoreSuite) TestUpdateBaseEntity(c *gc.C) {
	store := s.newStore(c, false)
	defer store.Close()
	for i, test := range updateBaseEntityTests {
		c.Logf("test %d. %s", i, test.url)
		url := router.MustNewResolvedURL(test.url, 10)
		// Start each case from a clean base-entities collection.
		_, err := store.DB.BaseEntities().RemoveAll(nil)
		c.Assert(err, gc.IsNil)
		err = store.DB.BaseEntities().Insert(&mongodoc.BaseEntity{
			URL:         charm.MustParseURL("~charmers/wordpress"),
			User:        "charmers",
			Name:        "wordpress",
			Promulgated: true,
		})
		c.Assert(err, gc.IsNil)
		err = store.UpdateBaseEntity(url, bson.D{{"$set", bson.D{{"channelacls.unpublished", mongodoc.ACL{
			Read: []string{"test"},
		}}}}})
		if test.expectErr != "" {
			c.Assert(err, gc.ErrorMatches, test.expectErr)
		} else {
			c.Assert(err, gc.IsNil)
			baseEntity, err := store.FindBaseEntity(&url.URL, nil)
			c.Assert(err, gc.IsNil)
			c.Assert(baseEntity.ChannelACLs[params.UnpublishedChannel].Read, jc.DeepEquals, []string{"test"})
		}
	}
}

// promulgateTests describes promulgation scenarios: the initial entities and
// base entities, the URL to (un)promulgate, and the expected state of both
// collections afterwards. The entity/baseEntity helpers build fixtures from
// an owner URL plus an optional promulgated URL ("" means not promulgated).
// NOTE(review): this table continues beyond this excerpt.
var promulgateTests = []struct {
	about              string
	entities           []*mongodoc.Entity
	baseEntities       []*mongodoc.BaseEntity
	url                string
	promulgate         bool
	expectErr          string
	expectEntities     []*mongodoc.Entity
	expectBaseEntities []*mongodoc.BaseEntity
}{{
	about: "single charm not already promulgated",
	entities: []*mongodoc.Entity{
		entity("~charmers/trusty/wordpress-0", ""),
	},
	baseEntities: []*mongodoc.BaseEntity{
		baseEntity("~charmers/wordpress", false),
	},
	url:        "~charmers/trusty/wordpress-0",
	promulgate: true,
	expectEntities: []*mongodoc.Entity{
		entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"),
	},
	expectBaseEntities: []*mongodoc.BaseEntity{
		baseEntity("~charmers/wordpress", true),
	},
}, {
	about: "multiple series not already promulgated",
	entities: []*mongodoc.Entity{
		entity("~charmers/trusty/wordpress-0", ""),
		entity("~charmers/precise/wordpress-0", ""),
	},
	baseEntities: []*mongodoc.BaseEntity{
		baseEntity("~charmers/wordpress", false),
	},
	url:        "~charmers/trusty/wordpress-0",
	promulgate: true,
	expectEntities: []*mongodoc.Entity{
		entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"),
		entity("~charmers/precise/wordpress-0", "precise/wordpress-0"),
	},
	expectBaseEntities: []*mongodoc.BaseEntity{
		baseEntity("~charmers/wordpress", true),
	},
}, { about: "charm promulgated as different user", entities: []*mongodoc.Entity{ entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), entity("~test-charmers/trusty/wordpress-0", ""), }, baseEntities: []*mongodoc.BaseEntity{ baseEntity("~charmers/wordpress", true), baseEntity("~test-charmers/wordpress", false), }, url: "~test-charmers/trusty/wordpress-0", promulgate: true, expectEntities: []*mongodoc.Entity{ entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-1"), }, expectBaseEntities: []*mongodoc.BaseEntity{ baseEntity("~charmers/wordpress", false), baseEntity("~test-charmers/wordpress", true), }, }, { about: "single charm already promulgated", entities: []*mongodoc.Entity{ entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), }, baseEntities: []*mongodoc.BaseEntity{ baseEntity("~charmers/wordpress", true), }, url: "~charmers/trusty/wordpress-0", promulgate: true, expectEntities: []*mongodoc.Entity{ entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), }, expectBaseEntities: []*mongodoc.BaseEntity{ baseEntity("~charmers/wordpress", true), }, }, { about: "unrelated charms are unaffected", entities: []*mongodoc.Entity{ entity("~charmers/trusty/wordpress-0", ""), entity("~test-charmers/trusty/mysql-0", "trusty/mysql-0"), }, baseEntities: []*mongodoc.BaseEntity{ baseEntity("~charmers/wordpress", false), baseEntity("~test-charmers/mysql", true), }, url: "~charmers/trusty/wordpress-0", promulgate: true, expectEntities: []*mongodoc.Entity{ entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), entity("~test-charmers/trusty/mysql-0", "trusty/mysql-0"), }, expectBaseEntities: []*mongodoc.BaseEntity{ baseEntity("~charmers/wordpress", true), baseEntity("~test-charmers/mysql", true), }, }, { about: "only one owner promulgated", entities: []*mongodoc.Entity{ entity("~charmers/trusty/wordpress-0", ""), entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-0"), 
entity("~test2-charmers/trusty/wordpress-0", "trusty/wordpress-1"), }, baseEntities: []*mongodoc.BaseEntity{ baseEntity("~charmers/wordpress", false), baseEntity("~test-charmers/wordpress", false), baseEntity("~test2-charmers/wordpress", true), }, url: "~charmers/trusty/wordpress-0", promulgate: true, expectEntities: []*mongodoc.Entity{ entity("~charmers/trusty/wordpress-0", "trusty/wordpress-2"), entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-0"), entity("~test2-charmers/trusty/wordpress-0", "trusty/wordpress-1"), }, expectBaseEntities: []*mongodoc.BaseEntity{ baseEntity("~charmers/wordpress", true), baseEntity("~test-charmers/wordpress", false), baseEntity("~test2-charmers/wordpress", false), }, }, { about: "recovers from two promulgated base entities", entities: []*mongodoc.Entity{ entity("~charmers/trusty/wordpress-0", ""), entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-0"), entity("~test-charmers/trusty/wordpress-1", "trusty/wordpress-2"), entity("~test2-charmers/trusty/wordpress-0", "trusty/wordpress-1"), }, baseEntities: []*mongodoc.BaseEntity{ baseEntity("~charmers/wordpress", false), baseEntity("~test-charmers/wordpress", true), baseEntity("~test2-charmers/wordpress", true), }, url: "~test2-charmers/trusty/wordpress-0", promulgate: true, expectEntities: []*mongodoc.Entity{ entity("~charmers/trusty/wordpress-0", ""), entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-0"), entity("~test-charmers/trusty/wordpress-1", "trusty/wordpress-2"), entity("~test2-charmers/trusty/wordpress-0", "trusty/wordpress-1"), }, expectBaseEntities: []*mongodoc.BaseEntity{ baseEntity("~charmers/wordpress", false), baseEntity("~test-charmers/wordpress", false), baseEntity("~test2-charmers/wordpress", true), }, }, { about: "multiple series already promulgated", entities: []*mongodoc.Entity{ entity("~charmers/trusty/wordpress-0", "trusty/wordpress-2"), entity("~charmers/precise/wordpress-0", "precise/wordpress-1"), 
entity("~test-charmers/trusty/wordpress-0", ""), entity("~test-charmers/utopic/wordpress-0", ""), }, baseEntities: []*mongodoc.BaseEntity{ baseEntity("~charmers/wordpress", true), baseEntity("~test-charmers/wordpress", false), }, url: "~test-charmers/trusty/wordpress-0", promulgate: true, expectEntities: []*mongodoc.Entity{ entity("~charmers/trusty/wordpress-0", "trusty/wordpress-2"), entity("~charmers/precise/wordpress-0", "precise/wordpress-1"), entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-3"), entity("~test-charmers/utopic/wordpress-0", "utopic/wordpress-0"), }, expectBaseEntities: []*mongodoc.BaseEntity{ baseEntity("~charmers/wordpress", false), baseEntity("~test-charmers/wordpress", true), }, }, { about: "unpromulgate single promulgated charm ", entities: []*mongodoc.Entity{ entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), }, baseEntities: []*mongodoc.BaseEntity{ baseEntity("~charmers/wordpress", true), }, url: "~charmers/trusty/wordpress-0", promulgate: false, expectEntities: []*mongodoc.Entity{ entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), }, expectBaseEntities: []*mongodoc.BaseEntity{ baseEntity("~charmers/wordpress", false), }, }, { about: "unpromulgate single unpromulgated charm ", entities: []*mongodoc.Entity{ entity("~charmers/trusty/wordpress-0", ""), }, baseEntities: []*mongodoc.BaseEntity{ baseEntity("~charmers/wordpress", false), }, url: "~charmers/trusty/wordpress-0", promulgate: false, expectEntities: []*mongodoc.Entity{ entity("~charmers/trusty/wordpress-0", ""), }, expectBaseEntities: []*mongodoc.BaseEntity{ baseEntity("~charmers/wordpress", false), }, }} func (s *StoreSuite) TestSetPromulgated(c *gc.C) { store := s.newStore(c, false) defer store.Close() for i, test := range promulgateTests { c.Logf("test %d. 
%s", i, test.about) url := router.MustNewResolvedURL(test.url, -1) _, err := store.DB.Entities().RemoveAll(nil) c.Assert(err, gc.IsNil) _, err = store.DB.BaseEntities().RemoveAll(nil) c.Assert(err, gc.IsNil) for _, entity := range test.entities { err := store.DB.Entities().Insert(entity) c.Assert(err, gc.IsNil) } for _, baseEntity := range test.baseEntities { err := store.DB.BaseEntities().Insert(baseEntity) c.Assert(err, gc.IsNil) } err = store.SetPromulgated(url, test.promulgate) if test.expectErr != "" { c.Assert(err, gc.ErrorMatches, test.expectErr) continue } c.Assert(err, gc.IsNil) n, err := store.DB.Entities().Count() c.Assert(err, gc.IsNil) c.Assert(n, gc.Equals, len(test.expectEntities)) n, err = store.DB.BaseEntities().Count() c.Assert(err, gc.IsNil) c.Assert(n, gc.Equals, len(test.expectBaseEntities)) for _, expectEntity := range test.expectEntities { entity, err := store.FindEntity(EntityResolvedURL(expectEntity), nil) c.Assert(err, gc.IsNil) c.Assert(entity, jc.DeepEquals, expectEntity) } for _, expectBaseEntity := range test.expectBaseEntities { baseEntity, err := store.FindBaseEntity(expectBaseEntity.URL, nil) c.Assert(err, gc.IsNil) c.Assert(storetesting.NormalizeBaseEntity(baseEntity), jc.DeepEquals, storetesting.NormalizeBaseEntity(expectBaseEntity)) } } } func (s *StoreSuite) TestSetPromulgatedUpdateSearch(c *gc.C) { store := s.newStore(c, true) defer store.Close() wordpress := storetesting.NewCharm(&charm.Meta{ Name: "wordpress", }) addCharmForSearch( c, store, router.MustNewResolvedURL("~charmers/trusty/wordpress-0", 2), wordpress, nil, 0, ) addCharmForSearch( c, store, router.MustNewResolvedURL("~charmers/precise/wordpress-0", 1), wordpress, nil, 0, ) addCharmForSearch( c, store, router.MustNewResolvedURL("~openstack-charmers/trusty/wordpress-0", -1), wordpress, nil, 0, ) addCharmForSearch( c, store, router.MustNewResolvedURL("~openstack-charmers/precise/wordpress-0", -1), wordpress, nil, 0, ) url := 
router.MustNewResolvedURL("~openstack-charmers/trusty/wordpress-0", -1) // Change the promulgated wordpress version to openstack-charmers. err := store.SetPromulgated(url, true) c.Assert(err, gc.IsNil) err = store.ES.RefreshIndex(s.TestIndex) c.Assert(err, gc.IsNil) // Check that the search records contain the correct information. var zdoc SearchDoc doc := zdoc err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseURL("~charmers/trusty/wordpress-0")), &doc) c.Assert(err, gc.IsNil) c.Assert(doc.PromulgatedURL, gc.IsNil) c.Assert(doc.PromulgatedRevision, gc.Equals, -1) doc = zdoc err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseURL("~charmers/precise/wordpress-0")), &doc) c.Assert(err, gc.IsNil) c.Assert(doc.PromulgatedURL, gc.IsNil) c.Assert(doc.PromulgatedRevision, gc.Equals, -1) doc = zdoc err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseURL("~openstack-charmers/trusty/wordpress-0")), &doc) c.Assert(err, gc.IsNil) c.Assert(doc.PromulgatedURL.String(), gc.Equals, "cs:trusty/wordpress-3") c.Assert(doc.PromulgatedRevision, gc.Equals, 3) doc = zdoc err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseURL("~openstack-charmers/precise/wordpress-0")), &doc) c.Assert(err, gc.IsNil) c.Assert(doc.PromulgatedURL.String(), gc.Equals, "cs:precise/wordpress-2") c.Assert(doc.PromulgatedRevision, gc.Equals, 2) // Remove the promulgated flag from openstack-charmers, meaning wordpress is // no longer promulgated. err = store.SetPromulgated(url, false) c.Assert(err, gc.IsNil) err = store.ES.RefreshIndex(s.TestIndex) c.Assert(err, gc.IsNil) // Check that the search records contain the correct information. 
doc = zdoc err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseURL("~charmers/trusty/wordpress-0")), &doc) c.Assert(err, gc.IsNil) c.Assert(doc.PromulgatedURL, gc.IsNil) c.Assert(doc.PromulgatedRevision, gc.Equals, -1) doc = zdoc err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseURL("~charmers/precise/wordpress-0")), &doc) c.Assert(err, gc.IsNil) c.Assert(doc.PromulgatedURL, gc.IsNil) c.Assert(doc.PromulgatedRevision, gc.Equals, -1) doc = zdoc err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseURL("~openstack-charmers/trusty/wordpress-0")), &doc) c.Assert(err, gc.IsNil) c.Assert(doc.PromulgatedURL, gc.IsNil) c.Assert(doc.PromulgatedRevision, gc.Equals, -1) doc = zdoc err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseURL("~openstack-charmers/precise/wordpress-0")), &doc) c.Assert(err, gc.IsNil) c.Assert(doc.PromulgatedURL, gc.IsNil) c.Assert(doc.PromulgatedRevision, gc.Equals, -1) } var entityResolvedURLTests = []struct { about string entity *mongodoc.Entity rurl *router.ResolvedURL }{{ about: "user owned, published", entity: &mongodoc.Entity{ URL: charm.MustParseURL("~charmers/precise/wordpress-23"), }, rurl: &router.ResolvedURL{ URL: *charm.MustParseURL("~charmers/precise/wordpress-23"), PromulgatedRevision: -1, }, }, { about: "promulgated, published", entity: &mongodoc.Entity{ URL: charm.MustParseURL("~charmers/precise/wordpress-23"), PromulgatedURL: charm.MustParseURL("precise/wordpress-4"), }, rurl: &router.ResolvedURL{ URL: *charm.MustParseURL("~charmers/precise/wordpress-23"), PromulgatedRevision: 4, }, }} func (s *StoreSuite) TestEntityResolvedURL(c *gc.C) { for i, test := range entityResolvedURLTests { c.Logf("test %d: %s", i, test.about) c.Assert(EntityResolvedURL(test.entity), gc.DeepEquals, test.rurl) } } func (s *StoreSuite) TestCopyCopiesSessions(c *gc.C) { store := s.newStore(c, false) wordpress := 
storetesting.Charms.CharmDir("wordpress") url := MustParseResolvedURL("23 cs:~charmers/precise/wordpress-23") err := store.AddCharmWithArchive(url, wordpress) c.Assert(err, gc.IsNil) store1 := store.Copy() defer store1.Close() // Close the store we copied from. The copy should be unaffected. store.Close() entity, err := store1.FindEntity(url, nil) c.Assert(err, gc.IsNil) // Also check the blob store, as it has its own session reference. r, _, err := store1.BlobStore.Open(entity.BlobName) c.Assert(err, gc.IsNil) r.Close() // Also check the macaroon storage as that also has its own session reference. m, err := store1.Bakery.NewMacaroon("", nil, nil) c.Assert(err, gc.IsNil) c.Assert(m, gc.NotNil) } func (s *StoreSuite) TestAddAudit(c *gc.C) { filename := filepath.Join(c.MkDir(), "audit.log") config := ServerParams{ AuditLogger: &lumberjack.Logger{ Filename: filename, }, } p, err := NewPool(s.Session.DB("juju_test"), nil, nil, config) c.Assert(err, gc.IsNil) defer p.Close() store := p.Store() defer store.Close() entries := []audit.Entry{{ User: "George Clooney", Op: audit.OpSetPerm, Entity: charm.MustParseURL("cs:mycharm"), ACL: &audit.ACL{ Read: []string{"eleven", "ocean"}, Write: []string{"brad", "pitt"}, }, }, { User: "Julia Roberts", Op: audit.OpSetPerm, }} now := time.Now() for _, e := range entries { store.addAuditAtTime(e, now) } data, err := ioutil.ReadFile(filename) c.Assert(err, gc.IsNil) lines := strings.Split(strings.TrimSuffix(string(data), "\n"), "\n") c.Assert(lines, gc.HasLen, len(entries)) for i, e := range entries { e.Time = now c.Assert(lines[i], jc.JSONEquals, e) } } func (s *StoreSuite) TestAddAuditWithNoLumberjack(c *gc.C) { p, err := NewPool(s.Session.DB("juju_test"), nil, nil, ServerParams{}) c.Assert(err, gc.IsNil) defer p.Close() store := p.Store() defer store.Close() // Check that it does not panic. 
store.AddAudit(audit.Entry{ User: "George Clooney", Op: audit.OpSetPerm, Entity: charm.MustParseURL("cs:mycharm"), ACL: &audit.ACL{ Read: []string{"eleven", "ocean"}, Write: []string{"brad", "pitt"}, }, }) } func (s *StoreSuite) TestDenormalizeEntity(c *gc.C) { e := &mongodoc.Entity{ URL: charm.MustParseURL("~someone/utopic/acharm-45"), } denormalizeEntity(e) c.Assert(e, jc.DeepEquals, &mongodoc.Entity{ URL: charm.MustParseURL("~someone/utopic/acharm-45"), BaseURL: charm.MustParseURL("~someone/acharm"), User: "someone", Name: "acharm", Revision: 45, Series: "utopic", PromulgatedRevision: -1, SupportedSeries: []string{"utopic"}, }) } func (s *StoreSuite) TestDenormalizePromulgatedEntity(c *gc.C) { e := &mongodoc.Entity{ URL: charm.MustParseURL("~someone/utopic/acharm-45"), PromulgatedURL: charm.MustParseURL("utopic/acharm-5"), } denormalizeEntity(e) c.Assert(e, jc.DeepEquals, &mongodoc.Entity{ URL: charm.MustParseURL("~someone/utopic/acharm-45"), BaseURL: charm.MustParseURL("~someone/acharm"), User: "someone", Name: "acharm", Revision: 45, Series: "utopic", PromulgatedURL: charm.MustParseURL("utopic/acharm-5"), PromulgatedRevision: 5, SupportedSeries: []string{"utopic"}, }) } func (s *StoreSuite) TestDenormalizeBundleEntity(c *gc.C) { e := &mongodoc.Entity{ URL: charm.MustParseURL("~someone/bundle/acharm-45"), } denormalizeEntity(e) c.Assert(e, jc.DeepEquals, &mongodoc.Entity{ URL: charm.MustParseURL("~someone/bundle/acharm-45"), BaseURL: charm.MustParseURL("~someone/acharm"), User: "someone", Name: "acharm", Revision: 45, Series: "bundle", PromulgatedRevision: -1, }) } func (s *StoreSuite) TestBundleCharms(c *gc.C) { // Populate the store with some testing charms. 
mysql := storetesting.Charms.CharmArchive(c.MkDir(), "mysql") store := s.newStore(c, true) defer store.Close() rurl := router.MustNewResolvedURL("cs:~charmers/saucy/mysql-0", 0) err := store.AddCharmWithArchive(rurl, mysql) c.Assert(err, gc.IsNil) err = store.Publish(rurl, params.StableChannel) c.Assert(err, gc.IsNil) riak := storetesting.Charms.CharmArchive(c.MkDir(), "riak") rurl = router.MustNewResolvedURL("cs:~charmers/trusty/riak-42", 42) err = store.AddCharmWithArchive(rurl, riak) c.Assert(err, gc.IsNil) err = store.Publish(rurl, params.StableChannel) c.Assert(err, gc.IsNil) wordpress := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") rurl = router.MustNewResolvedURL("cs:~charmers/utopic/wordpress-47", 47) err = store.AddCharmWithArchive(rurl, wordpress) c.Assert(err, gc.IsNil) err = store.Publish(rurl, params.StableChannel) c.Assert(err, gc.IsNil) tests := []struct { about string ids []string charms map[string]charm.Charm }{{ about: "no ids", }, { about: "fully qualified ids", ids: []string{ "cs:~charmers/saucy/mysql-0", "cs:~charmers/trusty/riak-42", "cs:~charmers/utopic/wordpress-47", }, charms: map[string]charm.Charm{ "cs:~charmers/saucy/mysql-0": mysql, "cs:~charmers/trusty/riak-42": riak, "cs:~charmers/utopic/wordpress-47": wordpress, }, }, { about: "partial ids", ids: []string{"~charmers/utopic/wordpress", "~charmers/riak"}, charms: map[string]charm.Charm{ "~charmers/riak": riak, "~charmers/utopic/wordpress": wordpress, }, }, { about: "charm not found", ids: []string{"utopic/no-such", "~charmers/mysql"}, charms: map[string]charm.Charm{ "~charmers/mysql": mysql, }, }, { about: "no charms found", ids: []string{ "cs:~charmers/saucy/mysql-99", // Revision not present. "cs:~charmers/precise/riak-42", // Series not present. "cs:~charmers/utopic/django-47", // Name not present. 
}, }, { about: "repeated charms", ids: []string{ "cs:~charmers/saucy/mysql", "cs:~charmers/trusty/riak-42", "~charmers/mysql", }, charms: map[string]charm.Charm{ "cs:~charmers/saucy/mysql": mysql, "cs:~charmers/trusty/riak-42": riak, "~charmers/mysql": mysql, }, }} // Run the tests. for i, test := range tests { c.Logf("test %d: %s", i, test.about) charms, err := store.bundleCharms(test.ids) c.Assert(err, gc.IsNil) // Ensure the charms returned are what we expect. c.Assert(charms, gc.HasLen, len(test.charms)) for i, ch := range charms { expectCharm := test.charms[i] c.Assert(ch.Meta(), jc.DeepEquals, expectCharm.Meta()) c.Assert(ch.Config(), jc.DeepEquals, expectCharm.Config()) c.Assert(ch.Actions(), jc.DeepEquals, expectCharm.Actions()) // Since the charm archive and the charm entity have a slightly // different concept of what a revision is, and since the revision // is not used for bundle validation, we can safely avoid checking // the charm revision. } } } var publishTests = []struct { about string url *router.ResolvedURL channels []params.Channel initialEntity *mongodoc.Entity initialBaseEntity *mongodoc.BaseEntity expectedEntity *mongodoc.Entity expectedBaseEntity *mongodoc.BaseEntity expectedErr string }{{ about: "unpublished, single series, publish development", url: MustParseResolvedURL("~who/trusty/django-42"), channels: []params.Channel{params.DevelopmentChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), }, expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), Development: true, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.DevelopmentChannel: { "trusty": charm.MustParseURL("~who/trusty/django-42"), }, }, }, }, { about: "development, single series, publish development", url: 
MustParseResolvedURL("~who/trusty/django-42"), channels: []params.Channel{params.DevelopmentChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), Development: true, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.DevelopmentChannel: { "trusty": charm.MustParseURL("~who/trusty/django-41"), }, }, }, expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), Development: true, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.DevelopmentChannel: { "trusty": charm.MustParseURL("~who/trusty/django-42"), }, }, }, }, { about: "stable, single series, publish development", url: MustParseResolvedURL("~who/trusty/django-42"), channels: []params.Channel{params.DevelopmentChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), Stable: true, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.StableChannel: { "trusty": charm.MustParseURL("~who/trusty/django-42"), }, }, }, expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), Stable: true, Development: true, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.StableChannel: { "trusty": charm.MustParseURL("~who/trusty/django-42"), }, params.DevelopmentChannel: { "trusty": charm.MustParseURL("~who/trusty/django-42"), }, }, }, }, { about: "unpublished, single series, publish stable", url: MustParseResolvedURL("~who/trusty/django-42"), channels: []params.Channel{params.StableChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), }, initialBaseEntity: &mongodoc.BaseEntity{ 
URL: charm.MustParseURL("~who/django"), }, expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), Stable: true, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.StableChannel: { "trusty": charm.MustParseURL("~who/trusty/django-42"), }, }, }, }, { about: "development, single series, publish stable", url: MustParseResolvedURL("~who/trusty/django-42"), channels: []params.Channel{params.StableChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), Development: true, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.DevelopmentChannel: { "trusty": charm.MustParseURL("~who/trusty/django-41"), }, }, }, expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), Development: true, Stable: true, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.DevelopmentChannel: { "trusty": charm.MustParseURL("~who/trusty/django-41"), }, params.StableChannel: { "trusty": charm.MustParseURL("~who/trusty/django-42"), }, }, }, }, { about: "stable, single series, publish stable", url: MustParseResolvedURL("~who/trusty/django-42"), channels: []params.Channel{params.StableChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), Stable: true, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.StableChannel: { "trusty": charm.MustParseURL("~who/trusty/django-40"), }, }, }, expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), Stable: true, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: 
map[params.Channel]map[string]*charm.URL{ params.StableChannel: { "trusty": charm.MustParseURL("~who/trusty/django-42"), }, }, }, }, { about: "unpublished, multi series, publish development", url: MustParseResolvedURL("~who/django-42"), channels: []params.Channel{params.DevelopmentChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"trusty", "wily"}, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), }, expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"trusty", "wily"}, Development: true, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.DevelopmentChannel: { "trusty": charm.MustParseURL("~who/django-42"), "wily": charm.MustParseURL("~who/django-42"), }, }, }, }, { about: "development, multi series, publish development", url: MustParseResolvedURL("~who/django-42"), channels: []params.Channel{params.DevelopmentChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), Development: true, SupportedSeries: []string{"trusty", "wily"}, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.DevelopmentChannel: { "precise": charm.MustParseURL("~who/django-0"), "trusty": charm.MustParseURL("~who/trusty/django-0"), }, }, }, expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), Development: true, SupportedSeries: []string{"trusty", "wily"}, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.DevelopmentChannel: { "precise": charm.MustParseURL("~who/django-0"), "trusty": charm.MustParseURL("~who/django-42"), "wily": charm.MustParseURL("~who/django-42"), }, }, }, }, { about: "stable, multi 
series, publish development", url: MustParseResolvedURL("~who/django-47"), channels: []params.Channel{params.DevelopmentChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-47"), SupportedSeries: []string{"trusty", "wily", "precise"}, Stable: true, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.StableChannel: { "trusty": charm.MustParseURL("~who/django-47"), }, }, }, expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-47"), SupportedSeries: []string{"trusty", "wily", "precise"}, Stable: true, Development: true, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.StableChannel: { "trusty": charm.MustParseURL("~who/django-47"), }, params.DevelopmentChannel: { "trusty": charm.MustParseURL("~who/django-47"), "wily": charm.MustParseURL("~who/django-47"), "precise": charm.MustParseURL("~who/django-47"), }, }, }, }, { about: "unpublished, multi series, publish stable", url: MustParseResolvedURL("~who/django-42"), channels: []params.Channel{params.StableChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"trusty", "wily", "precise"}, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), }, expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"trusty", "wily", "precise"}, Stable: true, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.StableChannel: { "trusty": charm.MustParseURL("~who/django-42"), "wily": charm.MustParseURL("~who/django-42"), "precise": charm.MustParseURL("~who/django-42"), }, }, }, }, { about: "development, multi series, publish stable", url: 
MustParseResolvedURL("~who/django-42"), channels: []params.Channel{params.StableChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"wily"}, Development: true, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.DevelopmentChannel: { "trusty": charm.MustParseURL("~who/django-0"), }, }, }, expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"wily"}, Development: true, Stable: true, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.StableChannel: { "wily": charm.MustParseURL("~who/django-42"), }, params.DevelopmentChannel: { "trusty": charm.MustParseURL("~who/django-0"), }, }, }, }, { about: "stable, multi series, publish stable", url: MustParseResolvedURL("~who/django-42"), channels: []params.Channel{params.StableChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"trusty", "wily", "precise"}, Stable: true, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.StableChannel: { "precise": charm.MustParseURL("~who/django-1"), "quantal": charm.MustParseURL("~who/django-2"), "saucy": charm.MustParseURL("~who/django-3"), "trusty": charm.MustParseURL("~who/django-4"), }, }, }, expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"trusty", "wily", "precise"}, Stable: true, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.StableChannel: { "precise": charm.MustParseURL("~who/django-42"), "quantal": charm.MustParseURL("~who/django-2"), "saucy": 
charm.MustParseURL("~who/django-3"), "trusty": charm.MustParseURL("~who/django-42"), "wily": charm.MustParseURL("~who/django-42"), }, }, }, }, { about: "bundle", url: MustParseResolvedURL("~who/bundle/django-42"), channels: []params.Channel{params.StableChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/bundle/django-42"), }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), }, expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/bundle/django-42"), Stable: true, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.StableChannel: { "bundle": charm.MustParseURL("~who/bundle/django-42"), }, }, }, }, { about: "unpublished, multi series, publish multiple channels", url: MustParseResolvedURL("~who/django-42"), channels: []params.Channel{params.DevelopmentChannel, params.StableChannel, params.Channel("no-such")}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"trusty", "wily"}, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.StableChannel: { "quantal": charm.MustParseURL("~who/django-1"), "trusty": charm.MustParseURL("~who/django-4"), }, params.DevelopmentChannel: { "wily": charm.MustParseURL("~who/django-10"), }, }, }, expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"trusty", "wily"}, Development: true, Stable: true, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ params.DevelopmentChannel: { "trusty": charm.MustParseURL("~who/django-42"), "wily": charm.MustParseURL("~who/django-42"), }, params.StableChannel: { "quantal": charm.MustParseURL("~who/django-1"), "trusty": charm.MustParseURL("~who/django-42"), 
"wily": charm.MustParseURL("~who/django-42"), }, }, }, }, { about: "not found", url: MustParseResolvedURL("~who/trusty/no-such-42"), channels: []params.Channel{params.DevelopmentChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), }, expectedErr: `cannot update "cs:~who/trusty/no-such-42": not found`, }, { about: "no valid channels provided", url: MustParseResolvedURL("~who/trusty/django-42"), channels: []params.Channel{params.Channel("not-valid")}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), }, expectedErr: `cannot update "cs:~who/trusty/django-42": no channels provided`, }} func (s *StoreSuite) TestPublish(c *gc.C) { store := s.newStore(c, true) defer store.Close() for i, test := range publishTests { c.Logf("test %d: %s", i, test.about) // Remove existing entities and base entities. _, err := store.DB.Entities().RemoveAll(nil) c.Assert(err, gc.IsNil) _, err = store.DB.BaseEntities().RemoveAll(nil) c.Assert(err, gc.IsNil) // Insert the existing entity. err = store.DB.Entities().Insert(denormalizedEntity(test.initialEntity)) c.Assert(err, gc.IsNil) // Insert the existing base entity. err = store.DB.BaseEntities().Insert(test.initialBaseEntity) c.Assert(err, gc.IsNil) // Publish the entity. err = store.Publish(test.url, test.channels...) 
if test.expectedErr != "" { c.Assert(err, gc.ErrorMatches, test.expectedErr) continue } c.Assert(err, gc.IsNil) entity, err := store.FindEntity(test.url, nil) c.Assert(err, gc.IsNil) c.Assert(entity, jc.DeepEquals, denormalizedEntity(test.expectedEntity)) baseEntity, err := store.FindBaseEntity(&test.url.URL, nil) c.Assert(err, gc.IsNil) c.Assert(storetesting.NormalizeBaseEntity(baseEntity), jc.DeepEquals, storetesting.NormalizeBaseEntity(test.expectedBaseEntity)) } } func (s *StoreSuite) TestPublishWithFailedESInsert(c *gc.C) { // Make an elastic search with a non-existent address, // so that will try to add the charm there, but fail. esdb := &elasticsearch.Database{ Addr: "0.1.2.3:0123", } store := s.newStore(c, false) defer store.Close() store.ES = &SearchIndex{esdb, "no-index"} url := router.MustNewResolvedURL("~charmers/precise/wordpress-12", -1) err := store.AddCharmWithArchive(url, storetesting.Charms.CharmDir("wordpress")) c.Assert(err, gc.IsNil) err = store.Publish(url, params.StableChannel) c.Assert(err, gc.ErrorMatches, "cannot index cs:~charmers/precise/wordpress-12 to ElasticSearch: .*") } func entity(url, purl string) *mongodoc.Entity { id := charm.MustParseURL(url) var pid *charm.URL if purl != "" { pid = charm.MustParseURL(purl) } e := &mongodoc.Entity{ URL: id, PromulgatedURL: pid, } denormalizeEntity(e) return e } func baseEntity(url string, promulgated bool) *mongodoc.BaseEntity { id := charm.MustParseURL(url) return &mongodoc.BaseEntity{ URL: id, Name: id.Name, User: id.User, Promulgated: mongodoc.IntBool(promulgated), ChannelEntities: make(map[params.Channel]map[string]*charm.URL), } } // denormalizedEntity is a convenience function that returns // a copy of e with its denormalized fields filled out. 
func denormalizedEntity(e *mongodoc.Entity) *mongodoc.Entity { e1 := *e denormalizeEntity(&e1) return &e1 } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/zip.go0000664000175000017500000000252012672604603026507 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( "archive/zip" "compress/flate" "io" "gopkg.in/errgo.v1" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" ) // ZipFileReader returns a reader that will read // content referred to by f within zipr, which should // refer to the contents of a zip file, func ZipFileReader(zipr io.ReadSeeker, f mongodoc.ZipFile) (io.Reader, error) { if _, err := zipr.Seek(f.Offset, 0); err != nil { return nil, errgo.Notef(err, "cannot seek to %d in zip content", f.Offset) } content := io.LimitReader(zipr, f.Size) if !f.Compressed { return content, nil } return flate.NewReader(content), nil } // NewZipFile returns a new mongodoc zip file // reference to the given zip file. func NewZipFile(f *zip.File) (mongodoc.ZipFile, error) { offset, err := f.DataOffset() if err != nil { return mongodoc.ZipFile{}, errgo.Notef(err, "cannot determine data offset for %q", f.Name) } zf := mongodoc.ZipFile{ Offset: offset, Size: int64(f.CompressedSize64), } switch f.Method { case zip.Store: case zip.Deflate: zf.Compressed = true default: return mongodoc.ZipFile{}, errgo.Newf("unknown zip compression method for %q", f.Name) } return zf, nil } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations.go0000664000175000017500000002706012672604603030067 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" ) const ( migrationAddSupportedSeries mongodoc.MigrationName = "add supported series" migrationAddDevelopment mongodoc.MigrationName = "add development" migrationAddDevelopmentACLs mongodoc.MigrationName = "add development acls" migrationFixBogusPromulgatedURL mongodoc.MigrationName = "fix promulgate url" migrationAddPreV5CompatBlobBogus mongodoc.MigrationName = "add pre-v5 compatibility blobs" migrationAddPreV5CompatBlob mongodoc.MigrationName = "add pre-v5 compatibility blobs; second try" migrationNewChannelsModel mongodoc.MigrationName = "new channels model" ) // migrations holds all the migration functions that are executed in the order // they are defined when the charm store server is started. Each migration is // associated with a name that is used to check whether the migration has been // already run. To introduce a new database migration, add the corresponding // migration name and function to this list, and update the // TestMigrateMigrationList test in migration_test.go adding the new name(s). // Note that migration names must be unique across the list. // // A migration entry may have a nil migration function if the migration // is obsolete. Obsolete migrations should never be removed entirely, // otherwise the charmstore will see the old migrations in the table // and refuse to start up because it thinks that it's running an old // version of the charm store on a newer version of the database. 
var migrations = []migration{{
	name: "entity ids denormalization",
}, {
	name: "base entities creation",
}, {
	name: "read acl creation",
}, {
	name: "write acl creation",
}, {
	name: migrationAddSupportedSeries,
}, {
	name: migrationAddDevelopment,
}, {
	name: migrationAddDevelopmentACLs,
}, {
	name: migrationFixBogusPromulgatedURL,
}, {
	// The original migration that attempted to do this actually did
	// nothing, so leave it here but use a new name for the
	// fixed version.
	name: migrationAddPreV5CompatBlobBogus,
}, {
	name:    migrationAddPreV5CompatBlob,
	migrate: addPreV5CompatBlob,
}, {
	name:    migrationNewChannelsModel,
	migrate: migrateToNewChannelsModel,
}}

// migration holds a migration function with its corresponding name.
type migration struct {
	name mongodoc.MigrationName
	// migrate is nil for obsolete migrations; such entries are
	// kept only so that their names remain known (see above).
	migrate func(StoreDatabase) error
}

// Migrate starts the migration process using the given database.
func migrate(db StoreDatabase) error {
	// Retrieve already executed migrations.
	executed, err := getExecuted(db)
	if err != nil {
		return errgo.Mask(err)
	}

	// Explicitly create the collection in case there are no migrations
	// so that the tests that expect the migrations collection to exist
	// will pass. We ignore the error because we'll get one if the
	// collection already exists and there's no special type or value
	// for that (and if it's a genuine error, we'll catch the problem later
	// anyway).
	db.Migrations().Create(&mgo.CollectionInfo{})

	// Execute required migrations.
	for _, m := range migrations {
		if executed[m.name] || m.migrate == nil {
			logger.Debugf("skipping already executed migration: %s", m.name)
			continue
		}
		logger.Infof("starting migration: %s", m.name)
		if err := m.migrate(db); err != nil {
			return errgo.Notef(err, "error executing migration: %s", m.name)
		}
		// Record completion so the migration is never run twice.
		if err := setExecuted(db, m.name); err != nil {
			return errgo.Mask(err)
		}
		logger.Infof("migration completed: %s", m.name)
	}
	return nil
}

// getExecuted returns the set of migration names that have already been
// run on db. It fails if the database records a migration name that this
// version of the code does not know about.
func getExecuted(db StoreDatabase) (map[mongodoc.MigrationName]bool, error) {
	// Retrieve the already executed migration names.
	executed := make(map[mongodoc.MigrationName]bool)
	var doc mongodoc.Migration
	if err := db.Migrations().Find(nil).Select(bson.D{{"executed", 1}}).One(&doc); err != nil {
		if err == mgo.ErrNotFound {
			return executed, nil
		}
		return nil, errgo.Notef(err, "cannot retrieve executed migrations")
	}

	names := make(map[mongodoc.MigrationName]bool, len(migrations))
	for _, m := range migrations {
		names[m.name] = true
	}
	for _, name := range doc.Executed {
		name := mongodoc.MigrationName(name)
		// Check that the already executed migrations are known.
		if !names[name] {
			return nil, errgo.Newf("found unknown migration %q; running old charm store code on newer charm store database?", name)
		}
		// Collect the name of the executed migration.
		executed[name] = true
	}
	return executed, nil
}

// addPreV5CompatBlob adds a pre-v5 compatibility blob to every entity
// that needs one, recording its hash and size on the entity document.
// Entities without multi-series charm metadata reuse their original blob.
func addPreV5CompatBlob(db StoreDatabase) error {
	blobStore := blobstore.New(db.Database, "entitystore")
	entities := db.Entities()
	iter := entities.Find(nil).Select(map[string]int{
		"size":             1,
		"blobhash":         1,
		"blobname":         1,
		"blobhash256":      1,
		"charmmeta.series": 1,
	}).Iter()
	var entity mongodoc.Entity
	for iter.Next(&entity) {
		var info *preV5CompatibilityHackBlobInfo
		if entity.CharmMeta == nil || len(entity.CharmMeta.Series) == 0 {
			// No multi-series metadata: the original blob already
			// works for pre-v5 clients.
			info = &preV5CompatibilityHackBlobInfo{
				hash:    entity.BlobHash,
				hash256: entity.BlobHash256,
				size:    entity.Size,
			}
		} else {
			r, _, err := blobStore.Open(entity.BlobName)
			if err != nil {
				return errgo.Notef(err, "cannot open original blob")
			}
			info, err = addPreV5CompatibilityHackBlob(blobStore, r, entity.BlobName, entity.Size)
			r.Close()
			if err != nil {
				return errgo.Mask(err)
			}
		}
		err := entities.UpdateId(entity.URL, bson.D{{
			"$set", bson.D{{
				"prev5blobhash", info.hash,
			}, {
				"prev5blobhash256", info.hash256,
			}, {
				"prev5blobsize", info.size,
			}},
		}})
		if err != nil {
			return errgo.Notef(err, "cannot update pre-v5 info")
		}
	}
	if err := iter.Err(); err != nil {
		return errgo.Notef(err, "cannot iterate through entities")
	}
	return nil
}

// migrateToNewChannelsModel migrates entities and base entities to the
// new channels model, in two phases (see the called functions).
func migrateToNewChannelsModel(db StoreDatabase) error {
	if err := ncmUpdateDevelopmentAndStable(db); err != nil {
		return errgo.Mask(err)
	}
	if err := ncmUpdateBaseEntities(db); err != nil {
		return errgo.Mask(err)
	}
	return nil
}

// ncmUpdateDevelopmentAndStable updates the Development and Stable
// entity fields to conform to the new channels model.
// All entities are treated as if they're in development; entities
// without the development field set are treated as stable.
func ncmUpdateDevelopmentAndStable(db StoreDatabase) error {
	entities := db.Entities()
	iter := entities.Find(bson.D{{
		"stable", bson.D{{"$exists", false}},
	}}).Select(map[string]int{
		"_id":         1,
		"development": 1,
	}).Iter()
	// For every entity without a stable field, update
	// its development and stable fields appropriately.
	var entity mongodoc.Entity
	for iter.Next(&entity) {
		err := entities.UpdateId(entity.URL, bson.D{{
			"$set", bson.D{
				{"development", true},
				{"stable", !entity.Development},
			},
		}})
		if err != nil {
			return errgo.Notef(err, "cannot update entity")
		}
	}
	if err := iter.Err(); err != nil {
		return errgo.Notef(err, "cannot iterate through entities")
	}
	return nil
}

// preNCMBaseEntity holds the type of a base entity just before
// the new channels model migration.
type preNCMBaseEntity struct {
	// URL holds the reference URL of the charm or bundle
	// regardless of its revision, series or promulgation status
	// (this omits the revision and series from URL).
	// e.g., cs:~user/collection/foo
	URL *charm.URL `bson:"_id"`

	// User holds the user part of the entity URL (for instance, "joe").
	User string

	// Name holds the name of the entity (for instance "wordpress").
	Name string

	// Public specifies whether the charm or bundle
	// is available to all users. If this is true, the ACLs will
	// be ignored when reading a charm.
	Public bool

	// ACLs holds permission information relevant to the base entity.
	// The permissions apply to all revisions.
	ACLs mongodoc.ACL

	// DevelopmentACLs is similar to ACLs but applies to all development
	// revisions.
	DevelopmentACLs mongodoc.ACL

	// Promulgated specifies whether the charm or bundle should be
	// promulgated.
	Promulgated mongodoc.IntBool

	// CommonInfo holds arbitrary common extra metadata associated with
	// the base entity. Those data apply to all revisions.
	// The byte slices hold JSON-encoded data.
	CommonInfo map[string][]byte `bson:",omitempty" json:",omitempty"`
}

// ncmUpdateBaseEntities updates all the base entities to conform to
// the new channels model. It assumes that ncmUpdateDevelopmentAndStable
// has been run already.
func ncmUpdateBaseEntities(db StoreDatabase) error {
	baseEntities := db.BaseEntities()
	iter := baseEntities.Find(bson.D{{
		"channelentities", bson.D{{"$exists", false}},
	}}).Iter()
	// For every base entity without a ChannelEntities field, update
	// its ChannelEntities and ChannelACLs field appropriately.
	var baseEntity preNCMBaseEntity
	for iter.Next(&baseEntity) {
		if err := ncmUpdateBaseEntity(db, &baseEntity); err != nil {
			return errgo.Mask(err)
		}
	}
	if err := iter.Err(); err != nil {
		return errgo.Notef(err, "cannot iterate through base entities")
	}
	return nil
}

// ncmUpdateBaseEntity updates a single base entity to conform to
// the new channels model.
func ncmUpdateBaseEntity(db StoreDatabase, baseEntity *preNCMBaseEntity) error {
	channelEntities := make(map[params.Channel]map[string]*charm.URL)
	// updateChannelURL records url as the published entity for the
	// given channel and series if it is more recent than any entity
	// recorded so far.
	updateChannelURL := func(url *charm.URL, ch params.Channel, series string) {
		if channelEntities[ch] == nil {
			channelEntities[ch] = make(map[string]*charm.URL)
		}
		if oldURL := channelEntities[ch][series]; oldURL == nil || oldURL.Revision < url.Revision {
			channelEntities[ch][series] = url
		}
	}
	// updateChannelEntity updates the series entries in channelEntities
	// for the given entity, setting the entity URL entry if the revision
	// is greater than any already found.
	updateChannelEntity := func(entity *mongodoc.Entity, ch params.Channel) {
		if entity.URL.Series == "" {
			for _, series := range entity.SupportedSeries {
				updateChannelURL(entity.URL, ch, series)
			}
		} else {
			updateChannelURL(entity.URL, ch, entity.URL.Series)
		}
	}
	// Iterate through all the entities associated with the base entity
	// to find the most recent "published" entities so that we can
	// populate the ChannelEntities field.
	var entity mongodoc.Entity
	iter := db.Entities().Find(bson.D{{"baseurl", baseEntity.URL}}).Iter()
	for iter.Next(&entity) {
		if entity.Development {
			updateChannelEntity(&entity, params.DevelopmentChannel)
		}
		if entity.Stable {
			updateChannelEntity(&entity, params.StableChannel)
		}
	}
	if err := iter.Err(); err != nil {
		return errgo.Notef(err, "cannot iterate through entities")
	}
	// Write the new channel fields and drop the superseded ACL fields.
	err := db.BaseEntities().UpdateId(baseEntity.URL, bson.D{{
		"$set", bson.D{{
			"channelentities", channelEntities,
		}, {
			"channelacls", map[params.Channel]mongodoc.ACL{
				params.UnpublishedChannel: baseEntity.DevelopmentACLs,
				params.DevelopmentChannel: baseEntity.DevelopmentACLs,
				params.StableChannel:      baseEntity.ACLs,
			},
		}},
	}, {
		"$unset", bson.D{{
			"developmentacls", nil,
		}, {
			"acls", nil,
		}},
	}})
	if err != nil {
		return errgo.Notef(err, "cannot update base entity")
	}
	return nil
}

// setExecuted records that the named migration has been run.
func setExecuted(db StoreDatabase, name mongodoc.MigrationName) error {
	if _, err := db.Migrations().Upsert(nil, bson.D{{
		"$addToSet", bson.D{{"executed", name}},
	}}); err != nil {
		return errgo.Notef(err, "cannot add %s to executed migrations", name)
	}
	return nil
}
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/stats.go0000664000175000017500000004667112672604603027052 0ustar marcomarco// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"

import (
	"encoding/json"
	"fmt"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"gopkg.in/errgo.v1"
	"gopkg.in/juju/charm.v6-unstable"
	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"

	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
)

// stats holds the in-memory state for the statistics subsystem.
type stats struct {
	// Cache for statistics key words (two generations).
	cacheMu       sync.RWMutex
	statsIdNew    map[string]int
	statsIdOld    map[string]int
	statsTokenNew map[int]string
	statsTokenOld map[int]string
}

// Note that changing the StatsGranularity constant
// will not change the stats time granularity - it
// is defined for external code clarity.

// StatsGranularity holds the time granularity of statistics
// gathering. IncCounter(Async) calls within this duration
// may be aggregated.
const StatsGranularity = time.Minute

// The stats mechanism uses the following MongoDB collections:
//
//     juju.stat.counters - Counters for statistics
//     juju.stat.tokens   - Tokens used in statistics counter keys

// StatCounters returns the collection holding the statistics counters.
func (s StoreDatabase) StatCounters() *mgo.Collection {
	return s.C("juju.stat.counters")
}

// StatTokens returns the collection mapping key tokens to integer ids.
func (s StoreDatabase) StatTokens() *mgo.Collection {
	return s.C("juju.stat.tokens")
}

// key returns the compound statistics identifier that represents key.
// If write is true, the identifier will be created if necessary.
// Identifiers have a form similar to "ab:c:def:", where each section is a
// base-32 number that represents the respective word in key. This form
// allows efficiently indexing and searching for prefixes, while detaching
// the key content and size from the actual words used in key.
func (s *stats) key(db StoreDatabase, key []string, write bool) (string, error) {
	if len(key) == 0 {
		return "", errgo.New("store: empty statistics key")
	}
	tokens := db.StatTokens()
	skey := make([]byte, 0, len(key)*4)
	// Retry limit is mainly to prevent infinite recursion in edge cases,
	// such as if the database is ever run in read-only mode.
	// The logic below should deterministically stop in normal scenarios.
	var err error
	// Note: i only advances on success; a failed iteration consumes a
	// retry and re-attempts the same key word.
	for i, retry := 0, 30; i < len(key) && retry > 0; retry-- {
		err = nil
		id, found := s.tokenId(key[i])
		if !found {
			var t tokenId
			err = tokens.Find(bson.D{{"t", key[i]}}).One(&t)
			if err == mgo.ErrNotFound {
				if !write {
					return "", errgo.WithCausef(nil, params.ErrNotFound, "")
				}
				// Allocate the next token id from the collection size.
				t.Id, err = tokens.Count()
				if err != nil {
					continue
				}
				t.Id++
				t.Token = key[i]
				err = tokens.Insert(&t)
			}
			if err != nil {
				continue
			}
			s.cacheTokenId(t.Token, t.Id)
			id = t.Id
		}
		skey = strconv.AppendInt(skey, int64(id), 32)
		skey = append(skey, ':')
		i++
	}
	if err != nil {
		return "", err
	}
	return string(skey), nil
}

const statsTokenCacheSize = 1024

// tokenId maps a statistics key word to its integer id.
type tokenId struct {
	Id    int    `bson:"_id"`
	Token string `bson:"t"`
}

// cacheTokenId adds the id for token into the cache.
// The cache has two generations so that the least frequently used
// tokens are evicted regularly.
func (s *stats) cacheTokenId(token string, id int) {
	s.cacheMu.Lock()
	defer s.cacheMu.Unlock()
	// Can't possibly be >, but reviews want it for defensiveness.
	if len(s.statsIdNew) >= statsTokenCacheSize {
		// Rotate generations: the new generation becomes the old one.
		s.statsIdOld = s.statsIdNew
		s.statsIdNew = nil
		s.statsTokenOld = s.statsTokenNew
		s.statsTokenNew = nil
	}
	if s.statsIdNew == nil {
		s.statsIdNew = make(map[string]int, statsTokenCacheSize)
		s.statsTokenNew = make(map[int]string, statsTokenCacheSize)
	}
	s.statsIdNew[token] = id
	s.statsTokenNew[id] = token
}

// tokenId returns the id for token from the cache, if found.
func (s *stats) tokenId(token string) (id int, found bool) {
	s.cacheMu.RLock()
	id, found = s.statsIdNew[token]
	if found {
		s.cacheMu.RUnlock()
		return
	}
	id, found = s.statsIdOld[token]
	s.cacheMu.RUnlock()
	if found {
		// Promote the old-generation hit into the new generation.
		s.cacheTokenId(token, id)
	}
	return
}

// idToken returns the token for id from the cache, if found.
func (s *stats) idToken(id int) (token string, found bool) {
	s.cacheMu.RLock()
	token, found = s.statsTokenNew[id]
	if found {
		s.cacheMu.RUnlock()
		return
	}
	token, found = s.statsTokenOld[id]
	s.cacheMu.RUnlock()
	if found {
		// Promote the old-generation hit into the new generation.
		s.cacheTokenId(token, id)
	}
	return
}

// counterEpoch is the reference instant from which counter timestamps
// are measured (see timeToStamp).
var counterEpoch = time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Unix()

// timeToStamp converts t to seconds elapsed since counterEpoch.
func timeToStamp(t time.Time) int32 {
	return int32(t.Unix() - counterEpoch)
}

// IncCounterAsync increases by one the counter associated with the composed
// key. The action is done in the background using a separate goroutine.
func (s *Store) IncCounterAsync(key []string) {
	s.Go(func(s *Store) {
		if err := s.IncCounter(key); err != nil {
			logger.Errorf("cannot increase stats counter for key %v: %v", key, err)
		}
	})
}

// IncCounter increases by one the counter associated with the composed key.
func (s *Store) IncCounter(key []string) error {
	return s.IncCounterAtTime(key, time.Now())
}

// IncCounterAtTime increases by one the counter associated with the composed
// key, associating it with the given time.
func (s *Store) IncCounterAtTime(key []string, t time.Time) error {
	skey, err := s.stats.key(s.DB, key, true)
	if err != nil {
		return err
	}
	// Round to the start of the minute so we get one document per minute at most.
	t = t.UTC().Add(-time.Duration(t.Second()) * time.Second)
	counters := s.DB.StatCounters()
	_, err = counters.Upsert(bson.D{{"k", skey}, {"t", timeToStamp(t)}}, bson.D{{"$inc", bson.D{{"c", 1}}}})
	return err
}

// CounterRequest represents a request to aggregate counter values.
type CounterRequest struct {
	// Key and Prefix determine the counter keys to match.
	// If Prefix is false, Key must match exactly. Otherwise, counters
	// must begin with Key and have at least one more key token.
	Key    []string
	Prefix bool

	// If List is true, matching counters are aggregated under their
	// prefixes instead of being returned as a single overall sum.
	//
	// For example, given the following counts:
	//
	//   {"a", "b"}: 1,
	//   {"a", "c"}: 3
	//   {"a", "c", "d"}: 5
	//   {"a", "c", "e"}: 7
	//
	// and assuming that Prefix is true, the following keys will
	// present the respective results if List is true:
	//
	//        {"a"} => {{"a", "b"}, 1, false},
	//                 {{"a", "c"}, 3, false},
	//                 {{"a", "c"}, 12, true}
	//   {"a", "c"} => {{"a", "c", "d"}, 3, false},
	//                 {{"a", "c", "e"}, 5, false}
	//
	// If List is false, the same key prefixes will present:
	//
	//        {"a"} => {{"a"}, 16, true}
	//   {"a", "c"} => {{"a", "c"}, 12, false}
	List bool

	// By defines the period covered by each aggregated data point.
	// If unspecified, it defaults to ByAll, which aggregates all
	// matching data points in a single entry.
	By CounterRequestBy

	// Start, if provided, changes the query so that only data points
	// occurring at the given time or afterwards are considered.
	Start time.Time

	// Stop, if provided, changes the query so that only data points
	// occurring at the given time or before are considered.
	Stop time.Time
}

// CounterRequestBy enumerates the aggregation periods for Counters.
type CounterRequestBy int

const (
	ByAll CounterRequestBy = iota
	ByDay
	ByWeek
)

// Counter holds one aggregated counter result.
type Counter struct {
	Key    []string
	Prefix bool
	Count  int64
	Time   time.Time
}

// Counters aggregates and returns counter values according to the provided request.
func (s *Store) Counters(req *CounterRequest) ([]Counter, error) {
	tokensColl := s.DB.StatTokens()
	countersColl := s.DB.StatCounters()

	searchKey, err := s.stats.key(s.DB, req.Key, false)
	if errgo.Cause(err) == params.ErrNotFound {
		// No tokens exist for the key, so there is nothing to count.
		if !req.List {
			return []Counter{{
				Key:    req.Key,
				Prefix: req.Prefix,
				Count:  0,
			}}, nil
		}
		return nil, nil
	}
	if err != nil {
		return nil, errgo.Mask(err)
	}
	var regex string
	if req.Prefix {
		regex = "^" + searchKey + ".+"
	} else {
		regex = "^" + searchKey + "$"
	}
	// This reduce function simply sums, for each emitted key, all the values found under it.
	job := mgo.MapReduce{Reduce: "function(key, values) { return Array.sum(values); }"}
	var emit string
	switch req.By {
	case ByDay:
		emit = "emit(k+'@'+NumberInt(this.t/86400), this.c);"
	case ByWeek:
		emit = "emit(k+'@'+NumberInt(this.t/604800), this.c);"
	default:
		emit = "emit(k, this.c);"
	}
	if req.List && req.Prefix {
		// For a search key "a:b:" matching a key "a:b:c:d:e:", this map function emits "a:b:c:*".
		// For a search key "a:b:" matching a key "a:b:c:", it emits "a:b:c:".
		// For a search key "a:b:" matching a key "a:b:", it emits "a:b:".
		job.Scope = bson.D{{"searchKeyLen", len(searchKey)}}
		job.Map = fmt.Sprintf(`
			function() {
				var k = this.k;
				var i = k.indexOf(':', searchKeyLen)+1;
				if (k.length > i) {
					k = k.substr(0, i)+'*';
				}
				%s
			}`, emit)
	} else {
		// For a search key "a:b:" matching a key "a:b:c:d:e:", this map function emits "a:b:*".
		// For a search key "a:b:" matching a key "a:b:c:", it also emits "a:b:*".
		// For a search key "a:b:" matching a key "a:b:", it emits "a:b:".
		emitKey := searchKey
		if req.Prefix {
			emitKey += "*"
		}
		job.Scope = bson.D{{"emitKey", emitKey}}
		job.Map = fmt.Sprintf(`
			function() {
				var k = emitKey;
				%s
			}`, emit)
	}

	var result []struct {
		Key   string `bson:"_id"`
		Value int64
	}
	// Build the counter query, optionally bounded by time.
	var query, tquery bson.D
	if !req.Start.IsZero() {
		tquery = append(tquery, bson.DocElem{
			Name:  "$gte",
			Value: timeToStamp(req.Start),
		})
	}
	if !req.Stop.IsZero() {
		tquery = append(tquery, bson.DocElem{
			Name:  "$lte",
			Value: timeToStamp(req.Stop),
		})
	}
	if len(tquery) == 0 {
		query = bson.D{{"k", bson.D{{"$regex", regex}}}}
	} else {
		query = bson.D{{"k", bson.D{{"$regex", regex}}}, {"t", tquery}}
	}
	_, err = countersColl.Find(query).MapReduce(&job, &result)
	if err != nil {
		return nil, err
	}
	var counters []Counter
	for i := range result {
		key := result[i].Key
		when := time.Time{}
		if req.By != ByAll {
			// Split the "<key>@<stamp>" aggregation id emitted above.
			var stamp int64
			if at := strings.Index(key, "@"); at != -1 && len(key) > at+1 {
				stamp, _ = strconv.ParseInt(key[at+1:], 10, 32)
				key = key[:at]
			}
			if stamp == 0 {
				return nil, errgo.Newf("internal error: bad aggregated key: %q", result[i].Key)
			}
			switch req.By {
			case ByDay:
				stamp = stamp * 86400
			case ByWeek:
				// The +1 puts it at the end of the period.
				stamp = (stamp + 1) * 604800
			}
			when = time.Unix(counterEpoch+stamp, 0).In(time.UTC)
		}
		// Translate the base-32 token ids back into key words.
		ids := strings.Split(key, ":")
		tokens := make([]string, 0, len(ids))
		for i := 0; i < len(ids)-1; i++ {
			if ids[i] == "*" {
				continue
			}
			id, err := strconv.ParseInt(ids[i], 32, 32)
			if err != nil {
				return nil, errgo.Newf("store: invalid id: %q", ids[i])
			}
			token, found := s.stats.idToken(int(id))
			if !found {
				var t tokenId
				err = tokensColl.FindId(id).One(&t)
				if err == mgo.ErrNotFound {
					return nil, errgo.Newf("store: internal error; token id not found: %d", id)
				}
				s.stats.cacheTokenId(t.Token, t.Id)
				token = t.Token
			}
			tokens = append(tokens, token)
		}
		counter := Counter{
			Key:    tokens,
			Prefix: len(ids) > 0 && ids[len(ids)-1] == "*",
			Count:  result[i].Value,
			Time:   when,
		}
		counters = append(counters, counter)
	}
	if !req.List && len(counters) == 0 {
		counters = []Counter{{Key: req.Key, Prefix: req.Prefix, Count: 0}}
	} else if len(counters) > 1 {
		sort.Sort(sortableCounters(counters))
	}
	return counters, nil
}

// sortableCounters sorts counters by time, then by descending count,
// then by key (see Less).
type sortableCounters []Counter

func (s sortableCounters) Len() int      { return len(s) }
func (s sortableCounters) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s sortableCounters) Less(i, j int) bool {
	// Earlier times first.
	if !s[i].Time.Equal(s[j].Time) {
		return s[i].Time.Before(s[j].Time)
	}
	// Then larger counts first.
	if s[i].Count != s[j].Count {
		return s[j].Count < s[i].Count
	}
	// Then smaller/shorter keys first.
	ki := s[i].Key
	kj := s[j].Key
	for n := range ki {
		if n >= len(kj) {
			return false
		}
		if ki[n] != kj[n] {
			return ki[n] < kj[n]
		}
	}
	if len(ki) < len(kj) {
		return true
	}
	// Then full keys first.
	return !s[i].Prefix && s[j].Prefix
}

// EntityStatsKey returns a stats key for the given charm or bundle
// reference and the given kind.
// Entity stats keys are generated using the following schema:
//   kind:series:name:user:revision
// where user can be empty (for promulgated charms/bundles) and revision is
// optional (e.g. when uploading an entity the revision is not specified).
// For instance, entities' stats can then be retrieved like the following:
//   - kind:utopic:* -> all charms of a specific series;
//   - kind:trusty:django:* -> all revisions and user variations of a charm;
//   - kind:trusty:django::* -> all revisions of a promulgated charm;
//   - kind:trusty:django::42 -> a specific promulgated charm;
//   - kind:trusty:django:who:* -> all revisions of a user owned charm;
//   - kind:trusty:django:who:42 -> a specific user owned charm;
// The above also applies to bundles (where the series is "bundle").
func EntityStatsKey(url *charm.URL, kind string) []string {
	key := []string{kind, url.Series, url.Name, url.User}
	if url.Revision != -1 {
		key = append(key, strconv.Itoa(url.Revision))
	}
	return key
}

// AggregatedCounts contains counts for a statistic aggregated over the
// lastDay, lastWeek, lastMonth and all time.
type AggregatedCounts struct {
	LastDay, LastWeek, LastMonth, Total int64
}

// LegacyDownloadCountsEnabled represents whether aggregated download counts
// must be retrieved from the legacy infrastructure. In essence, if the value
// is true (enabled), aggregated counts are not calculated based on the data
// stored in the charm store stats; they are instead retrieved from the entity
// extra-info. For this reason, enabling this we assume an external program
// updated the extra-info for the entity, specifically the
// "legacy-download-stats" key.
// TODO (frankban): this is a temporary hack, and can be removed once we have
// a more consistent way to import the download counts from the legacy charm
// store (charms) and from charmworld (bundles). To remove the legacy download
// counts logic in the future, grep the code for "LegacyDownloadCountsEnabled"
// and remove as required.
var LegacyDownloadCountsEnabled = true

// ArchiveDownloadCounts calculates the aggregated download counts for
// a charm or bundle.
func (s *Store) ArchiveDownloadCounts(id *charm.URL, refresh bool) (thisRevision, allRevisions AggregatedCounts, err error) {
	// Retrieve the aggregated stats.
	fetchId := *id
	fetch := func() (interface{}, error) {
		return s.statsCacheFetch(&fetchId)
	}
	var v interface{}
	// Counts for this specific revision.
	if refresh {
		s.pool.statsCache.Evict(fetchId.String())
	}
	v, err = s.pool.statsCache.Get(fetchId.String(), fetch)
	if err != nil {
		return AggregatedCounts{}, AggregatedCounts{}, errgo.Mask(err)
	}
	thisRevision = v.(AggregatedCounts)
	// Counts across all revisions (revision -1 acts as a wildcard).
	fetchId.Revision = -1
	if refresh {
		s.pool.statsCache.Evict(fetchId.String())
	}
	v, err = s.pool.statsCache.Get(fetchId.String(), fetch)
	if err != nil {
		return AggregatedCounts{}, AggregatedCounts{}, errgo.Mask(err)
	}
	allRevisions = v.(AggregatedCounts)
	return
}

// statsCacheFetch fetches the aggregated download counts for id,
// including legacy counts when LegacyDownloadCountsEnabled is true.
// It is used as the fetch function for the stats cache.
func (s *Store) statsCacheFetch(id *charm.URL) (interface{}, error) {
	prefix := id.Revision == -1
	kind := params.StatsArchiveDownload
	if id.User == "" {
		kind = params.StatsArchiveDownloadPromulgated
	}
	counts, err := s.aggregateStats(EntityStatsKey(id, kind), prefix)
	if err != nil {
		return nil, errgo.Notef(err, "cannot get aggregated count for %q", id)
	}
	if !LegacyDownloadCountsEnabled {
		return counts, nil
	}
	// TODO (frankban): remove this code when removing the legacy counts logic.
	legacy, err := s.legacyDownloadCounts(id)
	if err != nil {
		return nil, err
	}
	counts.LastDay += legacy.LastDay
	counts.LastWeek += legacy.LastWeek
	counts.LastMonth += legacy.LastMonth
	counts.Total += legacy.Total
	return counts, nil
}

// legacyDownloadCounts retrieves the aggregated stats from the entity
// extra-info. This is used when LegacyDownloadCountsEnabled is true.
// TODO (frankban): remove this method when removing the legacy counts logic.
func (s *Store) legacyDownloadCounts(id *charm.URL) (AggregatedCounts, error) {
	counts := AggregatedCounts{}
	entities, err := s.FindEntities(id, FieldSelector("extrainfo"))
	if err != nil {
		return counts, errgo.Mask(err, errgo.Is(params.ErrNotFound))
	}
	if len(entities) == 0 {
		return counts, errgo.WithCausef(nil, params.ErrNotFound, "entity not found")
	}
	entity := entities[0]
	data, ok := entity.ExtraInfo[params.LegacyDownloadStats]
	if ok {
		if err := json.Unmarshal(data, &counts.Total); err != nil {
			return counts, errgo.Notef(err, "cannot unmarshal extra-info value")
		}
	}
	return counts, nil
}

// aggregateStats returns the aggregated downloads counts for the given stats
// key.
func (s *Store) aggregateStats(key []string, prefix bool) (AggregatedCounts, error) {
	var counts AggregatedCounts
	req := CounterRequest{
		Key:    key,
		By:     ByDay,
		Prefix: prefix,
	}
	results, err := s.Counters(&req)
	if err != nil {
		return counts, errgo.Notef(err, "cannot retrieve stats")
	}
	today := time.Now()
	lastDay := today.AddDate(0, 0, -1)
	lastWeek := today.AddDate(0, 0, -7)
	lastMonth := today.AddDate(0, -1, 0)
	// Aggregate the results.
	for _, result := range results {
		if result.Time.After(lastMonth) {
			counts.LastMonth += result.Count
			if result.Time.After(lastWeek) {
				counts.LastWeek += result.Count
				if result.Time.After(lastDay) {
					counts.LastDay += result.Count
				}
			}
		}
		counts.Total += result.Count
	}
	return counts, nil
}

// IncrementDownloadCountsAsync updates the download statistics for entity id in both
// the statistics database and the search database. The action is done in the
// background using a separate goroutine.
func (s *Store) IncrementDownloadCountsAsync(id *router.ResolvedURL) {
	s.Go(func(s *Store) {
		if err := s.IncrementDownloadCounts(id); err != nil {
			logger.Errorf("cannot increase download counter for %v: %s", id, err)
		}
	})
}

// IncrementDownloadCounts updates the download statistics for entity id in both
// the statistics database and the search database.
func (s *Store) IncrementDownloadCounts(id *router.ResolvedURL) error {
	return s.IncrementDownloadCountsAtTime(id, time.Now())
}

// IncrementDownloadCountsAtTime updates the download statistics for entity id in both
// the statistics database and the search database, associating it with the given time.
func (s *Store) IncrementDownloadCountsAtTime(id *router.ResolvedURL, t time.Time) error {
	key := EntityStatsKey(&id.URL, params.StatsArchiveDownload)
	if err := s.IncCounterAtTime(key, t); err != nil {
		return errgo.Notef(err, "cannot increase stats counter for %v", key)
	}
	if id.PromulgatedRevision == -1 {
		// Check that the id really is for an unpromulgated entity.
		// This unfortunately adds an extra round trip to the database,
		// but as incrementing statistics is performed asynchronously
		// it will not be in the critical path.
		entity, err := s.FindEntity(id, FieldSelector("promulgated-revision"))
		if err != nil {
			return errgo.Notef(err, "cannot find entity %v", &id.URL)
		}
		id.PromulgatedRevision = entity.PromulgatedRevision
	}
	if id.PromulgatedRevision != -1 {
		key := EntityStatsKey(id.PromulgatedURL(), params.StatsArchiveDownloadPromulgated)
		if err := s.IncCounterAtTime(key, t); err != nil {
			return errgo.Notef(err, "cannot increase stats counter for %v", key)
		}
	}
	// TODO(mhilton) when this charmstore is being used by juju, find a more
	// efficient way to update the download statistics for search.
	if err := s.UpdateSearch(id); err != nil {
		return errgo.Notef(err, "cannot update search record for %v", id)
	}
	return nil
}
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/package_test.go0000664000175000017500000000050212672604603030335 0ustar marcomarco// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package charmstore_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( "testing" jujutesting "github.com/juju/testing" ) func TestPackage(t *testing.T) { jujutesting.MgoTestPackage(t, nil) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/addentity_test.go0000664000175000017500000005145412672604603030743 0ustar marcomarco// Copyright 2016 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( "archive/zip" "bytes" "fmt" "io" "io/ioutil" "regexp" "sort" "time" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" ) type AddEntitySuite struct { commonSuite } var _ = gc.Suite(&AddEntitySuite{}) func (s *AddEntitySuite) TestAddCharmWithUser(c *gc.C) { store := s.newStore(c, false) defer store.Close() wordpress := storetesting.Charms.CharmDir("wordpress") url := router.MustNewResolvedURL("cs:~who/precise/wordpress-23", -1) err := store.AddCharmWithArchive(url, wordpress) c.Assert(err, gc.IsNil) assertBaseEntity(c, store, mongodoc.BaseURL(&url.URL), false) } func (s *AddEntitySuite) TestAddPromulgatedCharmDir(c *gc.C) { charmDir := storetesting.Charms.CharmDir("wordpress") s.checkAddCharm(c, charmDir, router.MustNewResolvedURL("~charmers/precise/wordpress-1", 1)) } func (s *AddEntitySuite) TestAddPromulgatedCharmArchive(c *gc.C) { charmArchive := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") s.checkAddCharm(c, charmArchive, router.MustNewResolvedURL("~charmers/precise/wordpress-1", 1)) } func (s *AddEntitySuite) TestAddUserOwnedCharmDir(c *gc.C) { 
charmDir := storetesting.Charms.CharmDir("wordpress") s.checkAddCharm(c, charmDir, router.MustNewResolvedURL("~charmers/precise/wordpress-1", -1)) } func (s *AddEntitySuite) TestAddUserOwnedCharmArchive(c *gc.C) { charmArchive := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") s.checkAddCharm(c, charmArchive, router.MustNewResolvedURL("~charmers/precise/wordpress-1", -1)) } func (s *AddEntitySuite) TestAddBundleDir(c *gc.C) { bundleDir := storetesting.Charms.BundleDir("wordpress-simple") s.checkAddBundle(c, bundleDir, router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-2", 3)) } func (s *AddEntitySuite) TestAddBundleArchive(c *gc.C) { bundleArchive, err := charm.ReadBundleArchive( storetesting.Charms.BundleArchivePath(c.MkDir(), "wordpress-simple"), ) s.addRequiredCharms(c, bundleArchive) c.Assert(err, gc.IsNil) s.checkAddBundle(c, bundleArchive, router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-2", 3)) } func (s *AddEntitySuite) TestAddUserOwnedBundleDir(c *gc.C) { bundleDir := storetesting.Charms.BundleDir("wordpress-simple") s.checkAddBundle(c, bundleDir, router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-1", -1)) } func (s *AddEntitySuite) TestAddUserOwnedBundleArchive(c *gc.C) { bundleArchive, err := charm.ReadBundleArchive( storetesting.Charms.BundleArchivePath(c.MkDir(), "wordpress-simple"), ) c.Assert(err, gc.IsNil) s.checkAddBundle(c, bundleArchive, router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-1", -1)) } func (s *AddEntitySuite) TestAddCharmWithBundleSeries(c *gc.C) { store := s.newStore(c, false) defer store.Close() ch := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") err := store.AddCharmWithArchive(router.MustNewResolvedURL("~charmers/bundle/wordpress-2", -1), ch) c.Assert(err, gc.ErrorMatches, `cannot read bundle archive: archive file "bundle.yaml" not found`) } func (s *AddEntitySuite) TestAddCharmWithMultiSeries(c *gc.C) { store := s.newStore(c, false) defer store.Close() ch := 
storetesting.Charms.CharmArchive(c.MkDir(), "multi-series") s.checkAddCharm(c, ch, router.MustNewResolvedURL("~charmers/multi-series-1", 1)) // Make sure it can be accessed with a number of names e, err := store.FindBestEntity(charm.MustParseURL("~charmers/multi-series-1"), params.UnpublishedChannel, nil) c.Assert(err, gc.IsNil) c.Assert(e.URL.String(), gc.Equals, "cs:~charmers/multi-series-1") e, err = store.FindBestEntity(charm.MustParseURL("~charmers/trusty/multi-series-1"), params.UnpublishedChannel, nil) c.Assert(err, gc.IsNil) c.Assert(e.URL.String(), gc.Equals, "cs:~charmers/multi-series-1") e, err = store.FindBestEntity(charm.MustParseURL("~charmers/wily/multi-series-1"), params.UnpublishedChannel, nil) c.Assert(err, gc.IsNil) c.Assert(e.URL.String(), gc.Equals, "cs:~charmers/multi-series-1") _, err = store.FindBestEntity(charm.MustParseURL("~charmers/precise/multi-series-1"), params.UnpublishedChannel, nil) c.Assert(err, gc.ErrorMatches, "no matching charm or bundle for cs:~charmers/precise/multi-series-1") c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) } func (s *AddEntitySuite) TestAddCharmWithSeriesWhenThereIsAnExistingMultiSeriesVersion(c *gc.C) { store := s.newStore(c, false) defer store.Close() ch := storetesting.Charms.CharmArchive(c.MkDir(), "multi-series") err := store.AddCharmWithArchive(router.MustNewResolvedURL("~charmers/multi-series-1", -1), ch) c.Assert(err, gc.IsNil) ch = storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") err = store.AddCharmWithArchive(router.MustNewResolvedURL("~charmers/trusty/multi-series-2", -1), ch) c.Assert(err, gc.ErrorMatches, `charm name duplicates multi-series charm name cs:~charmers/multi-series-1`) } func (s *AddEntitySuite) TestAddCharmWithMultiSeriesToES(c *gc.C) { store := s.newStore(c, true) defer store.Close() ch := storetesting.Charms.CharmArchive(c.MkDir(), "multi-series") s.checkAddCharm(c, ch, router.MustNewResolvedURL("~charmers/juju-gui-1", 1)) } func (s *AddEntitySuite) 
TestAddBundleDuplicatingCharm(c *gc.C) { store := s.newStore(c, false) defer store.Close() ch := storetesting.Charms.CharmDir("wordpress") err := store.AddCharmWithArchive(router.MustNewResolvedURL("~tester/precise/wordpress-2", -1), ch) c.Assert(err, gc.IsNil) b := storetesting.Charms.BundleDir("wordpress-simple") s.addRequiredCharms(c, b) err = store.AddBundleWithArchive(router.MustNewResolvedURL("~tester/bundle/wordpress-5", -1), b) c.Assert(err, gc.ErrorMatches, "bundle name duplicates charm name cs:~tester/precise/wordpress-2") } func (s *AddEntitySuite) TestAddCharmDuplicatingBundle(c *gc.C) { store := s.newStore(c, false) defer store.Close() b := storetesting.Charms.BundleDir("wordpress-simple") s.addRequiredCharms(c, b) err := store.AddBundleWithArchive(router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-2", -1), b) c.Assert(err, gc.IsNil) ch := storetesting.Charms.CharmDir("wordpress") err = store.AddCharmWithArchive(router.MustNewResolvedURL("~charmers/precise/wordpress-simple-5", -1), ch) c.Assert(err, gc.ErrorMatches, "charm name duplicates bundle name cs:~charmers/bundle/wordpress-simple-2") } var uploadEntityErrorsTests = []struct { about string url string upload ArchiverTo blobHash string blobSize int64 expectError string expectCause error }{{ about: "revision not specified", url: "~charmers/precise/wordpress", upload: storetesting.NewCharm(nil), expectError: "entity id does not specify revision", expectCause: params.ErrEntityIdNotAllowed, }, { about: "user not specified", url: "precise/wordpress-23", upload: storetesting.NewCharm(nil), expectError: "entity id does not specify user", expectCause: params.ErrEntityIdNotAllowed, }, { about: "hash mismatch", url: "~charmers/precise/wordpress-0", upload: storetesting.NewCharm(nil), blobHash: "blahblah", expectError: "cannot put archive blob: hash mismatch", // It would be nice if this was: // expectCause: params.ErrInvalidEntity, }, { about: "size mismatch", url: "~charmers/precise/wordpress-0", 
upload: storetesting.NewCharm(nil), blobSize: 99999, expectError: "cannot read charm archive: seek past end of file", // It would be nice if the above error was better and // the cause was: // expectCause: params.ErrInvalidEntity, }, { about: "charm uploaded to bundle URL", url: "~charmers/bundle/foo-0", upload: storetesting.NewCharm(nil), expectError: `cannot read bundle archive: archive file "bundle.yaml" not found`, // It would be nice if this was: // expectCause: params.ErrInvalidEntity, }, { about: "bundle uploaded to charm URL", url: "~charmers/precise/foo-0", upload: storetesting.NewBundle(&charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "foo": { Charm: "foo", }, }, }), expectError: `cannot read charm archive: archive file "metadata.yaml" not found`, // It would be nice if this was: // expectCause: params.ErrInvalidEntity, }, { about: "banned relation name", url: "~charmers/precise/foo-0", upload: storetesting.NewCharm(storetesting.RelationMeta("requires relation-name foo")), expectError: `relation relation-name has almost certainly not been changed from the template`, expectCause: params.ErrInvalidEntity, }, { about: "banned interface name", url: "~charmers/precise/foo-0", upload: storetesting.NewCharm(storetesting.RelationMeta("requires foo interface-name")), expectError: `interface interface-name in relation foo has almost certainly not been changed from the template`, expectCause: params.ErrInvalidEntity, }, { about: "unrecognized series", url: "~charmers/precise/foo-0", upload: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "badseries")), expectError: `unrecognized series "badseries" in metadata`, expectCause: params.ErrInvalidEntity, }, { about: "inconsistent series", url: "~charmers/trusty/foo-0", upload: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "win10")), expectError: `cannot mix series from ubuntu and windows in single charm`, expectCause: params.ErrInvalidEntity, }, { about: "series not 
specified", url: "~charmers/foo-0", upload: storetesting.NewCharm(nil), expectError: `series not specified in url or charm metadata`, expectCause: params.ErrEntityIdNotAllowed, }, { about: "series not allowed by metadata", url: "~charmers/precise/foo-0", upload: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty")), expectError: `"precise" series not listed in charm metadata`, expectCause: params.ErrEntityIdNotAllowed, }, { about: "bundle refers to non-existent charm", url: "~charmers/bundle/foo-0", upload: storetesting.NewBundle(&charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "foo": { Charm: "bad-charm", }, }, }), expectError: regexp.QuoteMeta(`bundle verification failed: ["service \"foo\" refers to non-existent charm \"bad-charm\""]`), expectCause: params.ErrInvalidEntity, }, { about: "bundle verification fails", url: "~charmers/bundle/foo-0", upload: storetesting.NewBundle(&charm.BundleData{}), expectError: regexp.QuoteMeta(`bundle verification failed: ["at least one service must be specified"]`), expectCause: params.ErrInvalidEntity, }, { about: "invalid zip format", url: "~charmers/foo-0", upload: zipWithInvalidFormat(), expectError: `cannot read charm archive: zip: not a valid zip file`, expectCause: params.ErrInvalidEntity, }, { about: "invalid zip algorithm", url: "~charmers/foo-0", upload: zipWithInvalidAlgorithm(), expectError: `cannot read charm archive: zip: unsupported compression algorithm`, expectCause: params.ErrInvalidEntity, }, { about: "invalid zip checksum", url: "~charmers/foo-0", upload: zipWithInvalidChecksum(), expectError: `cannot read charm archive: zip: checksum error`, expectCause: params.ErrInvalidEntity, }} func (s *AddEntitySuite) TestUploadEntityErrors(c *gc.C) { store := s.newStore(c, true) defer store.Close() for i, test := range uploadEntityErrorsTests { c.Logf("test %d: %s", i, test.about) var buf bytes.Buffer err := test.upload.ArchiveTo(&buf) c.Assert(err, gc.IsNil) if test.blobHash == "" { h := 
blobstore.NewHash() h.Write(buf.Bytes()) test.blobHash = fmt.Sprintf("%x", h.Sum(nil)) } if test.blobSize == 0 { test.blobSize = int64(len(buf.Bytes())) } url := &router.ResolvedURL{ URL: *charm.MustParseURL(test.url), } err = store.UploadEntity(url, &buf, test.blobHash, test.blobSize, nil) c.Assert(err, gc.ErrorMatches, test.expectError) if test.expectCause != nil { c.Assert(errgo.Cause(err), gc.Equals, test.expectCause) } else { c.Assert(errgo.Cause(err), gc.Equals, err) } } } func (s *AddEntitySuite) checkAddCharm(c *gc.C, ch charm.Charm, url *router.ResolvedURL) { store := s.newStore(c, true) defer store.Close() // Add the charm to the store. beforeAdding := time.Now() err := store.AddCharmWithArchive(url, ch) c.Assert(err, gc.IsNil) afterAdding := time.Now() var doc *mongodoc.Entity err = store.DB.Entities().FindId(&url.URL).One(&doc) c.Assert(err, gc.IsNil) // The entity doc has been correctly added to the mongo collection. size, hash, hash256 := getSizeAndHashes(ch) sort.Strings(doc.CharmProvidedInterfaces) sort.Strings(doc.CharmRequiredInterfaces) // Check the upload time and then reset it to its zero value // so that we can test the deterministic parts later. c.Assert(doc.UploadTime, jc.TimeBetween(beforeAdding, afterAdding)) doc.UploadTime = time.Time{} assertDoc := assertBlobFields(c, doc, url, hash, hash256, size) c.Assert(assertDoc, jc.DeepEquals, denormalizedEntity(&mongodoc.Entity{ URL: &url.URL, BlobHash: hash, BlobHash256: hash256, Size: size, CharmMeta: ch.Meta(), CharmActions: ch.Actions(), CharmConfig: ch.Config(), CharmProvidedInterfaces: []string{"http", "logging", "monitoring"}, CharmRequiredInterfaces: []string{"mysql", "varnish"}, PromulgatedURL: url.PromulgatedURL(), SupportedSeries: ch.Meta().Series, })) // The charm archive has been properly added to the blob store. 
r, obtainedSize, err := store.BlobStore.Open(doc.BlobName) c.Assert(err, gc.IsNil) defer r.Close() c.Assert(obtainedSize, gc.Equals, size) data, err := ioutil.ReadAll(r) c.Assert(err, gc.IsNil) charmArchive, err := charm.ReadCharmArchiveBytes(data) c.Assert(err, gc.IsNil) c.Assert(charmArchive.Meta(), jc.DeepEquals, ch.Meta()) c.Assert(charmArchive.Config(), jc.DeepEquals, ch.Config()) c.Assert(charmArchive.Actions(), jc.DeepEquals, ch.Actions()) c.Assert(charmArchive.Revision(), jc.DeepEquals, ch.Revision()) // Check that the base entity has been properly created. assertBaseEntity(c, store, mongodoc.BaseURL(&url.URL), url.PromulgatedRevision != -1) // Try inserting the charm again - it should fail because the charm is // already there. err = store.AddCharmWithArchive(url, ch) c.Assert(errgo.Cause(err), gc.Equals, params.ErrDuplicateUpload) } func (s *AddEntitySuite) checkAddBundle(c *gc.C, bundle charm.Bundle, url *router.ResolvedURL) { store := s.newStore(c, true) defer store.Close() // Add the bundle to the store. beforeAdding := time.Now() s.addRequiredCharms(c, bundle) err := store.AddBundleWithArchive(url, bundle) c.Assert(err, gc.IsNil) afterAdding := time.Now() var doc *mongodoc.Entity err = store.DB.Entities().FindId(&url.URL).One(&doc) c.Assert(err, gc.IsNil) sort.Sort(orderedURLs(doc.BundleCharms)) // Check the upload time and then reset it to its zero value // so that we can test the deterministic parts later. c.Assert(doc.UploadTime, jc.TimeBetween(beforeAdding, afterAdding)) doc.UploadTime = time.Time{} // The entity doc has been correctly added to the mongo collection. 
size, hash, hash256 := getSizeAndHashes(bundle) assertDoc := assertBlobFields(c, doc, url, hash, hash256, size) c.Assert(assertDoc, jc.DeepEquals, denormalizedEntity(&mongodoc.Entity{ URL: &url.URL, BlobHash: hash, BlobHash256: hash256, Size: size, BundleData: bundle.Data(), BundleReadMe: bundle.ReadMe(), BundleCharms: []*charm.URL{ charm.MustParseURL("mysql"), charm.MustParseURL("wordpress"), }, BundleMachineCount: newInt(2), BundleUnitCount: newInt(2), PromulgatedURL: url.PromulgatedURL(), })) // The bundle archive has been properly added to the blob store. r, obtainedSize, err := store.BlobStore.Open(doc.BlobName) c.Assert(err, gc.IsNil) defer r.Close() c.Assert(obtainedSize, gc.Equals, size) data, err := ioutil.ReadAll(r) c.Assert(err, gc.IsNil) bundleArchive, err := charm.ReadBundleArchiveBytes(data) c.Assert(err, gc.IsNil) c.Assert(bundleArchive.Data(), jc.DeepEquals, bundle.Data()) c.Assert(bundleArchive.ReadMe(), jc.DeepEquals, bundle.ReadMe()) // Check that the base entity has been properly created. assertBaseEntity(c, store, mongodoc.BaseURL(&url.URL), url.PromulgatedRevision != -1) // Try inserting the bundle again - it should fail because the bundle is // already there. err = store.AddBundleWithArchive(url, bundle) c.Assert(errgo.Cause(err), gc.Equals, params.ErrDuplicateUpload, gc.Commentf("error: %v", err)) } // assertBlobFields asserts that the blob-related fields in doc are as expected. // It returns a copy of doc with unpredictable fields zeroed out. func assertBlobFields(c *gc.C, doc *mongodoc.Entity, url *router.ResolvedURL, hash, hash256 string, size int64) *mongodoc.Entity { doc1 := *doc doc = &doc1 // The blob name is random, but we check that it's // in the correct format, and non-empty. blobName := doc.BlobName c.Assert(blobName, gc.Matches, "[0-9a-z]+") doc.BlobName = "" // The PreV5* fields are unpredictable, so zero them out // for the purposes of comparison. 
if doc.CharmMeta != nil && len(doc.CharmMeta.Series) > 0 { // It's a multi-series charm, so the PreV5* fields should be active. if doc.PreV5BlobSize <= doc.Size { c.Fatalf("pre-v5 blobsize %d is unexpectedly less than original blob size %d", doc.PreV5BlobSize, doc.Size) } c.Assert(doc.PreV5BlobHash, gc.Not(gc.Equals), "") c.Assert(doc.PreV5BlobHash, gc.Not(gc.Equals), hash) c.Assert(doc.PreV5BlobHash256, gc.Not(gc.Equals), "") c.Assert(doc.PreV5BlobHash256, gc.Not(gc.Equals), hash256) } else { c.Assert(doc.PreV5BlobSize, gc.Equals, doc.Size) c.Assert(doc.PreV5BlobHash, gc.Equals, doc.BlobHash) c.Assert(doc.PreV5BlobHash256, gc.Equals, doc.BlobHash256) } doc.PreV5BlobSize = 0 doc.PreV5BlobHash = "" doc.PreV5BlobHash256 = "" return doc } func assertBaseEntity(c *gc.C, store *Store, url *charm.URL, promulgated bool) { baseEntity, err := store.FindBaseEntity(url, nil) c.Assert(err, gc.IsNil) acls := mongodoc.ACL{ Read: []string{url.User}, Write: []string{url.User}, } expectACLs := map[params.Channel]mongodoc.ACL{ params.StableChannel: acls, params.DevelopmentChannel: acls, params.UnpublishedChannel: acls, } c.Assert(storetesting.NormalizeBaseEntity(baseEntity), jc.DeepEquals, storetesting.NormalizeBaseEntity(&mongodoc.BaseEntity{ URL: url, User: url.User, Name: url.Name, Promulgated: mongodoc.IntBool(promulgated), ChannelACLs: expectACLs, })) } type orderedURLs []*charm.URL func (o orderedURLs) Less(i, j int) bool { return o[i].String() < o[j].String() } func (o orderedURLs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } func (o orderedURLs) Len() int { return len(o) } type byteArchiver []byte func (a byteArchiver) ArchiveTo(w io.Writer) error { _, err := w.Write(a) return err } func zipWithInvalidFormat() ArchiverTo { return byteArchiver(nil) } func zipWithInvalidChecksum() ArchiverTo { return byteArchiver( "PK\x03\x04\x14\x00\b\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\r\x00\x00\x00metadata.yamlielloPK\a\b" + 
"\x86\xa6\x106\x05\x00\x00\x00\x05\x00\x00\x00PK" + "\x01\x02\x14\x00\x14\x00\b\x00\x00\x00\x00\x00" + "\x00\x00\x86\xa6\x106\x05\x00\x00\x00\x05\x00" + "\x00\x00\r\x00\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00metadata.yamlPK" + "\x05\x06\x00\x00\x00\x00\x01\x00\x01\x00;\x00" + "\x00\x00@\x00\x00\x00\x00\x00", ) data := storetesting.NewCharm(nil).Bytes() zr, err := zip.NewReader(bytes.NewReader(data), int64(len(data))) if err != nil { panic(err) } // Change the contents of a file so // that it won't (probably) fit the checksum // any more. off, err := zr.File[0].DataOffset() if err != nil { panic(err) } data[off] += 2 return byteArchiver(data) } func zipWithInvalidAlgorithm() ArchiverTo { return byteArchiver( "PK\x03\x04\x14\x00\b\x00\t\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\r\x00\x00\x00metadata.yamlhello" + "PK\a\b\x86\xa6\x106\x05\x00\x00\x00\x05\x00" + "\x00\x00PK\x01\x02\x14\x00\x14\x00\b\x00" + "\t\x00\x00\x00\x00\x00\x86\xa6\x106\x05\x00" + "\x00\x00\x05\x00\x00\x00\r\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00metadata.yamlPK\x05\x06\x00\x00\x00" + "\x00\x01\x00\x01\x00;\x00\x00\x00@\x00\x00" + "\x00\x00\x00", ) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/common_test.go0000664000175000017500000000335412672604603030242 0ustar marcomarco// Copyright 2016 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" ) type commonSuite struct { storetesting.IsolatedMgoESSuite index string } // addRequiredCharms adds any charms required by the given // bundle that are not already in the store. func (s *commonSuite) addRequiredCharms(c *gc.C, bundle charm.Bundle) { store := s.newStore(c, true) defer store.Close() for _, svc := range bundle.Data().Services { u := charm.MustParseURL(svc.Charm) if _, err := store.FindBestEntity(u, params.NoChannel, nil); err == nil { continue } if u.Revision == -1 { u.Revision = 0 } var rurl router.ResolvedURL rurl.URL = *u ch := storetesting.Charms.CharmDir(u.Name) if len(ch.Meta().Series) == 0 && u.Series == "" { rurl.URL.Series = "trusty" } if u.User == "" { rurl.URL.User = "charmers" rurl.PromulgatedRevision = rurl.URL.Revision } else { rurl.PromulgatedRevision = -1 } err := store.AddCharmWithArchive(&rurl, ch) c.Assert(err, gc.IsNil) err = store.Publish(&rurl, params.StableChannel) c.Assert(err, gc.IsNil) } } func (s *commonSuite) newStore(c *gc.C, withES bool) *Store { var si *SearchIndex if withES { si = &SearchIndex{s.ES, s.TestIndex} } p, err := NewPool(s.Session.DB("juju_test"), si, &bakery.NewServiceParams{}, ServerParams{}) c.Assert(err, gc.IsNil) store := p.Store() p.Close() return store } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/server.go0000664000175000017500000001336512672604603027224 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. // This is the internal version of the charmstore package. 
// It exposes details to the various API packages // that we do not wish to expose to the world at large. package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( "net/http" "strings" "time" "gopkg.in/errgo.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/mgo.v2" "gopkg.in/natefinch/lumberjack.v2" "gopkg.in/juju/charmstore.v5-unstable/internal/router" ) // NewAPIHandlerFunc is a function that returns a new API handler that uses // the given Store. The absPath parameter holds the root path of the // API handler. type NewAPIHandlerFunc func(pool *Pool, p ServerParams, absPath string) HTTPCloseHandler // HTTPCloseHandler represents a HTTP handler that // must be closed after use. type HTTPCloseHandler interface { Close() http.Handler } // ServerParams holds configuration for a new internal API server. type ServerParams struct { // AuthUsername and AuthPassword hold the credentials // used for HTTP basic authentication. AuthUsername string AuthPassword string // IdentityLocation holds the location of the third party authorization // service to use when creating third party caveats, // for example: http://api.jujucharms.com/identity/v1/discharger // If it is empty, IdentityURL+"/v1/discharger" will be used. IdentityLocation string // TermsLocation holds the location of the third party // terms service to use when creating third party caveats. TermsLocation string // PublicKeyLocator holds a public key store. // It may be nil. PublicKeyLocator bakery.PublicKeyLocator // IdentityAPIURL holds the URL of the identity manager, // for example http://api.jujucharms.com/identity IdentityAPIURL string // AgentUsername and AgentKey hold the credentials used for agent // authentication. AgentUsername string AgentKey *bakery.KeyPair // StatsCacheMaxAge is the maximum length of time between // refreshes of entities in the stats cache. 
StatsCacheMaxAge time.Duration // SearchCacheMaxAge is the maximum length of time between // refreshes of entities in the search cache. SearchCacheMaxAge time.Duration // MaxMgoSessions specifies a soft limit on the maximum // number of mongo sessions used. Each concurrent // HTTP request will use one session. MaxMgoSessions int // HTTPRequestWaitDuration holds the amount of time // that an HTTP request will wait for a free connection // when the MaxConcurrentHTTPRequests limit is reached. HTTPRequestWaitDuration time.Duration // AuditLogger optionally holds the logger which will be used to // write audit log entries. AuditLogger *lumberjack.Logger } // NewServer returns a handler that serves the given charm store API // versions using db to store that charm store data. // An optional elasticsearch configuration can be specified in si. If // elasticsearch is not being used then si can be set to nil. // The key of the versions map is the version name. // The handler configuration is provided to all version handlers. // // The returned Server should be closed after use. func NewServer(db *mgo.Database, si *SearchIndex, config ServerParams, versions map[string]NewAPIHandlerFunc) (*Server, error) { if len(versions) == 0 { return nil, errgo.Newf("charm store server must serve at least one version of the API") } config.IdentityLocation = strings.Trim(config.IdentityLocation, "/") config.TermsLocation = strings.Trim(config.TermsLocation, "/") config.IdentityAPIURL = strings.Trim(config.IdentityAPIURL, "/") if config.IdentityLocation == "" && config.IdentityAPIURL != "" { config.IdentityLocation = config.IdentityAPIURL + "/v1/discharger" } logger.Infof("identity discharge location: %s", config.IdentityLocation) logger.Infof("identity API location: %s", config.IdentityAPIURL) logger.Infof("terms discharge location: %s", config.TermsLocation) bparams := bakery.NewServiceParams{ // TODO The location is attached to any macaroons that we // mint. 
Currently we don't know the location of the current // service. We potentially provide a way to configure this, // but it probably doesn't matter, as nothing currently uses // the macaroon location for anything. Location: "charmstore", Locator: config.PublicKeyLocator, } pool, err := NewPool(db, si, &bparams, config) if err != nil { return nil, errgo.Notef(err, "cannot make store") } store := pool.Store() defer store.Close() if err := migrate(store.DB); err != nil { pool.Close() return nil, errgo.Notef(err, "database migration failed") } store.Go(func(store *Store) { if err := store.syncSearch(); err != nil { logger.Errorf("Cannot populate elasticsearch: %v", err) } }) srv := &Server{ pool: pool, mux: router.NewServeMux(), } // Version independent API. handle(srv.mux, "/debug", newServiceDebugHandler(pool, config, srv.mux)) for vers, newAPI := range versions { root := "/" + vers h := newAPI(pool, config, root) handle(srv.mux, root, h) srv.handlers = append(srv.handlers, h) } return srv, nil } type Server struct { pool *Pool mux *router.ServeMux handlers []HTTPCloseHandler } // ServeHTTP implements http.Handler.ServeHTTP. func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { s.mux.ServeHTTP(w, req) } // Close closes the server. It must be called when the server // is finished with. func (s *Server) Close() { s.pool.Close() for _, h := range s.handlers { h.Close() } s.handlers = nil } // Pool returns the Pool used by the server. func (s *Server) Pool() *Pool { return s.pool } func handle(mux *router.ServeMux, path string, handler http.Handler) { if path != "/" { handler = http.StripPrefix(path, handler) path += "/" } mux.Handle(path, handler) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/server_test.go0000664000175000017500000001052212672604603030253 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( "net/http" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" ) var serverParams = ServerParams{ AuthUsername: "test-user", AuthPassword: "test-password", } type ServerSuite struct { storetesting.IsolatedMgoESSuite } var _ = gc.Suite(&ServerSuite{}) func (s *ServerSuite) TestNewServerWithNoVersions(c *gc.C) { h, err := NewServer(s.Session.DB("foo"), nil, serverParams, nil) c.Assert(err, gc.ErrorMatches, `charm store server must serve at least one version of the API`) c.Assert(h, gc.IsNil) } type versionResponse struct { Version string Path string } func (s *ServerSuite) TestNewServerWithVersions(c *gc.C) { db := s.Session.DB("foo") serveVersion := func(vers string) NewAPIHandlerFunc { return func(p *Pool, config ServerParams, _ string) HTTPCloseHandler { return nopCloseHandler{ router.HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { return versionResponse{ Version: vers, Path: req.URL.Path, }, nil }), } } } h, err := NewServer(db, nil, serverParams, map[string]NewAPIHandlerFunc{ "version1": serveVersion("version1"), }) c.Assert(err, gc.IsNil) defer h.Close() assertServesVersion(c, h, "version1") assertDoesNotServeVersion(c, h, "version2") assertDoesNotServeVersion(c, h, "version3") h, err = NewServer(db, nil, serverParams, map[string]NewAPIHandlerFunc{ "version1": serveVersion("version1"), "version2": serveVersion("version2"), }) c.Assert(err, gc.IsNil) defer h.Close() assertServesVersion(c, h, "version1") assertServesVersion(c, h, "version2") assertDoesNotServeVersion(c, h, "version3") h, err = NewServer(db, nil, serverParams, map[string]NewAPIHandlerFunc{ "version1": serveVersion("version1"), "version2": serveVersion("version2"), "version3": serveVersion("version3"), }) c.Assert(err, gc.IsNil) defer h.Close() 
assertServesVersion(c, h, "version1") assertServesVersion(c, h, "version2") assertServesVersion(c, h, "version3") h, err = NewServer(db, nil, serverParams, map[string]NewAPIHandlerFunc{ "version1": serveVersion("version1"), "": serveVersion(""), }) c.Assert(err, gc.IsNil) defer h.Close() assertServesVersion(c, h, "") assertServesVersion(c, h, "version1") } func (s *ServerSuite) TestNewServerWithConfig(c *gc.C) { serveConfig := func(p *Pool, config ServerParams, _ string) HTTPCloseHandler { return nopCloseHandler{ router.HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { return config, nil }), } } h, err := NewServer(s.Session.DB("foo"), nil, serverParams, map[string]NewAPIHandlerFunc{ "version1": serveConfig, }) c.Assert(err, gc.IsNil) defer h.Close() httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: h, URL: "/version1/some/path", ExpectBody: serverParams, }) } func (s *ServerSuite) TestNewServerWithElasticSearch(c *gc.C) { serveConfig := func(p *Pool, config ServerParams, _ string) HTTPCloseHandler { return nopCloseHandler{ router.HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { return config, nil }), } } h, err := NewServer(s.Session.DB("foo"), &SearchIndex{s.ES, s.TestIndex}, serverParams, map[string]NewAPIHandlerFunc{ "version1": serveConfig, }) c.Assert(err, gc.IsNil) defer h.Close() httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: h, URL: "/version1/some/path", ExpectBody: serverParams, }) } func assertServesVersion(c *gc.C, h http.Handler, vers string) { path := vers if path != "" { path = "/" + path } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: h, URL: path + "/some/path", ExpectBody: versionResponse{ Version: vers, Path: "/some/path", }, }) } func assertDoesNotServeVersion(c *gc.C, h http.Handler, vers string) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: h, URL: "/" + vers + "/some/path", }) c.Assert(rec.Code, gc.Equals, 
http.StatusNotFound) } type nopCloseHandler struct { http.Handler } func (nopCloseHandler) Close() { } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/elasticsearch.go0000664000175000017500000002235512672604603030527 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import "encoding/json" var ( esIndex = mustParseJSON(esIndexJSON) esMapping = mustParseJSON(esMappingJSON) ) const esSettingsVersion = 8 func mustParseJSON(s string) interface{} { var j json.RawMessage if err := json.Unmarshal([]byte(s), &j); err != nil { panic(err) } return &j } const esIndexJSON = ` { "settings": { "number_of_shards": 1, "analysis": { "filter": { "n3_20grams_filter": { "type": "nGram", "min_gram": 3, "max_gram": 20 } }, "analyzer": { "n3_20grams": { "type": "custom", "tokenizer": "standard", "filter": [ "lowercase", "n3_20grams_filter" ] } } } } } ` const esMappingJSON = ` { "entity" : { "dynamic" : "false", "properties" : { "URL" : { "type" : "multi_field", "fields" : { "URL" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "ngrams" : { "type" : "string", "analyzer" : "n3_20grams", "include_in_all" : false } } }, "PromulgatedURL" : { "type" : "string", "index": "not_analyzed", "index_options" : "docs" }, "BaseURL" : { "type" : "string", "index": "not_analyzed", "index_options" : "docs" }, "User" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "Name" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "Revision" : { "type" : "integer", "index" : "not_analyzed" }, "Series" : { "type" : "multi_field", "fields" : { "Series" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "ngrams" : { "type" : "string", "analyzer" : 
"n3_20grams", "include_in_all" : false } } }, "TotalDownloads": { "type": "long" }, "BlobHash" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "UploadTime" : { "type" : "date", "format" : "dateOptionalTime" }, "CharmMeta" : { "dynamic" : "false", "properties" : { "Name" : { "type" : "multi_field", "fields" : { "Name" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "ngrams" : { "type" : "string", "analyzer" : "n3_20grams", "include_in_all" : false } } }, "Summary" : { "type" : "string" }, "Description" : { "type" : "string" }, "Provides" : { "dynamic" : "false", "properties" : { "Name" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "Role" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "Interface" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "Scope" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" } } }, "Requires" : { "dynamic" : "false", "properties" : { "Name" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "Role" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "Interface" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "Scope" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" } } }, "Peers" : { "dynamic" : "false", "properties" : { "Name" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "Role" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "Interface" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "Scope" : { "type" : "string", 
"index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" } } }, "Categories" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "Tags" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" } } }, "charmactions" : { "dynamic" : "false", "properties" : { "description" : { "type" : "string" }, "action_name" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" } } }, "CharmProvidedInterfaces" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "CharmRequiredInterfaces" : { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "BundleData" : { "type": "object", "dynamic": "false", "properties" : { "Services" : { "type": "object", "dynamic": "false", "properties": { "Charm": { "type" : "string", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "NumUnits": { "type" : "integer", "index": "not_analyzed" } } }, "Series" : { "type" : "string" }, "Relations" : { "type" : "string", "index": "not_analyzed" }, "Tags" : { "type" : "string", "index": "not_analyzed", "omit_norms" : true, "index_options" : "docs" } } }, "BundleReadMe" : { "type": "string", "index": "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "BundleCharms": { "type": "string", "index": "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "BundleMachineCount": { "type": "integer" }, "BundleUnitCount": { "type": "integer" }, "TotalDownloads": { "type": "long" }, "Public": { "type": "boolean", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "ReadACLs" : { "type" : "string", "index": "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "SingleSeries": { "type": "boolean", "index" : "not_analyzed", "omit_norms" : true, "index_options" : "docs" }, "AllSeries": { "type": "boolean", "index" : 
"not_analyzed", "omit_norms" : true, "index_options" : "docs" } } } } ` charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/stats_test.go0000664000175000017500000005467412672604603030123 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package charmstore_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( "fmt" "strconv" "sync" "time" jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/mgo.v2/bson" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" ) type StatsSuite struct { jujutesting.IsolatedMgoSuite store *charmstore.Store } var _ = gc.Suite(&StatsSuite{}) func (s *StatsSuite) SetUpTest(c *gc.C) { s.IsolatedMgoSuite.SetUpTest(c) pool, err := charmstore.NewPool(s.Session.DB("foo"), nil, nil, charmstore.ServerParams{}) c.Assert(err, gc.IsNil) s.store = pool.Store() pool.Close() } func (s *StatsSuite) TearDownTest(c *gc.C) { s.store.Close() s.IsolatedMgoSuite.TearDownTest(c) } func (s *StatsSuite) TestSumCounters(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } req := charmstore.CounterRequest{Key: []string{"a"}} cs, err := s.store.Counters(&req) c.Assert(err, gc.IsNil) c.Assert(cs, gc.DeepEquals, []charmstore.Counter{{Key: req.Key, Count: 0}}) for i := 0; i < 10; i++ { err := s.store.IncCounter([]string{"a", "b", "c"}) c.Assert(err, gc.IsNil) } for i := 0; i < 7; i++ { s.store.IncCounter([]string{"a", "b"}) c.Assert(err, gc.IsNil) } for i := 0; i < 3; i++ { s.store.IncCounter([]string{"a", "z", "b"}) c.Assert(err, gc.IsNil) } tests := []struct { key []string prefix bool result int64 }{ {[]string{"a", "b", "c"}, false, 10}, {[]string{"a", "b"}, 
false, 7}, {[]string{"a", "z", "b"}, false, 3}, {[]string{"a", "b", "c"}, true, 0}, {[]string{"a", "b", "c", "d"}, false, 0}, {[]string{"a", "b"}, true, 10}, {[]string{"a"}, true, 20}, {[]string{"b"}, true, 0}, } for _, t := range tests { c.Logf("Test: %#v\n", t) req = charmstore.CounterRequest{Key: t.key, Prefix: t.prefix} cs, err := s.store.Counters(&req) c.Assert(err, gc.IsNil) c.Assert(cs, gc.DeepEquals, []charmstore.Counter{{Key: t.key, Prefix: t.prefix, Count: t.result}}) } // High-level interface works. Now check that the data is // stored correctly. counters := s.store.DB.StatCounters() docs1, err := counters.Count() c.Assert(err, gc.IsNil) if docs1 != 3 && docs1 != 4 { c.Errorf("Expected 3 or 4 docs in counters collection, got %d", docs1) } // Hack times so that the next operation adds another document. err = counters.Update(nil, bson.D{{"$set", bson.D{{"t", 1}}}}) c.Check(err, gc.IsNil) err = s.store.IncCounter([]string{"a", "b", "c"}) c.Assert(err, gc.IsNil) docs2, err := counters.Count() c.Assert(err, gc.IsNil) c.Assert(docs2, gc.Equals, docs1+1) req = charmstore.CounterRequest{Key: []string{"a", "b", "c"}} cs, err = s.store.Counters(&req) c.Assert(err, gc.IsNil) c.Assert(cs, gc.DeepEquals, []charmstore.Counter{{Key: req.Key, Count: 11}}) req = charmstore.CounterRequest{Key: []string{"a"}, Prefix: true} cs, err = s.store.Counters(&req) c.Assert(err, gc.IsNil) c.Assert(cs, gc.DeepEquals, []charmstore.Counter{{Key: req.Key, Prefix: true, Count: 21}}) } func (s *StatsSuite) TestCountersReadOnlySum(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } // Summing up an unknown key shouldn't add the key to the database. 
req := charmstore.CounterRequest{Key: []string{"a", "b", "c"}} _, err := s.store.Counters(&req) c.Assert(err, gc.IsNil) tokens := s.Session.DB("juju").C("stat.tokens") n, err := tokens.Count() c.Assert(err, gc.IsNil) c.Assert(n, gc.Equals, 0) } func (s *StatsSuite) TestCountersTokenCaching(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } assertSum := func(i int, want int64) { req := charmstore.CounterRequest{Key: []string{strconv.Itoa(i)}} cs, err := s.store.Counters(&req) c.Assert(err, gc.IsNil) c.Assert(cs[0].Count, gc.Equals, want) } assertSum(100000, 0) const genSize = 1024 // All of these will be cached, as we have two generations // of genSize entries each. for i := 0; i < genSize*2; i++ { err := s.store.IncCounter([]string{strconv.Itoa(i)}) c.Assert(err, gc.IsNil) } // Now go behind the scenes and corrupt all the tokens. tokens := s.store.DB.StatTokens() iter := tokens.Find(nil).Iter() var t struct { Id int "_id" Token string "t" } for iter.Next(&t) { err := tokens.UpdateId(t.Id, bson.M{"$set": bson.M{"t": "corrupted" + t.Token}}) c.Assert(err, gc.IsNil) } c.Assert(iter.Err(), gc.IsNil) // We can consult the counters for the cached entries still. // First, check that the newest generation is good. for i := genSize; i < genSize*2; i++ { assertSum(i, 1) } // Now, we can still access a single entry of the older generation, // but this will cause the generations to flip and thus the rest // of the old generation will go away as the top half of the // entries is turned into the old generation. assertSum(0, 1) // Now we've lost access to the rest of the old generation. for i := 1; i < genSize; i++ { assertSum(i, 0) } // But we still have all of the top half available since it was // moved into the old generation. 
for i := genSize; i < genSize*2; i++ { assertSum(i, 1) } } func (s *StatsSuite) TestCounterTokenUniqueness(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } var wg0, wg1 sync.WaitGroup wg0.Add(10) wg1.Add(10) for i := 0; i < 10; i++ { go func() { wg0.Done() wg0.Wait() defer wg1.Done() err := s.store.IncCounter([]string{"a"}) c.Check(err, gc.IsNil) }() } wg1.Wait() req := charmstore.CounterRequest{Key: []string{"a"}} cs, err := s.store.Counters(&req) c.Assert(err, gc.IsNil) c.Assert(cs[0].Count, gc.Equals, int64(10)) } func (s *StatsSuite) TestListCounters(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } incs := [][]string{ {"c", "b", "a"}, // Assign internal id c < id b < id a, to make sorting slightly trickier. {"a"}, {"a", "c"}, {"a", "b"}, {"a", "b", "c"}, {"a", "b", "c"}, {"a", "b", "e"}, {"a", "b", "d"}, {"a", "f", "g"}, {"a", "f", "h"}, {"a", "i"}, {"a", "i", "j"}, {"k", "l"}, } for _, key := range incs { err := s.store.IncCounter(key) c.Assert(err, gc.IsNil) } tests := []struct { prefix []string result []charmstore.Counter }{ { []string{"a"}, []charmstore.Counter{ {Key: []string{"a", "b"}, Prefix: true, Count: 4}, {Key: []string{"a", "f"}, Prefix: true, Count: 2}, {Key: []string{"a", "b"}, Prefix: false, Count: 1}, {Key: []string{"a", "c"}, Prefix: false, Count: 1}, {Key: []string{"a", "i"}, Prefix: false, Count: 1}, {Key: []string{"a", "i"}, Prefix: true, Count: 1}, }, }, { []string{"a", "b"}, []charmstore.Counter{ {Key: []string{"a", "b", "c"}, Prefix: false, Count: 2}, {Key: []string{"a", "b", "d"}, Prefix: false, Count: 1}, {Key: []string{"a", "b", "e"}, Prefix: false, Count: 1}, }, }, { []string{"z"}, []charmstore.Counter(nil), }, } // Use a different store to exercise cache filling. 
pool, err := charmstore.NewPool(s.store.DB.Database, nil, nil, charmstore.ServerParams{}) c.Assert(err, gc.IsNil) st := pool.Store() defer st.Close() pool.Close() for i := range tests { req := &charmstore.CounterRequest{Key: tests[i].prefix, Prefix: true, List: true} result, err := st.Counters(req) c.Assert(err, gc.IsNil) c.Assert(result, gc.DeepEquals, tests[i].result) } } func (s *StatsSuite) TestListCountersBy(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } incs := []struct { key []string day int }{ {[]string{"a"}, 1}, {[]string{"a"}, 1}, {[]string{"b"}, 1}, {[]string{"a", "b"}, 1}, {[]string{"a", "c"}, 1}, {[]string{"a"}, 3}, {[]string{"a", "b"}, 3}, {[]string{"b"}, 9}, {[]string{"b"}, 9}, {[]string{"a", "c", "d"}, 9}, {[]string{"a", "c", "e"}, 9}, {[]string{"a", "c", "f"}, 9}, } day := func(i int) time.Time { return time.Date(2012, time.May, i, 0, 0, 0, 0, time.UTC) } for i, inc := range incs { t := day(inc.day) // Ensure each entry is unique by adding // a sufficient increment for each test. 
t = t.Add(time.Duration(i) * charmstore.StatsGranularity) err := s.store.IncCounterAtTime(inc.key, t) c.Assert(err, gc.IsNil) } tests := []struct { request charmstore.CounterRequest result []charmstore.Counter }{ { charmstore.CounterRequest{ Key: []string{"a"}, Prefix: false, List: false, By: charmstore.ByDay, }, []charmstore.Counter{ {Key: []string{"a"}, Prefix: false, Count: 2, Time: day(1)}, {Key: []string{"a"}, Prefix: false, Count: 1, Time: day(3)}, }, }, { charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: false, By: charmstore.ByDay, }, []charmstore.Counter{ {Key: []string{"a"}, Prefix: true, Count: 2, Time: day(1)}, {Key: []string{"a"}, Prefix: true, Count: 1, Time: day(3)}, {Key: []string{"a"}, Prefix: true, Count: 3, Time: day(9)}, }, }, { charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: false, By: charmstore.ByDay, Start: day(2), }, []charmstore.Counter{ {Key: []string{"a"}, Prefix: true, Count: 1, Time: day(3)}, {Key: []string{"a"}, Prefix: true, Count: 3, Time: day(9)}, }, }, { charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: false, By: charmstore.ByDay, Stop: day(4), }, []charmstore.Counter{ {Key: []string{"a"}, Prefix: true, Count: 2, Time: day(1)}, {Key: []string{"a"}, Prefix: true, Count: 1, Time: day(3)}, }, }, { charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: false, By: charmstore.ByDay, Start: day(3), Stop: day(8), }, []charmstore.Counter{ {Key: []string{"a"}, Prefix: true, Count: 1, Time: day(3)}, }, }, { charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: true, By: charmstore.ByDay, }, []charmstore.Counter{ {Key: []string{"a", "b"}, Prefix: false, Count: 1, Time: day(1)}, {Key: []string{"a", "c"}, Prefix: false, Count: 1, Time: day(1)}, {Key: []string{"a", "b"}, Prefix: false, Count: 1, Time: day(3)}, {Key: []string{"a", "c"}, Prefix: true, Count: 3, Time: day(9)}, }, }, { charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: false, By: 
charmstore.ByWeek, }, []charmstore.Counter{ {Key: []string{"a"}, Prefix: true, Count: 3, Time: day(6)}, {Key: []string{"a"}, Prefix: true, Count: 3, Time: day(13)}, }, }, { charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: true, By: charmstore.ByWeek, }, []charmstore.Counter{ {Key: []string{"a", "b"}, Prefix: false, Count: 2, Time: day(6)}, {Key: []string{"a", "c"}, Prefix: false, Count: 1, Time: day(6)}, {Key: []string{"a", "c"}, Prefix: true, Count: 3, Time: day(13)}, }, }, } for _, test := range tests { result, err := s.store.Counters(&test.request) c.Assert(err, gc.IsNil) c.Assert(result, gc.DeepEquals, test.result) } } type testStatsEntity struct { id *router.ResolvedURL lastDay int lastWeek int lastMonth int total int legacyTotal int } var archiveDownloadCountsTests = []struct { about string charms []testStatsEntity id *charm.URL expectThisRevision charmstore.AggregatedCounts expectAllRevisions charmstore.AggregatedCounts }{{ about: "single revision", charms: []testStatsEntity{{ id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), lastDay: 1, lastWeek: 2, lastMonth: 3, total: 4, legacyTotal: 0, }}, id: charm.MustParseURL("~charmers/trusty/wordpress-0"), expectThisRevision: charmstore.AggregatedCounts{ LastDay: 1, LastWeek: 3, LastMonth: 6, Total: 10, }, expectAllRevisions: charmstore.AggregatedCounts{ LastDay: 1, LastWeek: 3, LastMonth: 6, Total: 10, }, }, { about: "single revision with legacy count", charms: []testStatsEntity{{ id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), lastDay: 1, lastWeek: 2, lastMonth: 3, total: 4, legacyTotal: 10, }}, id: charm.MustParseURL("~charmers/trusty/wordpress-0"), expectThisRevision: charmstore.AggregatedCounts{ LastDay: 1, LastWeek: 3, LastMonth: 6, Total: 20, }, expectAllRevisions: charmstore.AggregatedCounts{ LastDay: 1, LastWeek: 3, LastMonth: 6, Total: 20, }, }, { about: "multiple revisions", charms: []testStatsEntity{{ id: 
charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), lastDay: 1, lastWeek: 2, lastMonth: 3, total: 4, legacyTotal: 0, }, { id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-1"), lastDay: 2, lastWeek: 3, lastMonth: 4, total: 5, legacyTotal: 0, }}, id: charm.MustParseURL("~charmers/trusty/wordpress-1"), expectThisRevision: charmstore.AggregatedCounts{ LastDay: 2, LastWeek: 5, LastMonth: 9, Total: 14, }, expectAllRevisions: charmstore.AggregatedCounts{ LastDay: 3, LastWeek: 8, LastMonth: 15, Total: 24, }, }, { about: "multiple revisions with legacy count", charms: []testStatsEntity{{ id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), lastDay: 1, lastWeek: 2, lastMonth: 3, total: 4, legacyTotal: 100, }, { id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-1"), lastDay: 2, lastWeek: 3, lastMonth: 4, total: 5, legacyTotal: 100, }}, id: charm.MustParseURL("~charmers/trusty/wordpress-1"), expectThisRevision: charmstore.AggregatedCounts{ LastDay: 2, LastWeek: 5, LastMonth: 9, Total: 114, }, expectAllRevisions: charmstore.AggregatedCounts{ LastDay: 3, LastWeek: 8, LastMonth: 15, Total: 124, }, }, { about: "promulgated revision", charms: []testStatsEntity{{ id: charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-0"), lastDay: 1, lastWeek: 2, lastMonth: 3, total: 4, legacyTotal: 0, }}, id: charm.MustParseURL("trusty/wordpress-0"), expectThisRevision: charmstore.AggregatedCounts{ LastDay: 1, LastWeek: 3, LastMonth: 6, Total: 10, }, expectAllRevisions: charmstore.AggregatedCounts{ LastDay: 1, LastWeek: 3, LastMonth: 6, Total: 10, }, }, { about: "promulgated revision with legacy count", charms: []testStatsEntity{{ id: charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-0"), lastDay: 1, lastWeek: 2, lastMonth: 3, total: 4, legacyTotal: 10, }}, id: charm.MustParseURL("trusty/wordpress-0"), expectThisRevision: charmstore.AggregatedCounts{ LastDay: 1, LastWeek: 3, LastMonth: 6, Total: 20, }, 
expectAllRevisions: charmstore.AggregatedCounts{ LastDay: 1, LastWeek: 3, LastMonth: 6, Total: 20, }, }, { about: "promulgated revision with changed owner", charms: []testStatsEntity{{ id: charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-0"), lastDay: 1, lastWeek: 10, lastMonth: 100, total: 1000, legacyTotal: 0, }, { id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-1"), lastDay: 2, lastWeek: 20, lastMonth: 200, total: 2000, legacyTotal: 0, }, { id: charmstore.MustParseResolvedURL("~wordpress-charmers/trusty/wordpress-0"), lastDay: 3, lastWeek: 30, lastMonth: 300, total: 3000, legacyTotal: 0, }, { id: charmstore.MustParseResolvedURL("1 ~wordpress-charmers/trusty/wordpress-1"), lastDay: 4, lastWeek: 40, lastMonth: 400, total: 4000, legacyTotal: 0, }}, id: charm.MustParseURL("trusty/wordpress-1"), expectThisRevision: charmstore.AggregatedCounts{ LastDay: 4, LastWeek: 44, LastMonth: 444, Total: 4444, }, expectAllRevisions: charmstore.AggregatedCounts{ LastDay: 5, LastWeek: 55, LastMonth: 555, Total: 5555, }, }} func (s *StatsSuite) TestArchiveDownloadCounts(c *gc.C) { s.PatchValue(&charmstore.LegacyDownloadCountsEnabled, true) for i, test := range archiveDownloadCountsTests { c.Logf("%d: %s", i, test.about) // Clear everything charmstore.StatsCacheEvictAll(s.store) s.store.DB.Entities().RemoveAll(nil) s.store.DB.StatCounters().RemoveAll(nil) for _, charm := range test.charms { ch := storetesting.Charms.CharmDir(charm.id.URL.Name) err := s.store.AddCharmWithArchive(charm.id, ch) c.Assert(err, gc.IsNil) url := charm.id.URL now := time.Now() setDownloadCounts(c, s.store, &url, now, charm.lastDay) setDownloadCounts(c, s.store, &url, now.Add(-2*24*time.Hour), charm.lastWeek) setDownloadCounts(c, s.store, &url, now.Add(-10*24*time.Hour), charm.lastMonth) setDownloadCounts(c, s.store, &url, now.Add(-100*24*time.Hour), charm.total) if charm.id.PromulgatedRevision > -1 { url.Revision = charm.id.PromulgatedRevision url.User = "" setDownloadCounts(c, 
s.store, &url, now, charm.lastDay) setDownloadCounts(c, s.store, &url, now.Add(-2*24*time.Hour), charm.lastWeek) setDownloadCounts(c, s.store, &url, now.Add(-10*24*time.Hour), charm.lastMonth) setDownloadCounts(c, s.store, &url, now.Add(-100*24*time.Hour), charm.total) } extraInfo := map[string][]byte{ params.LegacyDownloadStats: []byte(fmt.Sprintf("%d", charm.legacyTotal)), } err = s.store.UpdateEntity(charm.id, bson.D{{ "$set", bson.D{{"extrainfo", extraInfo}}, }}) c.Assert(err, gc.IsNil) } thisRevision, allRevisions, err := s.store.ArchiveDownloadCounts(test.id, true) c.Assert(err, gc.IsNil) c.Assert(thisRevision, jc.DeepEquals, test.expectThisRevision) c.Assert(allRevisions, jc.DeepEquals, test.expectAllRevisions) } } func setDownloadCounts(c *gc.C, s *charmstore.Store, id *charm.URL, t time.Time, n int) { kind := params.StatsArchiveDownload if id.User == "" { kind = params.StatsArchiveDownloadPromulgated } key := charmstore.EntityStatsKey(id, kind) for i := 0; i < n; i++ { err := s.IncCounterAtTime(key, t) c.Assert(err, gc.IsNil) } } func (s *StatsSuite) TestIncrementDownloadCounts(c *gc.C) { ch := storetesting.Charms.CharmDir("wordpress") id := charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-1") err := s.store.AddCharmWithArchive(id, ch) c.Assert(err, gc.IsNil) err = s.store.IncrementDownloadCounts(id) c.Assert(err, gc.IsNil) expect := charmstore.AggregatedCounts{ LastDay: 1, LastWeek: 1, LastMonth: 1, Total: 1, } thisRevision, allRevisions, err := s.store.ArchiveDownloadCounts(charm.MustParseURL("~charmers/trusty/wordpress-1"), true) c.Assert(err, gc.IsNil) c.Assert(thisRevision, jc.DeepEquals, expect) c.Assert(allRevisions, jc.DeepEquals, expect) thisRevision, allRevisions, err = s.store.ArchiveDownloadCounts(charm.MustParseURL("trusty/wordpress-0"), true) c.Assert(err, gc.IsNil) c.Assert(thisRevision, jc.DeepEquals, expect) c.Assert(allRevisions, jc.DeepEquals, expect) } func (s *StatsSuite) 
TestIncrementDownloadCountsOnPromulgatedMultiSeriesCharm(c *gc.C) { ch := storetesting.Charms.CharmDir("multi-series") id := charmstore.MustParseResolvedURL("0 ~charmers/wordpress-1") err := s.store.AddCharmWithArchive(id, ch) c.Assert(err, gc.IsNil) err = s.store.IncrementDownloadCounts(id) c.Assert(err, gc.IsNil) expect := charmstore.AggregatedCounts{ LastDay: 1, LastWeek: 1, LastMonth: 1, Total: 1, } thisRevision, allRevisions, err := s.store.ArchiveDownloadCounts(charm.MustParseURL("~charmers/wordpress-1"), true) c.Assert(err, gc.IsNil) c.Assert(thisRevision, jc.DeepEquals, expect) c.Assert(allRevisions, jc.DeepEquals, expect) thisRevision, allRevisions, err = s.store.ArchiveDownloadCounts(charm.MustParseURL("wordpress-0"), true) c.Assert(err, gc.IsNil) c.Assert(thisRevision, jc.DeepEquals, expect) c.Assert(allRevisions, jc.DeepEquals, expect) } func (s *StatsSuite) TestIncrementDownloadCountsOnIdWithPreferredSeries(c *gc.C) { ch := storetesting.Charms.CharmDir("multi-series") id := charmstore.MustParseResolvedURL("0 ~charmers/wordpress-1") id.PreferredSeries = "trusty" err := s.store.AddCharmWithArchive(id, ch) c.Assert(err, gc.IsNil) err = s.store.IncrementDownloadCounts(id) c.Assert(err, gc.IsNil) expect := charmstore.AggregatedCounts{ LastDay: 1, LastWeek: 1, LastMonth: 1, Total: 1, } thisRevision, allRevisions, err := s.store.ArchiveDownloadCounts(charm.MustParseURL("~charmers/wordpress-1"), true) c.Assert(err, gc.IsNil) c.Assert(thisRevision, jc.DeepEquals, expect) c.Assert(allRevisions, jc.DeepEquals, expect) thisRevision, allRevisions, err = s.store.ArchiveDownloadCounts(charm.MustParseURL("wordpress-0"), true) c.Assert(err, gc.IsNil) c.Assert(thisRevision, jc.DeepEquals, expect) c.Assert(allRevisions, jc.DeepEquals, expect) } func (s *StatsSuite) TestIncrementDownloadCountsCaching(c *gc.C) { ch := storetesting.Charms.CharmDir("wordpress") id := charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-1") err := s.store.AddCharmWithArchive(id, ch) 
c.Assert(err, gc.IsNil) err = s.store.IncrementDownloadCounts(id) c.Assert(err, gc.IsNil) expect := charmstore.AggregatedCounts{ LastDay: 1, LastWeek: 1, LastMonth: 1, Total: 1, } expectAfter := charmstore.AggregatedCounts{ LastDay: 2, LastWeek: 2, LastMonth: 2, Total: 2, } thisRevision, allRevisions, err := s.store.ArchiveDownloadCounts(charm.MustParseURL("~charmers/trusty/wordpress-1"), false) c.Assert(err, gc.IsNil) c.Assert(thisRevision, jc.DeepEquals, expect) c.Assert(allRevisions, jc.DeepEquals, expect) err = s.store.IncrementDownloadCounts(id) thisRevision, allRevisions, err = s.store.ArchiveDownloadCounts(charm.MustParseURL("~charmers/trusty/wordpress-1"), false) c.Assert(err, gc.IsNil) c.Assert(thisRevision, jc.DeepEquals, expect) c.Assert(allRevisions, jc.DeepEquals, expect) thisRevision, allRevisions, err = s.store.ArchiveDownloadCounts(charm.MustParseURL("~charmers/trusty/wordpress-1"), true) c.Assert(err, gc.IsNil) c.Assert(thisRevision, jc.DeepEquals, expectAfter) c.Assert(allRevisions, jc.DeepEquals, expectAfter) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrationdump.4.4.3.zip0000664000175000017500000002307112672604603031432 0ustar marcomarcoPK vcs-status2Ñ3Ñ3Ö5³ÔMO5¶4MN1àRðUÈÌ+I-ÊKÌÑOÎH,Ê-.É/JÕÏÍL/J,ÉÌÏ+ŽO)Í-ˆ/I-.ÑKÏ'B9HÊëÿÿPK™l*µNwPKjuju/base_entities.bsonÔ“KKÃ@…OÓ*ÅŠèFеԺ,þ×î%KHfÂLFqãßWzÛLÒ4™FPí¦éÜÇœóõd£xq„s¡ž¼„3_¥¤ô(S25ÉÔÏ) ŒˆBß[n(ò#ó“w­ù¤âÒ“Šsr×Ðè‘™¥$òo/ó„Ÿù©-u¿v¶ˆaf‚$yÊhRUÛ%„‹&!Å?á0¯9Õ® xçO'ŠƒÅiG,{—h6¦ž¥ JôUQÛä/Ÿ¯%xc øG x7¿@f+ YŸ^' „u7baqܹ”r÷TI“UJ/‹ZE`¼Ð§eJ M.?·º@²‡fX>I¹oX—ÏcîI»²µü7êÕ5ì­jñÀkiüÌ·dGZš|FJ,¦­óá’¼5¾»8©w6¬O*µG…¢úŽR¬«Ô6þõUmÛÿÿPKÞ´s[V"PKjuju/entities.bsonì™ËŽ#I†£\5ÐÓBÐj@h@â: ÄÐtÜ/À†%Ìr¶H(.'ºR*;M¦Ý¨YŒ{6<kx7@<v3 VpòâéL;Ëíêr1Ê^”2#NDdDœï?'¢¾uAÈìE"ß%„Äú‡ÆK_Í¡ªŸ†õ"]ÁÓeUÎ×WÏü RWò„’Yð5¬«+òíF;Öh{U†K__Æ9*Îx¢àÿ²È…VÖdªtt<0•úäS¢5&& ÜëE´QiöB$Es> Y:ðÂháÔËá¸Òä'8¢’K/iΠŒr:mµLÔÉ$,üm‹dõRâ7èLŒV(­ëz\ø9w°;¥ZŠ6Yã,˜t™6?O.ºù¶+Q“¯¡éŒ’/të3XBÎ;ÃäWž¼õç5TÏ‹5ùqó6´ý^ÓKÛ#ù>«-Öóõ¢XÕ¤ÿ=êºûxY, –ëŪ-Ÿuåø„Sø,–„²*Ï6 
šN^Z?lGóqU”‹º‹å"Ïú—9àg_´ø9Ï‹©X¬ Ê¾™Á[M]m¿\ÕníƒÏáª\ΡrÖ.ìãñä6ž³¬à¹ú»ÏhÌ£ùÐãOº­‹_ùþY¿gƒI?A‹¢ÆeïvmXÓöõΓö@ùhÜzUkÞnx·žGc¿½^^•>­ \ÿ¿ÿñ£Ÿ€å³5¶"o7Cõ@ò¯ó^ Þ#Ó±({5âÛÛí¦Ü™`ô$·‰/cù´óÜS¥øþnýÇ{°w“|‡ì„xˆE=Ò !îïìÚ¿tÇ1åNi—YÎŒgÀfÈVÃ’±š ̳Ș1IZA]2.ÑÈ%¾c´K km²:,y†O­Ü:-9Û¥ÜG¥d47Æ«™ÌŽSÇmÂxo¥1ø’"ƒ,\ÒÙgà!É@ö™D9e[”·ýpÀóà Çž š¼Dï|„]Ûíù½/6årPÝ×·8þŒl@:ë\e=ŸûêEÿ– ŽUѶíJÔk”w0—8òª«:6ØÛª5ÁóÝûÊ$Ï·v˜-žß»yäïšÆnšþíz¶ö=þ›q/êõrYVظ7ýi#аÅH#þ²G#þ±I ¾Ù}⇡ OWÕº^½Ø òCøÊÀxluOäÁœäáfòФGÛžrObÄïØ{ò òƒ#âÿ:"ÓÌ!?Ú$MÎ: ðø•«¢ë{o"0°Û"Ýhª.•£ 2ÏQgˆ&h‹‚3Á„ˆÉ›t>Ñ .­e¬@¼$8©©tT[‘A@“láZË]Ò1™“‚k`,á>eŃ ÊK™’I§^3îð{˜ã‚rÅ­v‹ÞE}˜$3ÃŽt…©$f›°î¿!¤ÿ–‰ô‹~7)Ù "36t°oßÖ«rYDr|…hˆ‘{íÊÃÝûؤ<ÜÚѶäáý×”‡¶ÙãQ;ÂpÓmªÆïþv}žðäFg öZg o÷2Y‚ÇÓ›`¢—œÁý4^âH˨%½<ã¡áªcˆÈ1p+SöV'@ḫÜHàZRƬ¬1$LÜH‹&²9¸¡öSí•9‡¸d&™Ø„ p4’#LfJ[šF{˜„è7SB> ôâS*ð¼[5<¾¾–OÎ>TܽÓL_ÜÖs¶4AíÕ„¦ò&‡ 68T´m¯=T¨ë„äU‡Šöˆ…9(ÏØ'£@°;ÂtĘžóÙäô~°;½lÜòWÅÕ 2åwÁ ~'L²RИ]ˆ*$)L Ùá¤Ò;µNL0 I˜nz ¿!GN²næuŠùHÿo£ÀÃ4~®b£Àµ¥®OsÆi ù÷æ.â]ÒÉA]Ρ\À')F]®W—P-ÚOžS¾ºÕ`dØ H³è}}¿©ŸoK†–¯¾Š9H'?“í5GÐ;:ðþßßòŒÏׇÈE¤#¹øÃ®\ïöåþ>ðÅ]‚NNàŸÀ/Fàÿéþ‘À?#ÿÜ€¿¹UØpÜÿb̽ºKîß"dêö›×õGžÜA½QÿçõŸõú.©ÿ 9Q¢~@½ÿçüDý‘¨'ä¿ÿÿPKü.•¦,PKjuju/entitystore.chunks.bsonÊdd``ÏLa{РŸ·Td> {ZfNj1š(³@°¦$–$2hõ1x3³ˆ0p!:àâÜÔ’DZ½ÊÄÜœSÞg|·=ÒÒ;{ÖWgËæà îÉÝFÜ^FŸ?¨qªÿÿ?À›Ã@ºÜQÈSÅk>'¹:ºøºêå¦xÒ?uÆ›fËQG 6°ŒL" CçÅ0ÅÉèZ‘åDÑ–À€äšoV & ,Ò‹!j²°4/Ö€æA hüÍ ÄI¥y)9©`7kùéjöñÓÝö((@CãÔyݳžþ:ž¾Œ…¢"“Œ…º‹ÖN8º“P©mµ€ൂì°F6žÃX$WÒÉ 8BºH/ƒ¨éD is`˜ŠÖ\2{ŒÒ0ÑG(!íMÍ$­P´ÒuæÊ£a1»¸X]Wžœyt¥C˜g`f¡§ê„À¨Iñfža倅áÿ'ï»M÷Äk?ÙÑ€l<Õ’|ž$¢¦KD¼Ä/P"”¬ˆð8éãí­ëuVû!4>¶€âÆËS×OÆÉ3ç™?3ÂS~a¨¿>м6‘äÈÆS-ȳðùZˆšv¤ w®Xзí;P‚&º%È=É r2Òþ¼™Á')ߠ3Ðlw¼¶“ ÈÆS-êðDÃAˆ5,Ѱk4ìA‰† & Ñ€”º7ƒb-¬Æ]ÞÅÅÂÝZs`¡dô,ý¢V%Ó«¨ÚD \PbB \ÔPZÉVÿÿPKÅ,NVR|PKjuju/entitystore.files.bson|Ó¿j–1ðØ"8tPw§ÈÉ99IVq7 N‚äœ$¶Ø?í""‚«÷ .Þ@½ 'ÁEœzêäàqû¾—òÁË› Iøñ<ÉçÜÕ§Ý=þñf÷äøã÷ÎmïèþÙñ󽃗ù¿ÛîÚًÓÖï·Óá>úõdïŠs;‡ãøÙé¾ÛµùÖQOî¶4CÌÜ‚vn¸NH¥µSÎnkؾv4Ü[®3ôŒH¾„)ž0£/S›O)wÁ œ»Lx}ƒðûBxwMØz±¶9´‘Ä"ˆÍFÌ0i!ìÊÌ#€gÑçbÖ:ü(•RO•»æua6áÍW_ùçáÛo뇫ÂQTKƒ9±`LÂ1æƒ0 H¢Kad΄Á§ÚLˆ4}ž^R`íHT@.^l^,„iU(†c‰¡Õ’BWKYÄRHk(Ë–C…FªàEìGaV_JH~„‘sH3këÂ{&¼õèÏùùáë­ßk« #e„JŠZ–1§ûÔ* ¼¦ 8DÅ×ÚØZ®Ñ2´43rlÚãÿˆ/~Ù |·^¸!»ˆVsh¹’½›"•ˆC\›Elõ£—ÞÕ ñ%ë´ 
³dœš§sÿÿÿPKMÇQ°ØÌPKjuju/juju.stat.counters.bson2a```ÏLa{Р½ò‰¡ÆËÄÝË’˜€âLÙ œ@ÊÐÊÈÊØÊÄŠA „çÞvlz–õ0"ë1µ2³2ǯgº +KBö¬ÄÔ“ˆK9TÏ*tÿ$Y%ãÒãÕ³3 R¬RñëYƒ®ÇU ÿÿPK}R‡ulPKjuju/juju.stat.tokens.bsond{KÄ@ Äc}´ÕZEPTA¸ÏtlÛp]hwË>îÛ_Ú>¸¿fa~Ifçž÷º+z$žH”«[}Ä]:«€Oaf2’Áa­=|‹yÍf1™¶ÝA¤é/nÈIêV¹ø÷–Ý”$¸èà `Îv·d3Öl¶¿ “23©lµZœ-‹«hšŽ2ÿ‰—³÷²Í,Ø¿`÷Œ½^ÜòMÈ&Ç2WÕK54¨7§‘>ÿ#À#%‰·1´èÌTѪûæî½íÑ:{ÿÿPK™š&Ò½PKjuju/logs.bsonÿÿPKPKjuju/macaroons.bsonÿÿPKPK juju/managedStoredResources.bsonÔ–_räD ÆMŠpŠ÷IÝ-u_K¸¤n ¶j+Y’ÔM^vB3/?ÌøôYý³>Ùw?lÛÍþam?oÛöûÇÓ¿6vè@ÆÁ® ë8o¸Ýøýóé”Á™”i§'ü{÷“~þã¿I<úÓÃéqzªhÆÆ#0)Ô¼‡G/&¸¤37Tœˆ"«öcÉX0©æ‘‰ðX-¯öÕÙ mÀÄ¥˜{>»zi“+áöÓç/÷·þ|ÿ°Ýl/ÛçSžüäÛݹxØ~É¿)™÷¼pSȘ´äzZòþhÃhT‰c´ôzZzA+f#lc³ÐÈ_œT¸u h<¶yR—®â Dæ'•®eöY „–²4u¸á2ÚÕ´b—ZˆZáC´®¦u–xg´v%[CÕߤ•w{ÉkY^®ÀÓzMâ+ZÂÐ ri ¼Åäð)Æš^òB8ÜÄfÒ«CX‘^¥c÷^R¾ú¨ u÷^òh•t¬ƒ7g®Gi]”Üv¬&£Å×Óâ ZÚi]Í4;EŠËÔj>$ñˆVb¯!«®ZŒ"“l·˜F½®ÐÎËsdYB"©N\±‹ç0»š–í¤¬ ëíÞÊB_ò¸Qp>Ýv˜ÖkN¬6ú(»ž'…Yv›Ï–í²òÐ I?&5¦Öæ2#;­¦î*5 ˜Due("a‰°Z;ñ¢dÝCº´:Îsë·hÝ}{©wŸýö¹Ý>">|ù»ÿ!øÉ|Î\&Kd÷õºr[-*0 ÏvšŠE\m¦SŠXÐÀ™ô>¢Y¾kë÷XÓÓ¥5Š!f²Ž»ôb±/ÍY7õÝ<>Ó^“øÎ¿..J¦½x'\s£U®§UÞ­¶GŸ†â1ZÇ߯I¼7Z}_ܦu{ûËõ"o\Ok¼?Z¶ >ôŒù+ÿÿPKÅrUÇÈDPKjuju/migrations.bson\¿J1Æ?ƒp­(baát "xê©Ob+ÙÍ(ÙM˜dÏÓÊ—±ð| ß¹c‘åÊä÷ý›Ó`ö(ßïçŸ?ó³_ÿõ±Ë+n‡Ê3ÃîǸ¯R_IB¡À}ÒÎGyóUR7Ç‘)_˜62áB­òH¯°oÔž|'à^T*o‘ñ!PrNj[¨°Z.Ü{# ¼ä˜rgp·ÿ–É÷:Ö,w›¢'YQÖÔ ñÙ[ã î'£++_,Ô¦.ÛˆFâúÚ&¦¦˜à/ÿÿPK‘pò¸Ô'PKjuju/storedResources.bson¼–Qn$E †{à ñ¼G@â Ùe—]uÄ!Få*› ¡,ì&ˆîË1pD’]%C?ÌtOOMùÿüÿîùóÍq\œ~ÞÇ<Žc4Xcš÷ðèdŠ[»TrÉ Qus'[džU9¯LUÆny·ï.fhî‰yæ«O§¶„+ï~ñ«Ÿ®/os×Ûãâ×™WßäÙ ØJÄ¥CXaR*=Ö, ˆu l¹üƒÇzsu}|yÜýÀÇËI/çÇËÿGÁÛë?®Êo7~ãÇ÷·ûÃñu¾5qèÀUB|6”VCL×€<OÙ¼Fõü·ÏÂW¶aT×¶Á)j, _j2³óN‡›ÚJ)<æ#í¬»wÊÙ ðéNyµ)ýåàÍEøþOáWe‚ÒÆLøÄQ:Jk ksG{ÿÍ'àŸ]Áçàgþß²¡ÙsçÓ^Ög6ùü/ž…?û¨“wê´™.PòŒ›ÍZuf¬ˆ±[ÅŒN˜¥“|¥ûÎ{@0£*hF7%Hm ëÚ&ƒ1ÒE¬ÖÒ¨™Õ”7w~±"EÓ¿cç‡'ð[`ÎäeeŒ)9vFMçg”¤Î”rŒÀ?»‚×Ì|§`™uIülÐtOoÀЧäÀ ÁQÃÓk"©O[uU¬¤ù(„.uCï#šå£Ž·ºÇ^ž¡å CÌrt?;ÿ…?Ó)äHÅö^…Ñ­t]‘ÎWSÊQž3áðÏ®àUðý”åç_‰6ž‡ÿwÿÿPKÑ•WâúîPKjuju/txns.bsonìZ;Õ>w×k]Ý{ÍòÊ,CbÚœw!È™‘HQ—w…Ùµvv,~D‚  
##ÃÄ+'ªgº™™m¦g¦ýÜÁîÌéé®®ª¯¾¯NÍüü/ÆN¿°—Øóß¾zá`ÿÍÞ`lkkŸaŒ%§¦ÂÙ‰ö}n›³×éßVdgéìèèà0§Kyt0>ŒyĶCZöÆú"J²`È®äâT‘ÀY©r(¢I;Å}Ÿx”šÞë“¡³.9‚žG‘PЫfe¢ÕR°-dÛõÃUl{ý·~žúñoé«x´ËÈ$í¢rzG»·ÆöΕ¼™¬_˜˜glç0—x0Þ?bÍB}œ8œ¤f‡³?šƒ±Sõ©ó }²I´w\I¡ Nôu6Iô7ì¶'z'æo :Û³'ëçGùˆé'¥W±ðJéÊñ*­@U®D¬ WRP^1£­6lmŒ^[£SMŒèÑÁœÃSÓbðÓbx˜Î¾„ûx9§çæCõ½|å à•'ŒÍÜql±оðú˜ôw¬ôRWæýkã1}x Êñ(6/'Yê‡ÍãÞª*b8Ý[ƒ796úŒa‚ÓǦ98wû ‰pzÝF¨«á««ao?²ûkÏqÂ_ÞnV:Á~¦ ´-Az eó`‡•Áëƒî4°/Rßÿ´ê˳ñ —ú–h¤0äsˆJzú+¢TÖ8(ÜPñÈ Lñ´ˆ Sâ ˆdI"8TÑEEÎ *• 7ÂcÉ<MU§À*o:ÔwxÓÇ«ïà¶[õ}|ƒêû¿&ÑÑá¨^ê;¸ÇýÔ7Ek©æDeU–•¶àH‡}®²óÚ$ãmŠ0£¥Ô÷ÿMŒ@+)²›'$\™p}BÂ9B<¯¬·úÞ×䀸Í(Åe/õÜŸcÔwp»¨ïNh”!yļq° ¾*Øë+×{}‹; ì7«/P|åk{½m5}¶Ñ襾`¹IÜË & Á³*²»çÁ"õ™vY>‘üÓ œ'\vŠÂ¥³×–kÏ­S%+z—u™Zl­îPßáM¯¾ƒÛnÕ÷Ù ¨o›èïÛ6+‡„­^ê;¸Ç=ÕWZK É+ã‘ÔWéR9aK ·1)­31êTß6F?´Ì¯972ʵ ‰ÊrÊ †ê˜J=/MH‹®ìIH‹n1CHƒç•õ&¤›(£#hÝo‡ÎKÔ‰ÒèT†ˆ:dä –66¸ÍF£ ² òö@%ét*èlÊ´³ 䆥Õ\¹XÚótÒð¦'¤Ám·„d6HH¿´Ã¸T@#õ"¤Á=îGHÜ Ô1Š*¦Wš_9ÇMEuÀM÷™-EH¿¶êŒ9¦õ;¤9N°+’]Ÿì! žW¶4!=M9xèÒoï¾Óä $Q’׋JÔ\Kï¼ÒÂ)¨/,!åæHZåãŠc‘Àú>òËJc„Œ)X¯E!ºÕ i5zä3&ú¨R¨R‚VÝó‰¡MwÍ'¶ÝÒ3 ¤6Ñï5‰.Ú¢ŒÖõ"¤Á=îGH¦•C •÷h+m½¤‰z%PV"_Ý@ÍĨ“Ú½ßÆíןOPEO9ÁY,ufYBZteOBZt‹¹-ÛÀye½ éƒvhŠÇhú L‰ƒ­Š Ѭ3\s‡Ö$S4·ÂË’‰7# C¤XHAynx¶2qç|1AÈ¢ä\RÌÔñ颂CH[¶áMOHƒÛn é'6=6AH6‰ÎbáÆ÷"¤Á=îGHHz¯²PUH)VZäP9òŠ:$ J4´›˜‰Ñ!½½ FµÛZÈe˜ÄèÅi1Äeé"ûGN¸xõ0W×L5—²÷òßþ;k¾Â}ºÉj…ÎP×à`KVÛl}Ü~à9îr¯Ñêàß]Ý_lžhV:uú“v)¦ët\z’±èÊ5u:ÎO2†ORo°zc’… ï½?ØO6+`ÿ¬mJ]tÒ£Ø<ØÕÊ`Wëƒ]Ý `ÿ¼Ý% Ë1¤{?YYì§š•N°Ñ6<ÖÄàÂú?Y™ÃÛÒ#¡EW® öù‘ÐðIê ö/›pi”óYÝ{°ŸnV:ÁþU;#æÒyj7v¿2Øýú`÷wØÿ ÿÿPK¸~L}i-PKjuju/txns.log.bsonÌ–=‹&ELJÝ{Á—CÍõΪ~©ê641?ÑTº»ªŽƒ»[ÙuÅÀ@0ñ[›)*(Š¢hæ'PPÍ-yx<‘}™YŽ™`gç™þñ«U÷GÓ4]}ýŽL¯ýüîs÷?|êƒiÚÛ?zóàPå–=šÞ÷¯.ÉtÏo{05¿ÕLÕÐ ƒµ®ÅÔJìŒÂ…BÔŽ "³¤¡ W’?ufª’ým‘B½c¯0Pú:JÓ˜¥€¾Þáô„/t¦½éßë$ÜË+ÃÝ€{|î•ý{í~»­òÊÿ¨_ÞP?¿¡~Öo·ïôv÷…L B'#mS5øçZâèÚÊ]:ÛÑ#»rÄ =:Ñ0û }ÄPý/Ž)6È4j蘭úMš°f`—GÑe€µ%CÆÚL¡[ªÚ"S¬yîc+Ã='ö瘝m£'WæèœØ_ß‘#„ŽØ}úŸè—Ž˜ Ô œ4Z°A¦ƒ;5on«vîÃ¥Úzä’¸`Ñ.iM©•hýI¢ÍJ”àþº2ÜSb¿Åýí‚%õìlJš=lžG]RÒßg8j¥†–¤`ïÍCÌQy´Ôµ²Ká–i*¤9µØƒyo ûô-ÁF%‰µB¢>軫 œ4PÄÂjä[ÀÜ?V†{NIÿÜUIi†£—|ÑgnýõÉÇ3ÙHB-5&,= OëÝAGö$‹¿ƒÍû/horizon The charms configure the 'admin' user with a password of 'openstack' by default. 
The OpenStack cloud deployed is completely clean; the charms don't attempt to configure networking or upload images. Read the OpenStack User Guide on how to configure your cloud for use: http://docs.openstack.org/user-guide/content/ Niggles ------- The neutron-gateway service requires a service unit with two network interfaces to provide full functionality; this part of OpenStack provides L3 routing between tenant networks and the rest of the world. Its possible todo this when testing on OpenStack by adding a second network interface to the neutron-gateway service: nova interface-attach --net-id juju set neutron-gateway ext-port=eth1 Note that you will need to be running this bundle on an OpenStack cloud that supports MAC address learning of some description; this includes using OpenStack Havana with the Neutron Open vSwitch plugin. For actual OpenStack deployments, this service would reside of a physical server with network ports attached to both the internal network (for communication with nova-compute service units) and the external network (for inbound/outbound network access to/from instances within the cloud). charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/bad/0000775000175000017500000000000012672604603031776 5ustar marcomarco././@LongLink0000644000000000000000000000015500000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/bad/bundle.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/bad/bun0000664000175000017500000000037212672604603032507 0ustar marcomarco# This bundle has a bad relation, which will cause it to fail # its verification. 
services: wordpress: charm: wordpress num_units: 1 mysql: charm: mysql num_units: 1 relations: - ["foo:db", "mysql:server"] ././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/bad/README.mdcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/bad/REA0000664000175000017500000000001712672604603032326 0ustar marcomarcoA dummy bundle charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/0000775000175000017500000000000012672604603031444 5ustar marcomarco././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/terms1/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/terms10000775000175000017500000000000012672604603032600 5ustar marcomarco././@LongLink0000644000000000000000000000016300000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/terms1/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/terms10000664000175000017500000000025312672604603032602 0ustar marcomarconame: terms1 summary: "Sample charm with terms and conditions" description: | That's a boring charm that requires certain terms. 
terms: ["terms-3/1", "terms-4/5"] ././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/varnish/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/varnis0000775000175000017500000000000012672604603032667 5ustar marcomarco././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/varnish/revisioncharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/varnis0000664000175000017500000000000112672604603032660 0ustar marcomarco1././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/varnish/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/varnis0000664000175000017500000000015712672604603032674 0ustar marcomarconame: varnish summary: "Database engine" description: "Another popular database" provides: webcache: varnish ././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/category/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/catego0000775000175000017500000000000012672604603032627 5ustar marcomarco././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/category/.ignoredcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/catego0000664000175000017500000000000112672604603032620 0ustar marcomarco#././@LongLink0000644000000000000000000000015500000000000011604 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/category/.dir/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/catego0000775000175000017500000000000012672604603032627 5ustar marcomarco././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/category/.dir/ignoredcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/catego0000664000175000017500000000000012672604603032617 0ustar marcomarco././@LongLink0000644000000000000000000000016500000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/category/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/catego0000664000175000017500000000026212672604603032631 0ustar marcomarconame: categories summary: "Sample charm with a category" description: | That's a boring charm that has a category. 
categories: ["database"] tags: ["openstack", "storage"]././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/logging/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/loggin0000775000175000017500000000000012672604603032644 5ustar marcomarco././@LongLink0000644000000000000000000000015500000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/logging/hooks/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/loggin0000775000175000017500000000000012672604603032644 5ustar marcomarco././@LongLink0000644000000000000000000000016500000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/logging/hooks/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/loggin0000664000175000017500000000000012672604603032634 0ustar marcomarco././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/logging/revisioncharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/loggin0000664000175000017500000000000212672604603032636 0ustar marcomarco1 ././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/logging/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/loggin0000664000175000017500000000056212672604603032651 0ustar marcomarconame: logging summary: "Subordinate logging test charm" description: | This is a longer description which potentially contains multiple lines. 
subordinate: true provides: logging-client: interface: logging requires: logging-directory: interface: logging scope: container info: interface: juju-info scope: container ././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpr0000775000175000017500000000000012672604603032702 5ustar marcomarco././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/hooks/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpr0000775000175000017500000000000012672604603032702 5ustar marcomarco././@LongLink0000644000000000000000000000016700000000000011607 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/hooks/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpr0000664000175000017500000000000012672604603032672 0ustar marcomarco././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/config.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpr0000664000175000017500000000015712672604603032707 0ustar marcomarcooptions: blog-title: {default: My Title, description: A descriptive title used for the blog., type: string} ././@LongLink0000644000000000000000000000016100000000000011601 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/revisioncharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpr0000664000175000017500000000000112672604603032673 0ustar marcomarco3././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpr0000664000175000017500000000063212672604603032705 0ustar marcomarconame: wordpress summary: "Blog engine" description: "A pretty popular blog engine" provides: url: interface: http limit: optional: false logging-dir: interface: logging scope: container monitoring-port: interface: monitoring scope: container requires: db: interface: mysql limit: 1 optional: false cache: interface: varnish limit: 2 optional: true ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/actions/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpr0000775000175000017500000000000012672604603032702 5ustar marcomarco././@LongLink0000644000000000000000000000017100000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/actions/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpr0000664000175000017500000000000012672604603032672 0ustar marcomarco././@LongLink0000644000000000000000000000016400000000000011604 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-unknown/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-0000775000175000017500000000000012672604603032574 5ustar marcomarco././@LongLink0000644000000000000000000000017200000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-unknown/hooks/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-0000775000175000017500000000000012672604603032574 5ustar marcomarco././@LongLink0000644000000000000000000000020200000000000011575 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-unknown/hooks/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-0000664000175000017500000000000012672604603032564 0ustar marcomarco././@LongLink0000644000000000000000000000020100000000000011574 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-unknown/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-0000664000175000017500000000076112672604603032602 0ustar marcomarconame: multi-series summary: multi-series test charm description: Test charm that supports a number of series series: - trusty - utopic - vivid - wily - nosuchseries provides: url: interface: http limit: optional: false logging-dir: interface: logging scope: container monitoring-port: interface: monitoring scope: container requires: db: interface: mysql limit: 1 optional: false cache: interface: varnish limit: 2 optional: true ././@LongLink0000644000000000000000000000014700000000000011605 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsay/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsa0000775000175000017500000000000012672604603032662 5ustar marcomarco././@LongLink0000644000000000000000000000015500000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsay/hooks/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsa0000775000175000017500000000000012672604603032662 5ustar marcomarco././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsay/hooks/hookscharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsa0000775000175000017500000000003312672604603032663 0ustar marcomarco#!/bin/bash # Do nothing! ././@LongLink0000644000000000000000000000017300000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsay/hooks/config-changedcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsa0000777000175000017500000000000012672604603033725 2hooksustar marcomarco././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsay/hooks/installcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsa0000775000175000017500000000045712672604603032675 0ustar marcomarco#!/bin/bash RES_NAME="for-install" RES_PATH=$(2>&1 resource-get $RES_NAME) if [ $? 
-ne 0 ]; then RES_GET_STDERR=$RES_PATH status-set blocked "[resource "'"'"$RES_NAME"'"'"] $RES_GET_STDERR" exit 0 fi set -e status-set maintenance "path: $RES_PATH" status-set maintenance $(cat $RES_PATH) ././@LongLink0000644000000000000000000000017200000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsay/hooks/update-statuscharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsa0000775000175000017500000000045612672604603032674 0ustar marcomarco#!/bin/bash RES_NAME="for-upload" RES_PATH=$(2>&1 resource-get $RES_NAME) if [ $? -ne 0 ]; then RES_GET_STDERR=$RES_PATH status-set blocked "[resource "'"'"$RES_NAME"'"'"] $RES_GET_STDERR" exit 0 fi set -e status-set maintenance "path: $RES_PATH" status-set maintenance $(cat $RES_PATH) ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsay/config.xmlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsa0000664000175000017500000000001412672604603032657 0ustar marcomarcodata ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsay/dummy.txtcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsa0000664000175000017500000000001312672604603032656 0ustar marcomarcodummy data ././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsay/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsa0000664000175000017500000000075512672604603032673 0ustar marcomarconame: starsay summary: A dumb little test charm for 
resources. maintainer: Nate Finch description: Doesn't do anything at all. tags: - application resources: for-store: type: file filename: dummy.tgz description: One line that is useful when operators need to push it. for-install: type: file filename: initial.tgz description: get things started for-upload: type: file filename: config.xml description: Who uses xml anymore? ././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsay/initial.txtcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsa0000664000175000017500000000001512672604603032660 0ustar marcomarcoinitial data ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsay/dummy.tgzcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsa0000664000175000017500000000020712672604603032663 0ustar marcomarco‹TÒÕVíÑ1 ƒ@…á©sŠ=A˜qÝõ< ›R]‰Þ^E©Rˆþ¯yżâÁä©m—{™‹œG71Ö{Zô=æÅªƒ¦¡µJ½ŠÓ7½LcIƒsò»þù¡÷íþ§òþ—SI·«§~°HÆ{¥(././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsay/initial.tgzcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/starsa0000664000175000017500000000021012672604603032655 0ustar marcomarco‹aÒÕVíÑ1 A …áÔžbN ÉŒ“=Ï€²‚Ñã«l³•n³ˆðÍ+òŠécÞÎûx„lE_Üï´¡ê2gVŲ{-Õt(¢–KvIºÙ¢…Ûíš’œ¦ñrÿÐûvÿS}þ:¶h»_¬ö‰¤É(charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/riak/0000775000175000017500000000000012672604603032372 5ustar marcomarco././@LongLink0000644000000000000000000000015400000000000011603 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/riak/revisioncharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/riak/r0000664000175000017500000000000112672604603032545 0ustar marcomarco7././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/riak/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/riak/m0000664000175000017500000000031712672604603032552 0ustar marcomarconame: riak summary: "K/V storage engine" description: "Scalable K/V Store in Erlang with Clocks :-)" provides: endpoint: interface: http admin: interface: http peers: ring: interface: riak charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/mysql/0000775000175000017500000000000012672604603032611 5ustar marcomarco././@LongLink0000644000000000000000000000015500000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/mysql/revisioncharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/mysql/0000664000175000017500000000000112672604603032602 0ustar marcomarco1././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/mysql/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/mysql/0000664000175000017500000000015212672604603032611 0ustar marcomarconame: mysql summary: "Database engine" description: "A pretty popular database" provides: server: mysql ././@LongLink0000644000000000000000000000017400000000000011605 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-bad-combination/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-0000775000175000017500000000000012672604603032574 5ustar marcomarco././@LongLink0000644000000000000000000000020200000000000011575 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-bad-combination/hooks/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-0000775000175000017500000000000012672604603032574 5ustar marcomarco././@LongLink0000644000000000000000000000021200000000000011576 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-bad-combination/hooks/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-0000664000175000017500000000000012672604603032564 0ustar marcomarco././@LongLink0000644000000000000000000000021100000000000011575 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-bad-combination/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-0000664000175000017500000000075212672604603032602 0ustar marcomarconame: multi-series summary: multi-series test charm description: Test charm that supports a number of series series: - trusty - utopic - vivid - wily - win10 provides: url: interface: http limit: optional: false logging-dir: interface: logging scope: container monitoring-port: interface: monitoring scope: container requires: db: interface: mysql limit: 1 optional: false cache: interface: varnish limit: 2 optional: true 
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/0000775000175000017500000000000012672604603032577 5ustar marcomarco././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/hooks/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/0000775000175000017500000000000012672604603032577 5ustar marcomarco././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/hooks/installcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/0000775000175000017500000000003112672604603032576 0ustar marcomarco#!/bin/bash echo "Done!" ././@LongLink0000644000000000000000000000015500000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/.ignoredcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/0000664000175000017500000000000112672604603032570 0ustar marcomarco#././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/empty/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/0000775000175000017500000000000012672604603032577 5ustar marcomarco././@LongLink0000644000000000000000000000016300000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/empty/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/0000664000175000017500000000000012672604603032567 0ustar 
marcomarco././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/src/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/0000775000175000017500000000000012672604603032577 5ustar marcomarco././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/src/hello.ccharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/0000664000175000017500000000011412672604603032575 0ustar marcomarco#include main() { printf ("Hello World!\n"); return 0; } ././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/config.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/0000664000175000017500000000054312672604603032603 0ustar marcomarcooptions: title: {default: My Title, description: A descriptive title used for the service., type: string} outlook: {description: No default outlook., type: string} username: {default: admin001, description: The name of the initial account (given admin permissions)., type: string} skill-level: {description: A number indicating skill., type: int} ././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/.dir/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/0000775000175000017500000000000012672604603032577 5ustar marcomarco././@LongLink0000644000000000000000000000016100000000000011601 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/.dir/ignoredcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/0000664000175000017500000000000012672604603032567 0ustar marcomarco././@LongLink0000644000000000000000000000015500000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/revisioncharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/0000664000175000017500000000000112672604603032570 0ustar marcomarco1././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/0000664000175000017500000000021412672604603032576 0ustar marcomarconame: dummy summary: "That's a dummy charm." description: | This is a longer description which potentially contains multiple lines. 
././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/build/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/0000775000175000017500000000000012672604603032577 5ustar marcomarco././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/build/ignoredcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/0000664000175000017500000000000012672604603032567 0ustar marcomarco././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/actions.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/0000664000175000017500000000025012672604603032576 0ustar marcomarcosnapshot: description: Take a snapshot of the database. params: outfile: description: The file to write out to. 
type: string default: foo.bz2 ././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-0000775000175000017500000000000012672604603032574 5ustar marcomarco././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/hooks/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-0000775000175000017500000000000012672604603032574 5ustar marcomarco././@LongLink0000644000000000000000000000017200000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/hooks/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-0000664000175000017500000000000012672604603032564 0ustar marcomarco././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/revisioncharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-0000664000175000017500000000000212672604603032566 0ustar marcomarco1 ././@LongLink0000644000000000000000000000017100000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-0000664000175000017500000000074112672604603032600 0ustar marcomarconame: multi-series summary: multi-series test charm description: Test charm that supports a number of series series: - trusty - utopic - vivid - wily provides: url: 
interface: http limit: optional: false logging-dir: interface: logging scope: container monitoring-port: interface: monitoring scope: container requires: db: interface: mysql limit: 1 optional: false cache: interface: varnish limit: 2 optional: true ././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/actions/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-0000775000175000017500000000000012672604603032574 5ustar marcomarco././@LongLink0000644000000000000000000000017400000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/actions/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-0000664000175000017500000000000012672604603032564 0ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/terms/0000775000175000017500000000000012672604603032576 5ustar marcomarco././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/terms/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/terms/0000664000175000017500000000031112672604603032573 0ustar marcomarconame: terms summary: "Sample charm with terms and conditions" description: | That's a boring charm that requires certain terms. 
tags: ["openstack", "storage"] terms: ["terms-1/1", "terms-2/5"] ././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000775000175000017500000000000012672604603032541 5ustar marcomarco././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000775000175000017500000000000012672604603032541 5ustar marcomarco././@LongLink0000644000000000000000000000017000000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/otherdatacharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000001212672604603032534 0ustar marcomarcosome text ././@LongLink0000644000000000000000000000017600000000000011607 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/collect-metricscharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/startcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000017400000000000011605 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/upgrade-charmcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000017500000000000011606 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/config-changedcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000020300000000000011576 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/meter-status-changedcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/installcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000020200000000000011575 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-brokencharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000020400000000000011577 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-departedcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000020300000000000011576 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-joinedcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000020300000000000011576 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-changedcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000020400000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-changedcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000020400000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-departedcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000020300000000000011576 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-changedcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000020200000000000011575 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-brokencharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016300000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/stopcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000020200000000000011575 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-joinedcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000020300000000000011576 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-brokencharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016600000000000011606 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/subdir/charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000775000175000017500000000000012672604603032541 5ustar marcomarco././@LongLink0000644000000000000000000000017300000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/subdir/stuffcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002712672604603032542 0ustar marcomarconon hook related stuff ././@LongLink0000644000000000000000000000020500000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-departedcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000020200000000000011575 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-joinedcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000002212672604603032535 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/revisioncharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000000212672604603032533 0ustar marcomarco1 ././@LongLink0000644000000000000000000000016600000000000011606 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-ho0000664000175000017500000000036512672604603032547 0ustar marcomarconame: all-hooks summary: "That's a dummy charm with hook scrips for all types of hooks." description: "This is a longer description." provides: foo: interface: phony requires: bar: interface: fake peers: self: interface: dummy charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/entities.go0000664000175000017500000001065612672604603030125 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package storetesting // import "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" import ( jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/mgo.v2" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" ) // EntityBuilder provides a convenient way to describe a mongodoc.Entity // for tests that is correctly formed and contains the desired // information. type EntityBuilder struct { entity *mongodoc.Entity } // NewEntity creates a new EntityBuilder for the provided URL. 
func NewEntity(url string) EntityBuilder { URL := charm.MustParseURL(url) return EntityBuilder{ entity: &mongodoc.Entity{ URL: URL, Name: URL.Name, Series: URL.Series, Revision: URL.Revision, User: URL.User, BaseURL: mongodoc.BaseURL(URL), PromulgatedRevision: -1, }, } } func copyURL(id *charm.URL) *charm.URL { if id == nil { return nil } id1 := *id return &id1 } func (b EntityBuilder) copy() EntityBuilder { e := *b.entity e.PromulgatedURL = copyURL(e.PromulgatedURL) e.URL = copyURL(e.URL) e.BaseURL = copyURL(e.BaseURL) return EntityBuilder{&e} } // WithPromulgatedURL sets the PromulgatedURL and PromulgatedRevision of the // entity being built. func (b EntityBuilder) WithPromulgatedURL(url string) EntityBuilder { b = b.copy() if url == "" { b.entity.PromulgatedURL = nil b.entity.PromulgatedRevision = -1 } else { b.entity.PromulgatedURL = charm.MustParseURL(url) b.entity.PromulgatedRevision = b.entity.PromulgatedURL.Revision } return b } // Build creates a mongodoc.Entity from the EntityBuilder. func (b EntityBuilder) Build() *mongodoc.Entity { return b.copy().entity } // AssertEntity checks that db contains an entity that matches expect. func AssertEntity(c *gc.C, db *mgo.Collection, expect *mongodoc.Entity) { var entity mongodoc.Entity err := db.FindId(expect.URL).One(&entity) c.Assert(err, gc.IsNil) c.Assert(&entity, jc.DeepEquals, expect) } // BaseEntityBuilder provides a convenient way to describe a // mongodoc.BaseEntity for tests that is correctly formed and contains the // desired information. type BaseEntityBuilder struct { baseEntity *mongodoc.BaseEntity } // NewBaseEntity creates a new BaseEntityBuilder for the provided URL. 
func NewBaseEntity(url string) BaseEntityBuilder { URL := charm.MustParseURL(url) return BaseEntityBuilder{ baseEntity: &mongodoc.BaseEntity{ URL: URL, Name: URL.Name, User: URL.User, }, } } func (b BaseEntityBuilder) copy() BaseEntityBuilder { e := *b.baseEntity e.URL = copyURL(e.URL) return BaseEntityBuilder{&e} } // WithPromulgated sets the promulgated flag on the BaseEntity. func (b BaseEntityBuilder) WithPromulgated(promulgated bool) BaseEntityBuilder { b = b.copy() b.baseEntity.Promulgated = mongodoc.IntBool(promulgated) return b } // WithACLs sets the ACLs field on the BaseEntity. func (b BaseEntityBuilder) WithACLs(channel params.Channel, acls mongodoc.ACL) BaseEntityBuilder { b = b.copy() if b.baseEntity.ChannelACLs == nil { b.baseEntity.ChannelACLs = make(map[params.Channel]mongodoc.ACL) } b.baseEntity.ChannelACLs[channel] = acls return b } // Build creates a mongodoc.BaseEntity from the BaseEntityBuilder. func (b BaseEntityBuilder) Build() *mongodoc.BaseEntity { return b.copy().baseEntity } // AssertBaseEntity checks that db contains a base entity that matches expect. func AssertBaseEntity(c *gc.C, db *mgo.Collection, expect *mongodoc.BaseEntity) { var baseEntity mongodoc.BaseEntity err := db.FindId(expect.URL).One(&baseEntity) c.Assert(err, gc.IsNil) c.Assert(NormalizeBaseEntity(&baseEntity), jc.DeepEquals, NormalizeBaseEntity(expect)) } // NormalizeBaseEntity modifies a base entity so that it can be compared // with another normalized base entity using jc.DeepEquals. 
func NormalizeBaseEntity(be *mongodoc.BaseEntity) *mongodoc.BaseEntity { be1 := *be for c, acls := range be1.ChannelACLs { if len(acls.Read) == 0 && len(acls.Write) == 0 { delete(be1.ChannelACLs, c) } } if len(be1.ChannelACLs) == 0 { be1.ChannelACLs = nil } for c, entities := range be1.ChannelEntities { if len(entities) == 0 { delete(be1.ChannelEntities, c) } } if len(be1.ChannelEntities) == 0 { be1.ChannelEntities = nil } return &be1 } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/elasticsearch.go0000664000175000017500000000412112672604603031101 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package storetesting // import "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" import ( "os" "time" "github.com/juju/utils" gc "gopkg.in/check.v1" "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" ) // ElasticSearchSuite defines a test suite that connects to an // elastic-search server. The address of the server depends on the value // of the JUJU_TEST_ELASTICSEARCH environment variable, which can be // "none" (do not start or connect to a server) or host:port holding the // address and port of the server to connect to. If // JUJU_TEST_ELASTICSEARCH is not specified then localhost:9200 will be // used. 
type ElasticSearchSuite struct { ES *elasticsearch.Database indexes []string TestIndex string } var jujuTestElasticSearch = os.Getenv("JUJU_TEST_ELASTICSEARCH") func (s *ElasticSearchSuite) SetUpSuite(c *gc.C) { serverAddr := jujuTestElasticSearch switch serverAddr { case "none": c.Skip("elasticsearch disabled") case "": serverAddr = ":9200" } s.ES = &elasticsearch.Database{serverAddr} } func (s *ElasticSearchSuite) TearDownSuite(c *gc.C) { } func (s *ElasticSearchSuite) SetUpTest(c *gc.C) { s.TestIndex = s.NewIndex(c) } func (s *ElasticSearchSuite) TearDownTest(c *gc.C) { for _, index := range s.indexes { s.ES.DeleteIndex(index + "*") s.ES.DeleteDocument(".versions", "version", index) } s.indexes = nil } // NewIndex creates a new index name and ensures that it will be cleaned up at // end of the test. func (s *ElasticSearchSuite) NewIndex(c *gc.C) string { uuid, err := utils.NewUUID() c.Assert(err, gc.IsNil) id := time.Now().Format("20060102") + uuid.String() s.indexes = append(s.indexes, id) return id } // LoadESConfig loads a canned test configuration to the specified index func (s *ElasticSearchSuite) LoadESConfig(index string, settings, mapping interface{}) error { if err := s.ES.PutIndex(index, settings); err != nil { return err } if err := s.ES.PutMapping(index, "entity", mapping); err != nil { return err } return nil } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/0000775000175000017500000000000012672604603023541 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/content_test.go0000664000175000017500000003443412672604603026611 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" import ( "bytes" "fmt" "io" "io/ioutil" "net/http" "path/filepath" "sort" jc "github.com/juju/testing/checkers" "github.com/juju/testing/httptesting" "github.com/juju/xml" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" "gopkg.in/juju/charmstore.v5-unstable/internal/v4" ) var serveDiagramErrorsTests = []struct { about string url string expectStatus int expectBody interface{} }{{ about: "entity not found", url: "~charmers/bundle/foo-23/diagram.svg", expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: `no matching charm or bundle for cs:~charmers/bundle/foo-23`, }, }, { about: "diagram for a charm", url: "~charmers/wordpress/diagram.svg", expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: "diagrams not supported for charms", }, }} func (s *APISuite) TestServeDiagramErrors(c *gc.C) { id := newResolvedURL("cs:~charmers/trusty/wordpress-42", 42) s.addPublicCharmFromRepo(c, "wordpress", id) id = newResolvedURL("cs:~charmers/bundle/nopositionbundle-42", 42) s.addPublicBundleFromRepo(c, "wordpress-simple", id, true) for i, test := range serveDiagramErrorsTests { c.Logf("test %d: %s", i, test.about) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(test.url), ExpectStatus: test.expectStatus, ExpectBody: test.expectBody, }) } } func (s *APISuite) TestServeDiagram(c *gc.C) { bundle := storetesting.NewBundle(&charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "wordpress": { Charm: "wordpress", Annotations: map[string]string{ "gui-x": "100", "gui-y": "200", }, }, "mysql": { Charm: "utopic/mysql-23", Annotations: map[string]string{ "gui-x": "200", "gui-y": "200", }, }, }, }, ) url := newResolvedURL("cs:~charmers/bundle/wordpressbundle-42", 42) 
s.addRequiredCharms(c, bundle) err := s.store.AddBundleWithArchive(url, bundle) c.Assert(err, gc.IsNil) s.setPublic(c, url) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("bundle/wordpressbundle/diagram.svg"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %q", rec.Body.Bytes())) c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") assertCacheControl(c, rec.Header(), true) // Check that the output contains valid XML with an SVG tag, // but don't check the details of the output so that this test doesn't // break every time the jujusvg presentation changes. // Also check that we get an image for each service containing the charm // icon link. assertXMLContains(c, rec.Body.Bytes(), map[string]func(xml.Token) bool{ "svg element": isStartElementWithName("svg"), "wordpress icon": isStartElementWithAttr("image", "href", "../../wordpress/icon.svg"), "mysql icon": isStartElementWithAttr("image", "href", "../../utopic/mysql-23/icon.svg"), }) // Do the same check again, but with the short form of the id; // the relative links should change accordingly. rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("wordpressbundle/diagram.svg"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %q", rec.Body.Bytes())) // Check that the output contains valid XML with an SVG tag, // but don't check the details of the output so that this test doesn't // break every time the jujusvg presentation changes. // Also check that we get an image for each service containing the charm // icon link. 
assertXMLContains(c, rec.Body.Bytes(), map[string]func(xml.Token) bool{ "svg element": isStartElementWithName("svg"), "wordpress icon": isStartElementWithAttr("image", "href", "../wordpress/icon.svg"), "mysql icon": isStartElementWithAttr("image", "href", "../utopic/mysql-23/icon.svg"), }) } func (s *APISuite) TestServeDiagramNoPosition(c *gc.C) { bundle := storetesting.NewBundle( &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "wordpress": { Charm: "wordpress", }, "mysql": { Charm: "utopic/mysql-23", Annotations: map[string]string{ "gui-x": "200", "gui-y": "200", }, }, }, }) url := newResolvedURL("cs:~charmers/bundle/wordpressbundle-42", 42) s.addRequiredCharms(c, bundle) err := s.store.AddBundleWithArchive(url, bundle) c.Assert(err, gc.IsNil) s.setPublic(c, url) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("bundle/wordpressbundle/diagram.svg"), }) // Check that the request succeeds and has the expected content type. c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %q", rec.Body.Bytes())) c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") } var serveReadMeTests = []struct { name string expectNotFound bool }{{ name: "README.md", }, { name: "README.rst", }, { name: "readme", }, { name: "README", }, { name: "ReadMe.Txt", }, { name: "README.ex", }, { name: "", expectNotFound: true, }, { name: "readme-youtube-subscribe.html", expectNotFound: true, }, { name: "readme Dutch.txt", expectNotFound: true, }, { name: "readme Dutch.txt", expectNotFound: true, }, { name: "README.debugging", expectNotFound: true, }} func (s *APISuite) TestServeReadMe(c *gc.C) { url := newResolvedURL("cs:~charmers/precise/wordpress-0", -1) for i, test := range serveReadMeTests { c.Logf("test %d: %s", i, test.name) wordpress := storetesting.Charms.ClonedDir(c.MkDir(), "wordpress") content := fmt.Sprintf("some content %d", i) if test.name != "" { err := ioutil.WriteFile(filepath.Join(wordpress.Path, 
test.name), []byte(content), 0666) c.Assert(err, gc.IsNil) } url.URL.Revision = i s.addPublicCharm(c, wordpress, url) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(url.URL.Path() + "/readme"), }) if test.expectNotFound { c.Assert(rec.Code, gc.Equals, http.StatusNotFound) c.Assert(rec.Body.String(), jc.JSONEquals, params.Error{ Code: params.ErrNotFound, Message: "not found", }) } else { c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.String(), gc.DeepEquals, content) assertCacheControl(c, rec.Header(), true) } } } func charmWithExtraFile(c *gc.C, name, file, content string) *charm.CharmDir { ch := storetesting.Charms.ClonedDir(c.MkDir(), name) err := ioutil.WriteFile(filepath.Join(ch.Path, file), []byte(content), 0666) c.Assert(err, gc.IsNil) return ch } func (s *APISuite) TestServeIcon(c *gc.C) { content := `an icon, really` expected := `an icon, really` wordpress := charmWithExtraFile(c, "wordpress", "icon.svg", content) url := newResolvedURL("cs:~charmers/precise/wordpress-0", -1) err := s.store.AddCharmWithArchive(url, wordpress) c.Assert(err, gc.IsNil) s.setPublic(c, url) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(url.URL.Path() + "/icon.svg"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.String(), gc.Equals, expected) c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") assertCacheControl(c, rec.Header(), true) // Test with revision -1 noRevURL := url.URL noRevURL.Revision = -1 rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(noRevURL.Path() + "/icon.svg"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.String(), gc.Equals, expected) c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") assertCacheControl(c, rec.Header(), true) // Reload the charm with an icon that already has viewBox. 
wordpress = storetesting.Charms.ClonedDir(c.MkDir(), "wordpress") err = ioutil.WriteFile(filepath.Join(wordpress.Path, "icon.svg"), []byte(expected), 0666) c.Assert(err, gc.IsNil) url.URL.Revision++ err = s.store.AddCharmWithArchive(url, wordpress) c.Assert(err, gc.IsNil) s.setPublic(c, url) // Check that we still get expected svg. rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(url.URL.Path() + "/icon.svg"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.String(), gc.Equals, expected) c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") } func (s *APISuite) TestServeBundleIcon(c *gc.C) { s.addPublicBundleFromRepo(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/something-32", 32), true) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("~charmers/bundle/something-32/icon.svg"), ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Code: params.ErrNotFound, Message: "icons not supported for bundles", }, }) } func (s *APISuite) TestServeDefaultIcon(c *gc.C) { wordpress := storetesting.Charms.ClonedDir(c.MkDir(), "wordpress") url := newResolvedURL("cs:~charmers/precise/wordpress-0", 0) s.addPublicCharm(c, wordpress, url) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(url.URL.Path() + "/icon.svg"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.String(), gc.Equals, v4.DefaultIcon) c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") assertCacheControl(c, rec.Header(), true) } func (s *APISuite) TestServeDefaultIconForBadXML(c *gc.C) { for i, content := range []string{ "\x89\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44", // Technically this XML is not bad - we just can't parse it because // it's got internally defined character entities. Nonetheless, we treat // it as "bad" for the time being. 
		cloudfoundrySVG,
	} {
		// Upload a charm whose icon.svg holds the (unparseable) content under test.
		wordpress := charmWithExtraFile(c, "wordpress", "icon.svg", content)
		url := newResolvedURL("cs:~charmers/precise/wordpress-0", -1)
		url.URL.Revision = i
		s.addPublicCharm(c, wordpress, url)
		rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
			Handler: s.srv,
			URL:     storeURL(url.URL.Path() + "/icon.svg"),
		})
		// The store serves the default icon when the charm's SVG cannot be parsed.
		c.Assert(rec.Code, gc.Equals, http.StatusOK)
		c.Assert(rec.Body.String(), gc.Equals, v4.DefaultIcon)
		c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml")
		assertCacheControl(c, rec.Header(), true)
	}
}

// assertXMLEqual asserts that the xml contained in the
// two slices is equal, without caring about namespace
// declarations or attribute ordering.
func assertXMLEqual(c *gc.C, body []byte, expect []byte) {
	decBody := xml.NewDecoder(bytes.NewReader(body))
	decExpect := xml.NewDecoder(bytes.NewReader(expect))
	// Walk both token streams in lock step.
	for i := 0; ; i++ {
		tok0, err0 := decBody.Token()
		tok1, err1 := decExpect.Token()
		if err1 != nil {
			// The expected stream has ended (or errored); the body
			// stream must terminate with the same error (e.g. io.EOF).
			c.Assert(err0, gc.NotNil)
			c.Assert(err0.Error(), gc.Equals, err1.Error())
			break
		}
		ok, err := tokenEqual(tok0, tok1)
		if !ok {
			c.Logf("got %#v", tok0)
			c.Logf("want %#v", tok1)
			c.Fatalf("mismatch at token %d: %v", i, err)
		}
	}
}

// tokenEqual reports whether two XML tokens are equal after
// canonicalization (see canonicalXMLToken).
func tokenEqual(tok0, tok1 xml.Token) (bool, error) {
	tok0 = canonicalXMLToken(tok0)
	tok1 = canonicalXMLToken(tok1)
	return jc.DeepEqual(tok0, tok1)
}

// canonicalXMLToken returns a canonical form of the given token:
// start elements have their namespace-defining attributes removed
// and their remaining attributes sorted; other tokens pass through
// unchanged.
func canonicalXMLToken(tok xml.Token) xml.Token {
	start, ok := tok.(xml.StartElement)
	if !ok {
		return tok
	}
	// Remove all namespace-defining attributes.
j := 0 for _, attr := range start.Attr { if attr.Name.Local == "xmlns" && attr.Name.Space == "" || attr.Name.Space == "xmlns" { continue } start.Attr[j] = attr j++ } start.Attr = start.Attr[0:j] sort.Sort(attrByName(start.Attr)) return start } type attrByName []xml.Attr func (a attrByName) Len() int { return len(a) } func (a attrByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a attrByName) Less(i, j int) bool { if a[i].Name.Space != a[j].Name.Space { return a[i].Name.Space < a[j].Name.Space } return a[i].Name.Local < a[j].Name.Local } // assertXMLContains asserts that the XML in body is well formed, and // contains at least one token that satisfies each of the functions in need. func assertXMLContains(c *gc.C, body []byte, need map[string]func(xml.Token) bool) { dec := xml.NewDecoder(bytes.NewReader(body)) for { tok, err := dec.Token() if err == io.EOF { break } c.Assert(err, gc.IsNil) for what, f := range need { if f(tok) { delete(need, what) } } } c.Assert(need, gc.HasLen, 0, gc.Commentf("body:\n%s", body)) } func isStartElementWithName(name string) func(xml.Token) bool { return func(tok xml.Token) bool { startElem, ok := tok.(xml.StartElement) return ok && startElem.Name.Local == name } } func isStartElementWithAttr(name, attr, val string) func(xml.Token) bool { return func(tok xml.Token) bool { startElem, ok := tok.(xml.StartElement) if !ok { return false } for _, a := range startElem.Attr { if a.Name.Local == attr && a.Value == val { return true } } return false } } const cloudfoundrySVG = ` ]> content omitted ` charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/archive_test.go0000664000175000017500000013605212672604603026557 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" import ( "archive/zip" "bytes" "crypto/sha256" "encoding/base64" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/http/httptest" "os" "strconv" "strings" "sync" "time" jc "github.com/juju/testing/checkers" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" charmtesting "gopkg.in/juju/charmrepo.v2-unstable/testing" "gopkg.in/mgo.v2/bson" "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/stats" "gopkg.in/juju/charmstore.v5-unstable/internal/v4" ) type ArchiveSuite struct { commonSuite } var _ = gc.Suite(&ArchiveSuite{}) func (s *ArchiveSuite) SetUpSuite(c *gc.C) { s.enableIdentity = true s.commonSuite.SetUpSuite(c) } func (s *ArchiveSuite) TestGet(c *gc.C) { id := newResolvedURL("cs:~charmers/precise/wordpress-0", -1) ch := storetesting.NewCharm(nil) s.addPublicCharm(c, ch, id) rec := s.assertArchiveDownload( c, "~charmers/precise/wordpress-0", nil, ch.Bytes(), ) c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/precise/wordpress-0") assertCacheControl(c, rec.Header(), true) // Check that the HTTP range logic is plugged in OK. If this // is working, we assume that the whole thing is working OK, // as net/http is well-tested. 
	// Issue a ranged request and verify partial-content handling.
	rec = httptesting.DoRequest(c, httptesting.DoRequestParams{
		Handler: s.srv,
		URL:     storeURL("~charmers/precise/wordpress-0/archive"),
		Header:  http.Header{"Range": {"bytes=10-100"}},
	})
	c.Assert(rec.Code, gc.Equals, http.StatusPartialContent, gc.Commentf("body: %q", rec.Body.Bytes()))
	// A bytes=10-100 range is inclusive, so 91 bytes are expected.
	c.Assert(rec.Body.Bytes(), gc.HasLen, 100-10+1)
	c.Assert(rec.Body.Bytes(), gc.DeepEquals, ch.Bytes()[10:101])
	// The content hash header still describes the whole archive.
	c.Assert(rec.Header().Get(params.ContentHashHeader), gc.Equals, hashOfBytes(ch.Bytes()))
	c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/precise/wordpress-0")
	assertCacheControl(c, rec.Header(), true)
}

func (s *ArchiveSuite) TestGetWithPartialId(c *gc.C) {
	id := newResolvedURL("cs:~charmers/precise/wordpress-0", -1)
	ch := storetesting.NewCharm(nil)
	s.addPublicCharm(c, ch, id)
	// Download using a partial id (no series/revision).
	rec := s.assertArchiveDownload(
		c,
		"~charmers/wordpress",
		nil,
		ch.Bytes(),
	)
	// The complete entity id can be retrieved from the response header.
	c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, id.URL.String())
}

func (s *ArchiveSuite) TestGetPromulgatedWithPartialId(c *gc.C) {
	id := newResolvedURL("cs:~charmers/utopic/wordpress-42", 42)
	ch := storetesting.NewCharm(nil)
	s.addPublicCharm(c, ch, id)
	// Download using only the promulgated charm name.
	rec := s.assertArchiveDownload(
		c,
		"wordpress",
		nil,
		ch.Bytes(),
	)
	// The header reports the promulgated form of the id.
	c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, id.PromulgatedURL().String())
}

// V4 SPECIFIC
func (s *ArchiveSuite) TestGetElidesSeriesFromMultiSeriesCharmMetadata(c *gc.C) {
	_, ch := s.addPublicCharmFromRepo(c, "multi-series", newResolvedURL("cs:~charmers/multi-series-0", -1))
	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
		Handler: s.srv,
		URL:     storeURL("~charmers/multi-series/archive"),
	})
	c.Assert(rec.Code, gc.Equals, http.StatusOK)
	// The archive served by v4 must not list any series in its metadata.
	gotCh, err := charm.ReadCharmArchiveBytes(rec.Body.Bytes())
	c.Assert(err, gc.IsNil)
	c.Assert(gotCh.Meta().Series, gc.HasLen, 0)

	// Check that the metadata is elided from the metadata file when retrieved
	// directly too.
rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("~charmers/multi-series/archive/metadata.yaml"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) gotMeta, err := charm.ReadMeta(bytes.NewReader(rec.Body.Bytes())) c.Assert(err, gc.IsNil) c.Assert(gotMeta.Series, gc.HasLen, 0) chMeta := ch.Meta() chMeta.Series = nil c.Assert(gotMeta, jc.DeepEquals, chMeta) } func (s *ArchiveSuite) TestGetCounters(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } for i, id := range []*router.ResolvedURL{ newResolvedURL("~who/utopic/mysql-42", 42), } { c.Logf("test %d: %s", i, id) ch := storetesting.NewCharm(nil) s.addPublicCharm(c, ch, id) // Download the charm archive using the API, which should increment // the download counts. s.assertArchiveDownload( c, id.URL.Path(), nil, ch.Bytes(), ) // Check that the downloads count for the entity has been updated. key := []string{params.StatsArchiveDownload, "utopic", "mysql", id.URL.User, "42"} stats.CheckCounterSum(c, s.store, key, false, 1) // Check that the promulgated download count for the entity has also been updated key = []string{params.StatsArchiveDownloadPromulgated, "utopic", "mysql", "", "42"} stats.CheckCounterSum(c, s.store, key, false, 1) } } func (s *ArchiveSuite) TestGetCountersDisabled(c *gc.C) { id := newResolvedURL("~charmers/utopic/mysql-42", 42) ch := storetesting.NewCharm(nil) s.addPublicCharm(c, ch, id) // Download the charm archive using the API, passing stats=0. s.assertArchiveDownload( c, "", &httptesting.DoRequestParams{URL: storeURL("~charmers/utopic/mysql-42/archive?stats=0")}, ch.Bytes(), ) // Check that the downloads count for the entity has not been updated. 
key := []string{params.StatsArchiveDownload, "utopic", "mysql", "", "42"} stats.CheckCounterSum(c, s.store, key, false, 0) } var archivePostErrorsTests = []struct { about string url string noContentLength bool noHash bool entity charmstore.ArchiverTo expectStatus int expectMessage string expectCode params.ErrorCode }{{ about: "revision specified", url: "~charmers/precise/wordpress-23", expectStatus: http.StatusBadRequest, expectMessage: "revision specified, but should not be specified", expectCode: params.ErrBadRequest, }, { about: "no hash given", url: "~charmers/precise/wordpress", noHash: true, expectStatus: http.StatusBadRequest, expectMessage: "hash parameter not specified", expectCode: params.ErrBadRequest, }, { about: "no content length", url: "~charmers/precise/wordpress", noContentLength: true, expectStatus: http.StatusBadRequest, expectMessage: "Content-Length not specified", expectCode: params.ErrBadRequest, }, { about: "invalid channel", url: "~charmers/bad-wolf/trusty/wordpress", expectStatus: http.StatusNotFound, expectMessage: "not found", expectCode: params.ErrNotFound, }, { about: "no series", url: "~charmers/juju-gui", expectStatus: http.StatusForbidden, expectMessage: "series not specified in url or charm metadata", expectCode: params.ErrEntityIdNotAllowed, }, { about: "url series not in metadata", url: "~charmers/precise/juju-gui", entity: storetesting.NewCharm(&charm.Meta{ Series: []string{"trusty"}, }), expectStatus: http.StatusForbidden, expectMessage: `"precise" series not listed in charm metadata`, expectCode: params.ErrEntityIdNotAllowed, }, { about: "bad combination of series", url: "~charmers/juju-gui", entity: storetesting.NewCharm(&charm.Meta{ Series: []string{"precise", "win10"}, }), expectStatus: http.StatusBadRequest, expectMessage: `cannot mix series from ubuntu and windows in single charm`, expectCode: params.ErrInvalidEntity, }, { about: "unknown series", url: "~charmers/juju-gui", entity: storetesting.NewCharm(&charm.Meta{ 
Series: []string{"precise", "nosuchseries"}, }), expectStatus: http.StatusBadRequest, expectMessage: `unrecognized series "nosuchseries" in metadata`, expectCode: params.ErrInvalidEntity, }} func (s *ArchiveSuite) TestPostErrors(c *gc.C) { type exoticReader struct { io.Reader } for i, test := range archivePostErrorsTests { c.Logf("test %d: %s", i, test.about) if test.entity == nil { test.entity = storetesting.NewCharm(nil) } blob, hashSum := getBlob(test.entity) body := io.Reader(blob) if test.noContentLength { // net/http will automatically add a Content-Length header // if it sees *strings.Reader, but not if it's a type it doesn't // know about. body = exoticReader{body} } path := storeURL(test.url) + "/archive" if !test.noHash { path += "?hash=" + hashSum } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: path, Method: "POST", Header: http.Header{ "Content-Type": {"application/zip"}, }, Body: body, Username: testUsername, Password: testPassword, ExpectStatus: test.expectStatus, ExpectBody: params.Error{ Message: test.expectMessage, Code: test.expectCode, }, }) } } func (s *ArchiveSuite) TestConcurrentUploads(c *gc.C) { wordpress := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") f, err := os.Open(wordpress.Path) c.Assert(err, gc.IsNil) var buf bytes.Buffer _, err = io.Copy(&buf, f) c.Assert(err, gc.IsNil) hash, _ := hashOf(bytes.NewReader(buf.Bytes())) srv := httptest.NewServer(s.srv) defer srv.Close() // Our strategy for testing concurrent uploads is as follows: We // repeat uploading a bunch of simultaneous uploads to the same // charm. Each upload should either succeed, or fail with an // ErrDuplicateUpload error. We make sure that all replies are // like this, and that at least one duplicate upload error is // found, so that we know we've tested that error path. errorBodies := make(chan io.ReadCloser) // upload performs one upload of the testing charm. 
// It sends the response body on the errorBodies channel when // it finds an error response. upload := func() { c.Logf("uploading") body := bytes.NewReader(buf.Bytes()) url := srv.URL + storeURL("~charmers/precise/wordpress/archive?hash="+hash) req, err := http.NewRequest("POST", url, body) c.Assert(err, gc.IsNil) req.Header.Set("Content-Type", "application/zip") req.SetBasicAuth(testUsername, testPassword) resp, err := http.DefaultClient.Do(req) if !c.Check(err, gc.IsNil) { return } if resp.StatusCode == http.StatusOK { resp.Body.Close() return } errorBodies <- resp.Body } // The try loop continues concurrently uploading // charms until it is told to stop (by closing the try // channel). It then signals that it has terminated // by closing errorBodies. try := make(chan struct{}) go func(try chan struct{}) { for _ = range try { var wg sync.WaitGroup for p := 0; p < 5; p++ { wg.Add(1) go func() { upload() wg.Done() }() } wg.Wait() } close(errorBodies) }(try) // We continue the loop until we have found an // error (or the maximum iteration count has // been exceeded). foundError := false count := 0 loop: for { select { case body, ok := <-errorBodies: if !ok { // The try loop has terminated, // so we need to stop too. break loop } dec := json.NewDecoder(body) var errResp params.Error err := dec.Decode(&errResp) body.Close() c.Assert(err, gc.IsNil) c.Assert(errResp, jc.DeepEquals, params.Error{ Message: "duplicate upload", Code: params.ErrDuplicateUpload, }) // We've found the error we're looking for, // so we signal to the try loop that it can stop. // We will process any outstanding error bodies, // before seeing errorBodies closed and exiting // the loop. foundError = true if try != nil { close(try) try = nil } case try <- struct{}{}: // In cases we've seen, the actual maximum value of // count is 1, but let's allow for serious scheduler vagaries. 
if count++; count > 200 { c.Fatalf("200 tries with no duplicate error") } } } if !foundError { c.Errorf("no duplicate-upload errors found") } } func (s *ArchiveSuite) TestPostCharm(c *gc.C) { s.discharge = dischargeForUser("charmers") // A charm that did not exist before should get revision 0. s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress") // Subsequent charm uploads should increment the revision by 1. s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-1", -1), "mysql") // Retrieving the unpublished version returns the latest charm. rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("~charmers/wordpress/archive?channel=unpublished"), Do: bakeryDo(nil), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/precise/wordpress-1") } func (s *ArchiveSuite) TestPostCurrentVersion(c *gc.C) { s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress") // Subsequent charm uploads should not increment the revision by 1. s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress") } func (s *ArchiveSuite) TestPostMultiSeriesCharm(c *gc.C) { // A charm that did not exist before should get revision 0. 
s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/juju-gui-0", -1), "multi-series") } func (s *ArchiveSuite) TestPostMultiSeriesCharmRevisionAfterAllSingleSeriesOnes(c *gc.C) { // Create some single series versions of the charm s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/vivid/juju-gui-1", -1), "mysql") s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/trusty/juju-gui-12", -1), "mysql") s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/precise/juju-gui-44", -1), "mysql") // Check that the new multi-series revision takes the a revision // number larger than the largest of all the single series // revisions. s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/juju-gui-45", -1), "multi-series") } func (s *ArchiveSuite) TestPostMultiSeriesPromulgatedRevisionAfterAllSingleSeriesOnes(c *gc.C) { // Create some single series versions of the charm s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/vivid/juju-gui-1", 0), "mysql") s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/trusty/juju-gui-12", 9), "mysql") s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/precise/juju-gui-44", 33), "mysql") // Check that the new multi-series promulgated revision takes the // a revision number larger than the largest of all the single // series revisions. 
s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/juju-gui-45", 34), "multi-series") } func (s *ArchiveSuite) TestPostSingleSeriesCharmWhenMultiSeriesVersionExists(c *gc.C) { s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/juju-gui-0", -1), "multi-series") s.assertUploadCharmError( c, "POST", charm.MustParseURL("~charmers/saucy/juju-gui-0"), nil, "wordpress", http.StatusForbidden, params.Error{ Message: "charm name duplicates multi-series charm name cs:~charmers/juju-gui-0", Code: params.ErrEntityIdNotAllowed, }, ) } func (s *ArchiveSuite) TestPutCharm(c *gc.C) { s.assertUploadCharm( c, "PUT", newResolvedURL("~charmers/precise/wordpress-3", 3), "wordpress", ) s.assertUploadCharm( c, "PUT", newResolvedURL("~charmers/precise/wordpress-1", -1), "wordpress", ) // Check that we get a duplicate-upload error if we try to // upload to the same revision again. s.assertUploadCharmError( c, "PUT", charm.MustParseURL("~charmers/precise/wordpress-3"), nil, "mysql", http.StatusInternalServerError, params.Error{ Message: "duplicate upload", Code: params.ErrDuplicateUpload, }, ) // Check we get an error if promulgated url already uploaded. s.assertUploadCharmError( c, "PUT", charm.MustParseURL("~charmers/precise/wordpress-4"), charm.MustParseURL("precise/wordpress-3"), "wordpress", http.StatusInternalServerError, params.Error{ Message: "duplicate upload", Code: params.ErrDuplicateUpload, }, ) // Check we get an error if promulgated url has user. s.assertUploadCharmError( c, "PUT", charm.MustParseURL("~charmers/precise/wordpress-4"), charm.MustParseURL("~charmers/precise/wordpress-4"), "mysql", http.StatusBadRequest, params.Error{ Message: "promulgated URL cannot have a user", Code: params.ErrBadRequest, }, ) // Check we get an error if promulgated url has different name. 
s.assertUploadCharmError( c, "PUT", charm.MustParseURL("~charmers/precise/wordpress-4"), charm.MustParseURL("precise/mysql-4"), "mysql", http.StatusBadRequest, params.Error{ Message: "promulgated URL has incorrect charm name", Code: params.ErrBadRequest, }, ) } func (s *ArchiveSuite) TestPostBundle(c *gc.C) { // Upload the required charms. for _, rurl := range []*router.ResolvedURL{ newResolvedURL("cs:~charmers/utopic/mysql-42", 42), newResolvedURL("cs:~charmers/utopic/wordpress-47", 47), newResolvedURL("cs:~charmers/utopic/logging-1", 1), } { err := s.store.AddCharmWithArchive(rurl, storetesting.Charms.CharmArchive(c.MkDir(), rurl.URL.Name)) c.Assert(err, gc.IsNil) err = s.store.Publish(rurl, params.StableChannel) c.Assert(err, gc.IsNil) } // A bundle that did not exist before should get revision 0. s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-0", -1), "wordpress-simple") // Subsequent bundle uploads should increment the // revision by 1. s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-1", -1), "wordpress-with-logging") // Uploading the same archive twice should not increment the revision... s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-1", -1), "wordpress-with-logging") // ... but uploading an archive used by a previous revision should. s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-2", -1), "wordpress-simple") } func (s *ArchiveSuite) TestPostHashMismatch(c *gc.C) { content := []byte("some content") hash, _ := hashOf(bytes.NewReader(content)) // Corrupt the content. 
copy(content, "bogus") path := fmt.Sprintf("~charmers/precise/wordpress/archive?hash=%s", hash) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(path), Method: "POST", Header: http.Header{ "Content-Type": {"application/zip"}, }, Body: bytes.NewReader(content), Username: testUsername, Password: testPassword, ExpectStatus: http.StatusInternalServerError, ExpectBody: params.Error{ Message: "cannot put archive blob: hash mismatch", }, }) } func invalidZip() io.ReadSeeker { return strings.NewReader("invalid zip content") } func (s *ArchiveSuite) TestPostInvalidCharmZip(c *gc.C) { s.assertCannotUpload(c, "~charmers/precise/wordpress", invalidZip(), http.StatusBadRequest, params.ErrInvalidEntity, "cannot read charm archive: zip: not a valid zip file") } func (s *ArchiveSuite) TestPostInvalidBundleZip(c *gc.C) { s.assertCannotUpload(c, "~charmers/bundle/wordpress", invalidZip(), http.StatusBadRequest, params.ErrInvalidEntity, "cannot read bundle archive: zip: not a valid zip file") } var postInvalidCharmMetadataTests = []struct { about string spec charmtesting.CharmSpec expectError string }{{ about: "bad provider relation name", spec: charmtesting.CharmSpec{ Meta: ` name: foo summary: bar description: d provides: relation-name: interface: baz `, }, expectError: "relation relation-name has almost certainly not been changed from the template", }, { about: "bad provider interface name", spec: charmtesting.CharmSpec{ Meta: ` name: foo summary: bar description: d provides: baz: interface: interface-name `, }, expectError: "interface interface-name in relation baz has almost certainly not been changed from the template", }, { about: "bad requirer relation name", spec: charmtesting.CharmSpec{ Meta: ` name: foo summary: bar description: d requires: relation-name: interface: baz `, }, expectError: "relation relation-name has almost certainly not been changed from the template", }, { about: "bad requirer interface name", spec: 
charmtesting.CharmSpec{ Meta: ` name: foo summary: bar description: d requires: baz: interface: interface-name `, }, expectError: "interface interface-name in relation baz has almost certainly not been changed from the template", }, { about: "bad peer relation name", spec: charmtesting.CharmSpec{ Meta: ` name: foo summary: bar description: d peers: relation-name: interface: baz `, }, expectError: "relation relation-name has almost certainly not been changed from the template", }, { about: "bad peer interface name", spec: charmtesting.CharmSpec{ Meta: ` name: foo summary: bar description: d peers: baz: interface: interface-name `, }, expectError: "interface interface-name in relation baz has almost certainly not been changed from the template", }} func (s *ArchiveSuite) TestPostInvalidCharmMetadata(c *gc.C) { for i, test := range postInvalidCharmMetadataTests { c.Logf("test %d: %s", i, test.about) ch := charmtesting.NewCharm(c, test.spec) r := bytes.NewReader(ch.ArchiveBytes()) s.assertCannotUpload(c, "~charmers/trusty/wordpress", r, http.StatusBadRequest, params.ErrInvalidEntity, test.expectError) } } func (s *ArchiveSuite) TestPostInvalidBundleData(c *gc.C) { path := storetesting.Charms.BundleArchivePath(c.MkDir(), "bad") f, err := os.Open(path) c.Assert(err, gc.IsNil) defer f.Close() // Here we exercise both bundle internal verification (bad relation) and // validation with respect to charms (wordpress and mysql are missing). 
expectErr := `bundle verification failed: [` + `"relation [\"foo:db\" \"mysql:server\"] refers to service \"foo\" not defined in this bundle",` + `"service \"mysql\" refers to non-existent charm \"mysql\"",` + `"service \"wordpress\" refers to non-existent charm \"wordpress\""]` s.assertCannotUpload(c, "~charmers/bundle/wordpress", f, http.StatusBadRequest, params.ErrInvalidEntity, expectErr) } func (s *ArchiveSuite) TestPostCounters(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress") // Check that the upload count for the entity has been updated. key := []string{params.StatsArchiveUpload, "precise", "wordpress", "charmers"} stats.CheckCounterSum(c, s.store, key, false, 1) } func (s *ArchiveSuite) TestPostFailureCounters(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } hash, _ := hashOf(invalidZip()) doPost := func(url string, expectCode int) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(url), Method: "POST", Header: http.Header{ "Content-Type": {"application/zip"}, }, Body: invalidZip(), Username: testUsername, Password: testPassword, }) c.Assert(rec.Code, gc.Equals, expectCode, gc.Commentf("body: %s", rec.Body.Bytes())) } // Send a first invalid request (revision specified). doPost("~charmers/utopic/wordpress-42/archive", http.StatusBadRequest) // Send a second invalid request (no hash). doPost("~charmers/utopic/wordpress/archive", http.StatusBadRequest) // Send a third invalid request (invalid zip). doPost("~charmers/utopic/wordpress/archive?hash="+hash, http.StatusBadRequest) // Check that the failed upload count for the entity has been updated. 
key := []string{params.StatsArchiveFailedUpload, "utopic", "wordpress", "charmers"} stats.CheckCounterSum(c, s.store, key, false, 3) } func (s *ArchiveSuite) TestUploadOfCurrentCharmReadsFully(c *gc.C) { s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress") ch := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") f, err := os.Open(ch.Path) c.Assert(err, gc.IsNil) defer f.Close() // Calculate blob hashes. hash := blobstore.NewHash() _, err = io.Copy(hash, f) c.Assert(err, gc.IsNil) hashSum := fmt.Sprintf("%x", hash.Sum(nil)) // Simulate upload of current version h := s.handler(c) defer h.Close() b := bytes.NewBuffer([]byte("test body")) r, err := http.NewRequest("POST", "/~charmers/precise/wordpress/archive?hash="+hashSum, b) c.Assert(err, gc.IsNil) r.Header.Set("Content-Type", "application/zip") r.SetBasicAuth(testUsername, testPassword) rec := httptest.NewRecorder() h.ServeHTTP(rec, r) httptesting.AssertJSONResponse( c, rec, http.StatusOK, params.ArchiveUploadResponse{ Id: charm.MustParseURL("~charmers/precise/wordpress-0"), }, ) c.Assert(b.Len(), gc.Equals, 0) } func (s *ArchiveSuite) assertCannotUpload(c *gc.C, id string, content io.ReadSeeker, httpStatus int, errorCode params.ErrorCode, errorMessage string) { hash, size := hashOf(content) _, err := content.Seek(0, 0) c.Assert(err, gc.IsNil) path := fmt.Sprintf("%s/archive?hash=%s", id, hash) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(path), Method: "POST", ContentLength: size, Header: http.Header{ "Content-Type": {"application/zip"}, }, Body: content, Username: testUsername, Password: testPassword, ExpectStatus: httpStatus, ExpectBody: params.Error{ Message: errorMessage, Code: errorCode, }, }) // TODO(rog) check that the uploaded blob has been deleted, // by checking that no new blobs have been added to the blob store. } // assertUploadCharm uploads the testing charm with the given name // through the API. 
The URL must hold the expected revision // that the charm will be given when uploaded. func (s *ArchiveSuite) assertUploadCharm(c *gc.C, method string, url *router.ResolvedURL, charmName string) *charm.CharmArchive { ch := storetesting.Charms.CharmArchive(c.MkDir(), charmName) id, size := s.assertUpload(c, method, url, ch.Path) if url.URL.Series == "" { // V4 SPECIFIC: // We're uploading a multi-series charm, but we always // return charm ids with a series. id.Series = ch.Meta().Series[0] } meta := ch.Meta() meta.Series = nil s.assertEntityInfo(c, entityInfo{ Id: id, Meta: entityMetaInfo{ ArchiveSize: ¶ms.ArchiveSizeResponse{Size: size}, CharmMeta: meta, CharmConfig: ch.Config(), CharmActions: ch.Actions(), }, }) return ch } // assertUploadBundle uploads the testing bundle with the given name // through the API. The URL must hold the expected revision // that the bundle will be given when uploaded. func (s *ArchiveSuite) assertUploadBundle(c *gc.C, method string, url *router.ResolvedURL, bundleName string) { path := storetesting.Charms.BundleArchivePath(c.MkDir(), bundleName) b, err := charm.ReadBundleArchive(path) c.Assert(err, gc.IsNil) id, size := s.assertUpload(c, method, url, path) s.assertEntityInfo(c, entityInfo{ Id: id, Meta: entityMetaInfo{ ArchiveSize: ¶ms.ArchiveSizeResponse{Size: size}, BundleMeta: b.Data(), }, }, ) } func (s *ArchiveSuite) assertUpload(c *gc.C, method string, url *router.ResolvedURL, fileName string) (id *charm.URL, size int64) { f, err := os.Open(fileName) c.Assert(err, gc.IsNil) defer f.Close() // Calculate blob hashes. 
hash := blobstore.NewHash() hash256 := sha256.New() size, err = io.Copy(io.MultiWriter(hash, hash256), f) c.Assert(err, gc.IsNil) hashSum := fmt.Sprintf("%x", hash.Sum(nil)) hash256Sum := fmt.Sprintf("%x", hash256.Sum(nil)) _, err = f.Seek(0, 0) c.Assert(err, gc.IsNil) uploadURL := url.URL if method == "POST" { uploadURL.Revision = -1 } path := fmt.Sprintf("%s/archive?hash=%s", uploadURL.Path(), hashSum) expectId := uploadURL.WithRevision(url.URL.Revision) expectedPromulgatedId := url.PromulgatedURL() if expectedPromulgatedId != nil { path += fmt.Sprintf("&promulgated=%s", expectedPromulgatedId.String()) } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(path), Method: method, ContentLength: size, Header: http.Header{ "Content-Type": {"application/zip"}, }, Body: f, Username: testUsername, Password: testPassword, ExpectBody: params.ArchiveUploadResponse{ Id: expectId, PromulgatedId: expectedPromulgatedId, }, }) entity, err := s.store.FindEntity(url, nil) c.Assert(err, gc.IsNil) c.Assert(entity.BlobHash, gc.Equals, hashSum) c.Assert(entity.BlobHash256, gc.Equals, hash256Sum) c.Assert(entity.PreV5BlobHash256, gc.Not(gc.Equals), "") c.Assert(entity.PreV5BlobHash, gc.Not(gc.Equals), "") c.Assert(entity.PreV5BlobSize, gc.Not(gc.Equals), int64(0)) c.Assert(entity.PromulgatedURL, gc.DeepEquals, url.PromulgatedURL()) c.Assert(entity.Development, gc.Equals, false) return expectId, entity.PreV5BlobSize } // assertUploadCharmError attempts to upload the testing charm with the // given name through the API, checking that the attempt fails with the // specified error. The URL must hold the expected revision that the // charm will be given when uploaded. 
func (s *ArchiveSuite) assertUploadCharmError(c *gc.C, method string, url, purl *charm.URL, charmName string, expectStatus int, expectBody interface{}) { ch := storetesting.Charms.CharmDir(charmName) s.assertUploadError(c, method, url, purl, ch, expectStatus, expectBody) } // assertUploadError asserts that we get an error when uploading // the contents of the given file to the given url and promulgated URL. // The reason this method does not take a *router.ResolvedURL // is so that we can test what happens when an inconsistent promulgated URL // is passed in. func (s *ArchiveSuite) assertUploadError(c *gc.C, method string, url, purl *charm.URL, entity charmstore.ArchiverTo, expectStatus int, expectBody interface{}) { blob, hashSum := getBlob(entity) uploadURL := *url if method == "POST" { uploadURL.Revision = -1 } path := fmt.Sprintf("%s/archive?hash=%s", uploadURL.Path(), hashSum) if purl != nil { path += fmt.Sprintf("&promulgated=%s", purl.String()) } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(path), Method: method, ContentLength: int64(blob.Len()), Header: http.Header{ "Content-Type": {"application/zip"}, }, Body: blob, Username: testUsername, Password: testPassword, ExpectStatus: expectStatus, ExpectBody: expectBody, }) } // getBlob returns the contents and blob checksum of the given entity. 
func getBlob(entity charmstore.ArchiverTo) (blob *bytes.Buffer, hash string) { blob = new(bytes.Buffer) err := entity.ArchiveTo(blob) if err != nil { panic(err) } h := blobstore.NewHash() h.Write(blob.Bytes()) hash = fmt.Sprintf("%x", h.Sum(nil)) return blob, hash } var archiveFileErrorsTests = []struct { about string path string expectStatus int expectMessage string expectCode params.ErrorCode }{{ about: "entity not found", path: "~charmers/trusty/no-such-42/archive/icon.svg", expectStatus: http.StatusNotFound, expectMessage: `no matching charm or bundle for cs:~charmers/trusty/no-such-42`, expectCode: params.ErrNotFound, }, { about: "directory listing", path: "~charmers/utopic/wordpress-0/archive/hooks", expectStatus: http.StatusForbidden, expectMessage: "directory listing not allowed", expectCode: params.ErrForbidden, }, { about: "file not found", path: "~charmers/utopic/wordpress-0/archive/no-such", expectStatus: http.StatusNotFound, expectMessage: `file "no-such" not found in the archive`, expectCode: params.ErrNotFound, }, { about: "no permissions", path: "~charmers/utopic/mysql-0/archive/metadata.yaml", expectStatus: http.StatusUnauthorized, expectMessage: `unauthorized: access denied for user "bob"`, expectCode: params.ErrUnauthorized, }} func (s *ArchiveSuite) TestArchiveFileErrors(c *gc.C) { s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/utopic/wordpress-0", 0)) id, _ := s.addPublicCharmFromRepo(c, "mysql", newResolvedURL("cs:~charmers/utopic/mysql-0", 0)) err := s.store.SetPerms(&id.URL, "stable.read", "no-one") c.Assert(err, gc.IsNil) s.discharge = dischargeForUser("bob") for i, test := range archiveFileErrorsTests { c.Logf("test %d: %s", i, test.about) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(test.path), Do: bakeryDo(nil), Method: "GET", ExpectStatus: test.expectStatus, ExpectBody: params.Error{ Message: test.expectMessage, Code: test.expectCode, }, }) } } func (s *ArchiveSuite) 
TestArchiveFileGet(c *gc.C) { ch := storetesting.Charms.CharmArchive(c.MkDir(), "all-hooks") id := newResolvedURL("cs:~charmers/utopic/all-hooks-0", 0) s.addPublicCharm(c, ch, id) zipFile, err := zip.OpenReader(ch.Path) c.Assert(err, gc.IsNil) defer zipFile.Close() // Check a file in the root directory. s.assertArchiveFileContents(c, zipFile, "~charmers/utopic/all-hooks-0/archive/metadata.yaml") // Check a file in a subdirectory. s.assertArchiveFileContents(c, zipFile, "~charmers/utopic/all-hooks-0/archive/hooks/install") } func (s *ArchiveSuite) TestArchiveFileGetMultiSeries(c *gc.C) { // V4 SPECIFIC: // Check that the series field of a multi-series charm is omitted. url := charm.MustParseURL("~charmers/juju-gui-0") s.assertUploadCharm(c, "POST", newResolvedURL(url.String(), -1), "multi-series") err := s.store.SetPerms(url, "unpublished.read", params.Everyone) c.Assert(err, gc.IsNil) c.Logf("dorequest %v", storeURL(url.String()+"/archive")) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(url.Path() + "/archive"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %q", rec.Body.Bytes())) ch, err := charm.ReadCharmArchiveBytes(rec.Body.Bytes()) c.Assert(err, gc.IsNil) c.Assert(ch.Meta().Series, gc.HasLen, 0) } // assertArchiveFileContents checks that the response returned by the // serveArchiveFile endpoint is correct for the given archive and URL path. func (s *ArchiveSuite) assertArchiveFileContents(c *gc.C, zipFile *zip.ReadCloser, path string) { // For example: trusty/django/archive/hooks/install -> hooks/install. filePath := strings.SplitN(path, "/archive/", 2)[1] // Retrieve the expected bytes. var expectBytes []byte for _, file := range zipFile.File { if file.Name == filePath { r, err := file.Open() c.Assert(err, gc.IsNil) defer r.Close() expectBytes, err = ioutil.ReadAll(r) c.Assert(err, gc.IsNil) break } } c.Assert(expectBytes, gc.Not(gc.HasLen), 0) // Make the request. 
url := storeURL(path) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: url, }) // Ensure the response is what we expect. c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.Bytes(), gc.DeepEquals, expectBytes) headers := rec.Header() c.Assert(headers.Get("Content-Length"), gc.Equals, strconv.Itoa(len(expectBytes))) // We only have text files in the charm repository used for tests. c.Assert(headers.Get("Content-Type"), gc.Equals, "text/plain; charset=utf-8") assertCacheControl(c, rec.Header(), true) } func (s *ArchiveSuite) TestDelete(c *gc.C) { // Add a charm to the database (including the archive). id := "~charmers/utopic/mysql-42" url := newResolvedURL(id, -1) err := s.store.AddCharmWithArchive(url, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) c.Assert(err, gc.IsNil) // Retrieve the corresponding entity. var entity mongodoc.Entity err = s.store.DB.Entities().FindId(&url.URL).Select(bson.D{{"blobname", 1}}).One(&entity) c.Assert(err, gc.IsNil) // Delete the charm using the API. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(id + "/archive"), Method: "DELETE", Username: testUsername, Password: testPassword, ExpectStatus: http.StatusMethodNotAllowed, ExpectBody: params.Error{ Message: `DELETE not allowed`, Code: params.ErrMethodNotAllowed, }, }) // TODO(mhilton) reinstate this check when DELETE is re-enabled. // // The entity has been deleted. // count, err := s.store.DB.Entities().FindId(url).Count() // c.Assert(err, gc.IsNil) // c.Assert(count, gc.Equals, 0) // // // The blob has been deleted. // _, _, err = s.store.BlobStore.Open(entity.BlobName) // c.Assert(err, gc.ErrorMatches, "resource.*not found") } func (s *ArchiveSuite) TestDeleteSpecificCharm(c *gc.C) { // Add a couple of charms to the database. 
for _, id := range []string{"~charmers/trusty/mysql-42", "~charmers/utopic/mysql-42", "~charmers/utopic/mysql-47"} { err := s.store.AddCharmWithArchive( newResolvedURL(id, -1), storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) c.Assert(err, gc.IsNil) } // Delete the second charm using the API. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("~charmers/utopic/mysql-42/archive"), Method: "DELETE", Username: testUsername, Password: testPassword, ExpectStatus: http.StatusMethodNotAllowed, ExpectBody: params.Error{ Message: `DELETE not allowed`, Code: params.ErrMethodNotAllowed, }, }) // The other two charms are still present in the database. urls := []*charm.URL{ charm.MustParseURL("~charmers/trusty/mysql-42"), charm.MustParseURL("~charmers/utopic/mysql-47"), } count, err := s.store.DB.Entities().Find(bson.D{{ "_id", bson.D{{"$in", urls}}, }}).Count() c.Assert(err, gc.IsNil) c.Assert(count, gc.Equals, 2) } func (s *ArchiveSuite) TestDeleteNotFound(c *gc.C) { // Try to delete a non existing charm using the API. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("~charmers/utopic/no-such-0/archive"), Method: "DELETE", Username: testUsername, Password: testPassword, ExpectStatus: http.StatusMethodNotAllowed, ExpectBody: params.Error{ Message: `DELETE not allowed`, Code: params.ErrMethodNotAllowed, }, }) } // TODO(mhilton) reinstate this test when DELETE is re-enabled. //func (s *ArchiveSuite) TestDeleteError(c *gc.C) { // // Add a charm to the database (not including the archive). 
// id := "~charmers/utopic/mysql-42" // url := newResolvedURL(id, -1) // err := s.store.AddCharmWithArchive(url, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) // c.Assert(err, gc.IsNil) // // err = s.store.DB.Entities().UpdateId(&url.URL, bson.M{ // "$set": bson.M{ // "blobname": "no-such-name", // }, // }) // c.Assert(err, gc.IsNil) // // TODO update entity to change BlobName to "no-such-name" // // // Try to delete the charm using the API. // httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ // Handler: s.srv, // URL: storeURL(id + "/archive"), // Method: "DELETE", // Username: testUsername, // Password: testPassword, // ExpectStatus: http.StatusInternalServerError, // ExpectBody: params.Error{ // Message: `cannot delete "cs:~charmers/utopic/mysql-42": cannot remove blob no-such-name: resource at path "global/no-such-name" not found`, // }, // }) //} // TODO(mhilton) reinstate this test when DELETE is re-enabled //.func (s *ArchiveSuite) TestDeleteCounters(c *gc.C) { // if !storetesting.MongoJSEnabled() { // c.Skip("MongoDB JavaScript not available") // } // // // Add a charm to the database (including the archive). // id := "~charmers/utopic/mysql-42" // err := s.store.AddCharmWithArchive( // newResolvedURL(id, -1), // storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) // c.Assert(err, gc.IsNil) // // // Delete the charm using the API. // rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ // Handler: s.srv, // Method: "DELETE", // URL: storeURL(id + "/archive"), // Username: testUsername, // Password: testPassword, // }) // c.Assert(rec.Code, gc.Equals, http.StatusOK) // // // Check that the delete count for the entity has been updated. 
// key := []string{params.StatsArchiveDelete, "utopic", "mysql", "charmers", "42"} // stats.CheckCounterSum(c, s.store, key, false, 1) //} type basicAuthArchiveSuite struct { commonSuite } var _ = gc.Suite(&basicAuthArchiveSuite{}) func (s *basicAuthArchiveSuite) TestPostAuthErrors(c *gc.C) { s.checkAuthErrors(c, "POST", "~charmers/utopic/django/archive") } // TODO(mhilton) reinstate this test when DELETE is re-enabled. //func (s *basicAuthArchiveSuite) TestDeleteAuthErrors(c *gc.C) { // err := s.store.AddCharmWithArchive( // newResolvedURL("~charmers/utopic/django-42", 42), // storetesting.Charms.CharmArchive(c.MkDir(), "wordpress"), // ) // c.Assert(err, gc.IsNil) // s.checkAuthErrors(c, "DELETE", "utopic/django-42/archive") //} func (s *basicAuthArchiveSuite) TestPostErrorReadsFully(c *gc.C) { h := s.handler(c) defer h.Close() b := strings.NewReader("test body") r, err := http.NewRequest("POST", "/~charmers/trusty/wordpress/archive", b) c.Assert(err, gc.IsNil) r.Header.Set("Content-Type", "application/zip") r.SetBasicAuth(testUsername, testPassword) rec := httptest.NewRecorder() h.ServeHTTP(rec, r) c.Assert(rec.Code, gc.Equals, http.StatusBadRequest) c.Assert(b.Len(), gc.Equals, 0) } func (s *basicAuthArchiveSuite) TestPostAuthErrorReadsFully(c *gc.C) { h := s.handler(c) defer h.Close() b := strings.NewReader("test body") r, err := http.NewRequest("POST", "/~charmers/trusty/wordpress/archive", b) c.Assert(err, gc.IsNil) r.Header.Set("Content-Type", "application/zip") rec := httptest.NewRecorder() h.ServeHTTP(rec, r) c.Assert(rec.Code, gc.Equals, http.StatusUnauthorized) c.Assert(b.Len(), gc.Equals, 0) } var archiveAuthErrorsTests = []struct { about string header http.Header username string password string expectMessage string }{{ about: "no credentials", expectMessage: "authentication failed: missing HTTP auth header", }, { about: "invalid encoding", header: http.Header{ "Authorization": {"Basic not-a-valid-base64"}, }, expectMessage: "authentication failed: 
invalid HTTP auth encoding", }, { about: "invalid header", header: http.Header{ "Authorization": {"Basic " + base64.StdEncoding.EncodeToString([]byte("invalid"))}, }, expectMessage: "authentication failed: invalid HTTP auth contents", }, { about: "invalid credentials", username: "no-such", password: "exterminate!", expectMessage: "invalid user name or password", }} func (s *basicAuthArchiveSuite) checkAuthErrors(c *gc.C, method, url string) { for i, test := range archiveAuthErrorsTests { c.Logf("test %d: %s", i, test.about) if test.header == nil { test.header = http.Header{} } if method == "POST" { test.header.Add("Content-Type", "application/zip") } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(url), Method: method, Header: test.header, Username: test.username, Password: test.password, ExpectStatus: http.StatusUnauthorized, ExpectBody: params.Error{ Message: test.expectMessage, Code: params.ErrUnauthorized, }, }) } } // entityInfo holds all the information we want to find // out about a charm or bundle uploaded to the store. 
type entityInfo struct {
	Id   *charm.URL
	Meta entityMetaInfo
}

// entityMetaInfo holds the metadata fields requested by assertEntityInfo;
// each field is nil when the corresponding metadata is absent.
type entityMetaInfo struct {
	ArchiveSize  *params.ArchiveSizeResponse `json:"archive-size,omitempty"`
	CharmMeta    *charm.Meta                 `json:"charm-metadata,omitempty"`
	CharmConfig  *charm.Config               `json:"charm-config,omitempty"`
	CharmActions *charm.Actions              `json:"charm-actions,omitempty"`
	BundleMeta   *charm.BundleData           `json:"bundle-metadata,omitempty"`
}

// assertEntityInfo queries the meta/any endpoint for the entity
// identified by expect.Id and asserts that the returned metadata
// matches expect.
func (s *ArchiveSuite) assertEntityInfo(c *gc.C, expect entityInfo) {
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler: s.srv,
		URL: storeURL(
			expect.Id.Path() + "/meta/any" +
				"?include=archive-size" +
				"&include=charm-metadata" +
				"&include=charm-config" +
				"&include=charm-actions" +
				"&include=bundle-metadata",
		),
		Username:   testUsername,
		Password:   testPassword,
		ExpectBody: expect,
	})
}

// TestArchiveFileGetHasCORSHeaders checks that archive file responses
// carry the expected CORS headers.
func (s *ArchiveSuite) TestArchiveFileGetHasCORSHeaders(c *gc.C) {
	id := "~charmers/precise/wordpress-0"
	s.assertUploadCharm(c, "POST", newResolvedURL(id, -1), "wordpress")
	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
		Handler: s.srv,
		URL:     storeURL(fmt.Sprintf("%s/archive/metadata.yaml", id)),
	})
	headers := rec.Header()
	c.Assert(len(headers["Access-Control-Allow-Origin"]), gc.Equals, 1)
	c.Assert(len(headers["Access-Control-Allow-Headers"]), gc.Equals, 1)
	c.Assert(headers["Access-Control-Allow-Origin"][0], gc.Equals, "*")
	c.Assert(headers["Access-Control-Cache-Max-Age"][0], gc.Equals, "600")
	c.Assert(headers["Access-Control-Allow-Headers"][0], gc.Equals, "Bakery-Protocol-Version, Macaroons, X-Requested-With")
}

// hashOfBytes returns the hex-encoded blobstore hash of data.
func hashOfBytes(data []byte) string {
	hash := blobstore.NewHash()
	hash.Write(data)
	return fmt.Sprintf("%x", hash.Sum(nil))
}

// hashOf returns the hex-encoded blobstore hash of the contents of r,
// along with the number of bytes read. It panics on read error.
func hashOf(r io.Reader) (hashSum string, size int64) {
	hash := blobstore.NewHash()
	n, err := io.Copy(hash, r)
	if err != nil {
		panic(err)
	}
	return fmt.Sprintf("%x", hash.Sum(nil)), n
}

// assertCacheControl asserts that the cache control headers are
// appropriately set.
The isPublic parameter specifies // whether the id in the request represents a public charm or bundle. func assertCacheControl(c *gc.C, h http.Header, isPublic bool) { if isPublic { seconds := v4.ArchiveCachePublicMaxAge / time.Second c.Assert(h.Get("Cache-Control"), gc.Equals, fmt.Sprintf("public, max-age=%d", seconds)) } else { c.Assert(h.Get("Cache-Control"), gc.Equals, "no-cache, must-revalidate") } } type ArchiveSearchSuite struct { commonSuite } var _ = gc.Suite(&ArchiveSearchSuite{}) func (s *ArchiveSearchSuite) SetUpSuite(c *gc.C) { s.enableES = true s.commonSuite.SetUpSuite(c) } func (s *ArchiveSearchSuite) SetUpTest(c *gc.C) { s.commonSuite.SetUpTest(c) // TODO (frankban): remove this call when removing the legacy counts logic. patchLegacyDownloadCountsEnabled(s.AddCleanup, false) } func (s *ArchiveSearchSuite) TestGetSearchUpdate(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } for i, id := range []string{"~charmers/wily/mysql-42", "~who/wily/mysql-42"} { c.Logf("test %d: %s", i, id) url := newResolvedURL(id, -1) // Add a charm to the database. s.addPublicCharm(c, storetesting.NewCharm(nil), url) // Download the charm archive using the API. rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(id + "/archive"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) // Check that the search record for the entity has been updated. 
stats.CheckSearchTotalDownloads(c, s.store, &url.URL, 1) } } func (s *commonSuite) assertArchiveDownload(c *gc.C, id string, extraParams *httptesting.DoRequestParams, archiveBytes []byte) *httptest.ResponseRecorder { doParams := httptesting.DoRequestParams{} if extraParams != nil { doParams = *extraParams } doParams.Handler = s.srv if doParams.URL == "" { doParams.URL = storeURL(id + "/archive") } rec := httptesting.DoRequest(c, doParams) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes) c.Assert(rec.Header().Get(params.ContentHashHeader), gc.Equals, hashOfBytes(archiveBytes)) return rec } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/export_test.go0000664000175000017500000000030312672604603026444 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v4 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" var ResolveURL = resolveURL charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/status_test.go0000664000175000017500000002042312672604603026453 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4"

import (
	"encoding/json"
	"net/http"
	"time"

	jc "github.com/juju/testing/checkers"
	"github.com/juju/testing/httptesting"
	"github.com/juju/utils/debugstatus"
	gc "gopkg.in/check.v1"
	"gopkg.in/juju/charm.v6-unstable"
	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"

	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
)

// zeroTimeStr is the RFC3339 rendering of the zero time, used to match
// status values for operations that have never run.
var zeroTimeStr = time.Time{}.Format(time.RFC3339)

// TestStatus exercises the happy path of /debug/status: it populates the
// store with charms, bundles and ingestion/statistics log entries, then
// checks every status check reports as passed.
func (s *APISuite) TestStatus(c *gc.C) {
	for _, id := range []*router.ResolvedURL{
		newResolvedURL("cs:~charmers/precise/wordpress-2", 2),
		newResolvedURL("cs:~charmers/precise/wordpress-3", 3),
		newResolvedURL("cs:~foo/precise/mysql-9", 1),
		newResolvedURL("cs:~bar/utopic/mysql-10", -1),
		newResolvedURL("cs:~charmers/bundle/wordpress-simple-3", 3),
		newResolvedURL("cs:~bar/bundle/wordpress-simple-4", -1),
	} {
		if id.URL.Series == "bundle" {
			s.addPublicBundleFromRepo(c, id.URL.Name, id, false)
		} else {
			s.addPublicCharmFromRepo(c, id.URL.Name, id)
		}
	}
	now := time.Now()
	// Fix the server start time so the "server_started" value is known.
	s.PatchValue(&debugstatus.StartTime, now)
	start := now.Add(-2 * time.Hour)
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"ingestion started"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.IngestionType,
		Time:  start,
	})
	end := now.Add(-1 * time.Hour)
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"ingestion completed"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.IngestionType,
		Time:  end,
	})
	statisticsStart := now.Add(-1*time.Hour - 30*time.Minute)
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"legacy statistics import started"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.LegacyStatisticsType,
		Time:  statisticsStart,
	})
	statisticsEnd := now.Add(-30 * time.Minute)
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"legacy statistics import completed"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.LegacyStatisticsType,
		Time:  statisticsEnd,
	})
	s.AssertDebugStatus(c, true, map[string]params.DebugStatus{
		"mongo_connected": {
			Name:   "MongoDB is connected",
			Value:  "Connected",
			Passed: true,
		},
		"mongo_collections": {
			Name:   "MongoDB collections",
			Value:  "All required collections exist",
			Passed: true,
		},
		"elasticsearch": {
			Name:   "Elastic search is running",
			Value:  "Elastic search is not configured",
			Passed: true,
		},
		"entities": {
			Name:   "Entities in charm store",
			Value:  "4 charms; 2 bundles; 4 promulgated",
			Passed: true,
		},
		"base_entities": {
			Name:   "Base entities in charm store",
			Value:  "count: 5",
			Passed: true,
		},
		"server_started": {
			Name:   "Server started",
			Value:  now.String(),
			Passed: true,
		},
		"ingestion": {
			Name:   "Ingestion",
			Value:  "started: " + start.Format(time.RFC3339) + ", completed: " + end.Format(time.RFC3339),
			Passed: true,
		},
		"legacy_statistics": {
			Name:   "Legacy Statistics Load",
			Value:  "started: " + statisticsStart.Format(time.RFC3339) + ", completed: " + statisticsEnd.Format(time.RFC3339),
			Passed: true,
		},
	})
}

// TestStatusWithoutCorrectCollections checks that a missing collection
// is reported as a failed status check.
func (s *APISuite) TestStatusWithoutCorrectCollections(c *gc.C) {
	s.store.DB.Entities().DropCollection()
	s.AssertDebugStatus(c, false, map[string]params.DebugStatus{
		"mongo_collections": {
			Name:   "MongoDB collections",
			Value:  "Missing collections: [" + s.store.DB.Entities().Name + "]",
			Passed: false,
		},
	})
}

// TestStatusWithoutIngestion checks the ingestion status when no
// ingestion log entries exist at all.
func (s *APISuite) TestStatusWithoutIngestion(c *gc.C) {
	s.AssertDebugStatus(c, false, map[string]params.DebugStatus{
		"ingestion": {
			Name:   "Ingestion",
			Value:  "started: " + zeroTimeStr + ", completed: " + zeroTimeStr,
			Passed: false,
		},
	})
}

// TestStatusIngestionStarted checks the ingestion status when ingestion
// has started but not completed.
func (s *APISuite) TestStatusIngestionStarted(c *gc.C) {
	now := time.Now()
	start := now.Add(-1 * time.Hour)
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"ingestion started"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.IngestionType,
		Time:  start,
	})
	s.AssertDebugStatus(c, false, map[string]params.DebugStatus{
		"ingestion": {
			Name:   "Ingestion",
			Value:  "started: " + start.Format(time.RFC3339) + ", completed: " + zeroTimeStr,
			Passed: false,
		},
	})
}

// TestStatusWithoutLegacyStatistics checks the statistics status when no
// legacy statistics log entries exist.
func (s *APISuite) TestStatusWithoutLegacyStatistics(c *gc.C) {
	s.AssertDebugStatus(c, false, map[string]params.DebugStatus{
		"legacy_statistics": {
			Name:   "Legacy Statistics Load",
			Value:  "started: " + zeroTimeStr + ", completed: " + zeroTimeStr,
			Passed: false,
		},
	})
}

// TestStatusLegacyStatisticsStarted checks the statistics status when
// the import has started but not completed.
func (s *APISuite) TestStatusLegacyStatisticsStarted(c *gc.C) {
	now := time.Now()
	statisticsStart := now.Add(-1*time.Hour - 30*time.Minute)
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"legacy statistics import started"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.LegacyStatisticsType,
		Time:  statisticsStart,
	})
	s.AssertDebugStatus(c, false, map[string]params.DebugStatus{
		"legacy_statistics": {
			Name:   "Legacy Statistics Load",
			Value:  "started: " + statisticsStart.Format(time.RFC3339) + ", completed: " + zeroTimeStr,
			Passed: false,
		},
	})
}

// TestStatusLegacyStatisticsMultipleLogs checks that, with several
// start/complete log entries, the most recent of each is reported.
func (s *APISuite) TestStatusLegacyStatisticsMultipleLogs(c *gc.C) {
	now := time.Now()
	statisticsStart := now.Add(-1*time.Hour - 30*time.Minute)
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"legacy statistics import started"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.LegacyStatisticsType,
		Time:  statisticsStart.Add(-1 * time.Hour),
	})
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"legacy statistics import started"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.LegacyStatisticsType,
		Time:  statisticsStart,
	})
	statisticsEnd := now.Add(-30 * time.Minute)
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"legacy statistics import completed"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.LegacyStatisticsType,
		Time:  statisticsEnd.Add(-1 * time.Hour),
	})
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"legacy statistics import completed"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.LegacyStatisticsType,
		Time:  statisticsEnd,
	})
	s.AssertDebugStatus(c, false, map[string]params.DebugStatus{
		"legacy_statistics": {
			Name:   "Legacy Statistics Load",
			Value:  "started: " + statisticsStart.Format(time.RFC3339) + ", completed: " + statisticsEnd.Format(time.RFC3339),
			Passed: true,
		},
	})
}

func (s *APISuite) TestStatusBaseEntitiesError(c *gc.C) {
	// Add a base entity without any corresponding entities.
	entity := &mongodoc.BaseEntity{
		URL:  charm.MustParseURL("django"),
		Name: "django",
	}
	err := s.store.DB.BaseEntities().Insert(entity)
	c.Assert(err, gc.IsNil)
	s.AssertDebugStatus(c, false, map[string]params.DebugStatus{
		"base_entities": {
			Name:   "Base entities in charm store",
			Value:  "count: 1",
			Passed: false,
		},
	})
}

// AssertDebugStatus asserts that the current /debug/status endpoint
// matches the given status, ignoring status duration.
// If complete is true, it fails if the results contain
// keys not mentioned in status.
func (s *APISuite) AssertDebugStatus(c *gc.C, complete bool, status map[string]params.DebugStatus) {
	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
		Handler: s.srv,
		URL:     storeURL("debug/status"),
	})
	c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.Bytes()))
	c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "application/json")
	var gotStatus map[string]params.DebugStatus
	err := json.Unmarshal(rec.Body.Bytes(), &gotStatus)
	c.Assert(err, gc.IsNil)
	for key, r := range gotStatus {
		// Unless a complete match was requested, ignore checks the
		// caller did not mention; durations are never compared.
		if _, found := status[key]; !complete && !found {
			delete(gotStatus, key)
			continue
		}
		r.Duration = 0
		gotStatus[key] = r
	}
	c.Assert(gotStatus, jc.DeepEquals, status)
}

// statusWithElasticSearchSuite runs status tests with Elastic search
// enabled.
type statusWithElasticSearchSuite struct {
	commonSuite
}

var _ = gc.Suite(&statusWithElasticSearchSuite{})

func (s *statusWithElasticSearchSuite) SetUpSuite(c *gc.C) {
	s.enableES = true
	s.commonSuite.SetUpSuite(c)
}

func (s *statusWithElasticSearchSuite) TestStatusWithElasticSearch(c *gc.C) {
	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
		Handler: s.srv,
		URL:     storeURL("debug/status"),
	})
	var results map[string]params.DebugStatus
	err := json.Unmarshal(rec.Body.Bytes(), &results)
	c.Assert(err, gc.IsNil)
	c.Assert(results["elasticsearch"].Name, gc.Equals, "Elastic search is running")
	c.Assert(results["elasticsearch"].Value, jc.Contains, "cluster_name:")
}
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/search.go0000664000175000017500000000217712672604603025344 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v4 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" import ( "net/http" "gopkg.in/juju/charmstore.v5-unstable/internal/v5" ) const maxConcurrency = 20 // GET search[?text=text][&autocomplete=1][&filter=value…][&limit=limit][&include=meta][&skip=count][&sort=field[+dir]] // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-search func (h ReqHandler) serveSearch(_ http.Header, req *http.Request) (interface{}, error) { sp, err := v5.ParseSearchParams(req) if err != nil { return "", err } sp.ExpandedMultiSeries = true auth, err := h.CheckRequest(req, nil, v5.OpOther) if err != nil { logger.Infof("authorization failed on search request, granting no privileges: %v", err) } sp.Admin = auth.Admin if auth.Username != "" { sp.Groups = append(sp.Groups, auth.Username) groups, err := h.GroupsForUser(auth.Username) if err != nil { logger.Infof("cannot get groups for user %q, assuming no groups: %v", auth.Username, err) } sp.Groups = append(sp.Groups, groups...) } return h.Search(sp, req) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/search_test.go0000664000175000017500000006375412672604603026413 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" import ( "encoding/json" "net/http" "sort" "strings" "github.com/juju/loggo" jc "github.com/juju/testing/checkers" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" "gopkg.in/juju/charmstore.v5-unstable/internal/v4" ) type SearchSuite struct { commonSuite } var _ = gc.Suite(&SearchSuite{}) var exportTestCharms = map[string]*router.ResolvedURL{ "multi-series": newResolvedURL("cs:~charmers/multi-series-0", 0), "wordpress": newResolvedURL("cs:~charmers/precise/wordpress-23", 23), "mysql": newResolvedURL("cs:~openstack-charmers/trusty/mysql-7", 7), "varnish": newResolvedURL("cs:~foo/trusty/varnish-1", -1), "riak": newResolvedURL("cs:~charmers/trusty/riak-67", 67), } var exportTestBundles = map[string]*router.ResolvedURL{ "wordpress-simple": newResolvedURL("cs:~charmers/bundle/wordpress-simple-4", 4), } func (s *SearchSuite) SetUpSuite(c *gc.C) { s.enableES = true s.enableIdentity = true s.commonSuite.SetUpSuite(c) } func (s *SearchSuite) SetUpTest(c *gc.C) { s.commonSuite.SetUpTest(c) s.addCharmsToStore(c) err := s.store.SetPerms(charm.MustParseURL("cs:~charmers/riak"), "stable.read", "charmers", "test-user") c.Assert(err, gc.IsNil) err = s.store.UpdateSearch(newResolvedURL("~charmers/trusty/riak-0", 0)) c.Assert(err, gc.IsNil) err = s.esSuite.ES.RefreshIndex(s.esSuite.TestIndex) c.Assert(err, gc.IsNil) } func (s *SearchSuite) addCharmsToStore(c *gc.C) { for name, id := range exportTestCharms { s.addPublicCharm(c, getSearchCharm(name), id) } for name, id := range exportTestBundles { s.addPublicBundle(c, getSearchBundle(name), id, false) } } func getSearchCharm(name string) 
*storetesting.Charm { ca := storetesting.Charms.CharmDir(name) meta := ca.Meta() meta.Categories = append(strings.Split(name, "-"), "bar") return storetesting.NewCharm(meta) } func getSearchBundle(name string) *storetesting.Bundle { ba := storetesting.Charms.BundleDir(name) data := ba.Data() data.Tags = append(strings.Split(name, "-"), "baz") return storetesting.NewBundle(data) } func (s *SearchSuite) TestSuccessfulSearches(c *gc.C) { tests := []struct { about string query string results []*router.ResolvedURL }{{ about: "bare search", query: "", results: []*router.ResolvedURL{ // V4 SPECIFIC router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], }, }, { about: "text search", query: "text=wordpress", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "autocomplete search", query: "text=word&autocomplete=1", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "blank text search", query: "text=", results: []*router.ResolvedURL{ // V4 SPECIFIC router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], }, }, { about: "description filter search", query: "description=database", results: []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["varnish"], }, }, { about: 
"name filter search", query: "name=mysql", results: []*router.ResolvedURL{ exportTestCharms["mysql"], }, }, { about: "owner filter search", query: "owner=foo", results: []*router.ResolvedURL{ exportTestCharms["varnish"], }, }, { about: "provides filter search", query: "provides=mysql", results: []*router.ResolvedURL{ exportTestCharms["mysql"], }, }, { about: "requires filter search", query: "requires=mysql", results: []*router.ResolvedURL{ // V4 SPECIFIC router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), exportTestCharms["wordpress"], }, }, { about: "series filter search", query: "series=trusty", results: []*router.ResolvedURL{ // V4 SPECIFIC router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), exportTestCharms["mysql"], exportTestCharms["varnish"], }, }, { about: "summary filter search", query: "summary=database", results: []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["varnish"], }, }, { about: "tags filter search", query: "tags=wordpress", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "type filter search", query: "type=bundle", results: []*router.ResolvedURL{ exportTestBundles["wordpress-simple"], }, }, { about: "multiple type filter search", query: "type=bundle&type=charm", results: []*router.ResolvedURL{ // V4 SPECIFIC router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], }, }, { about: "provides multiple 
interfaces filter search", query: "provides=monitoring+http", results: []*router.ResolvedURL{ // V4 SPECIFIC router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), exportTestCharms["wordpress"], }, }, { about: "requires multiple interfaces filter search", query: "requires=mysql+varnish", results: []*router.ResolvedURL{ // V4 SPECIFIC router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), exportTestCharms["wordpress"], }, }, { about: "multiple tags filter search", query: "tags=mysql+bar", results: []*router.ResolvedURL{ exportTestCharms["mysql"], }, }, { about: "blank owner", query: "owner=", results: []*router.ResolvedURL{ // V4 SPECIFIC router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestBundles["wordpress-simple"], }, }, { about: "paginated search", query: "name=mysql&skip=1", }, { about: "promulgated", query: "promulgated=1", results: []*router.ResolvedURL{ // V4 SPECIFIC router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestBundles["wordpress-simple"], }, }, { about: "not 
promulgated", query: "promulgated=0", results: []*router.ResolvedURL{ exportTestCharms["varnish"], }, }, { about: "promulgated with owner", query: "promulgated=1&owner=openstack-charmers", results: []*router.ResolvedURL{ exportTestCharms["mysql"], }, }} for i, test := range tests { c.Logf("test %d. %s", i, test.about) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search?" + test.query), }) var sr params.SearchResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, len(test.results)) c.Logf("results: %s", rec.Body.Bytes()) assertResultSet(c, sr, test.results) } } func (s *SearchSuite) TestPaginatedSearch(c *gc.C) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search?text=wordpress&skip=1"), }) var sr params.SearchResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, 1) c.Assert(sr.Total, gc.Equals, 2) } func (s *SearchSuite) TestMetadataFields(c *gc.C) { tests := []struct { about string query string meta map[string]interface{} }{{ about: "archive-size", query: "name=mysql&include=archive-size", meta: map[string]interface{}{ "archive-size": params.ArchiveSizeResponse{getSearchCharm("mysql").Size()}, }, }, { about: "bundle-metadata", query: "name=wordpress-simple&type=bundle&include=bundle-metadata", meta: map[string]interface{}{ "bundle-metadata": getSearchBundle("wordpress-simple").Data(), }, }, { about: "bundle-machine-count", query: "name=wordpress-simple&type=bundle&include=bundle-machine-count", meta: map[string]interface{}{ "bundle-machine-count": params.BundleCount{2}, }, }, { about: "bundle-unit-count", query: "name=wordpress-simple&type=bundle&include=bundle-unit-count", meta: map[string]interface{}{ "bundle-unit-count": params.BundleCount{2}, }, }, { about: "charm-actions", query: "name=wordpress&type=charm&include=charm-actions", meta: 
map[string]interface{}{ "charm-actions": getSearchCharm("wordpress").Actions(), }, }, { about: "charm-config", query: "name=wordpress&type=charm&include=charm-config", meta: map[string]interface{}{ "charm-config": getSearchCharm("wordpress").Config(), }, }, { about: "charm-related", query: "name=wordpress&type=charm&include=charm-related", meta: map[string]interface{}{ "charm-related": params.RelatedResponse{ Provides: map[string][]params.EntityResult{ "mysql": { { Id: exportTestCharms["mysql"].PreferredURL(), }, }, "varnish": { { Id: exportTestCharms["varnish"].PreferredURL(), }, }, }, }, }, }, { about: "multiple values", query: "name=wordpress&type=charm&include=charm-related&include=charm-config", meta: map[string]interface{}{ "charm-related": params.RelatedResponse{ Provides: map[string][]params.EntityResult{ "mysql": { { Id: exportTestCharms["mysql"].PreferredURL(), }, }, "varnish": { { Id: exportTestCharms["varnish"].PreferredURL(), }, }, }, }, "charm-config": getSearchCharm("wordpress").Config(), }, }} for i, test := range tests { c.Logf("test %d. %s", i, test.about) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search?" 
+ test.query), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) var sr struct { Results []struct { Meta json.RawMessage } } err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, 1) c.Assert(string(sr.Results[0].Meta), jc.JSONEquals, test.meta) } } func (s *SearchSuite) TestSearchError(c *gc.C) { err := s.esSuite.ES.DeleteIndex(s.esSuite.TestIndex) c.Assert(err, gc.Equals, nil) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search?name=wordpress"), }) c.Assert(rec.Code, gc.Equals, http.StatusInternalServerError) var resp params.Error err = json.Unmarshal(rec.Body.Bytes(), &resp) c.Assert(err, gc.IsNil) c.Assert(resp.Code, gc.Equals, params.ErrorCode("")) c.Assert(resp.Message, gc.Matches, "error performing search: search failed: .*") } func (s *SearchSuite) TestSearchIncludeError(c *gc.C) { // Perform a search for all charms, including the // manifest, which will try to retrieve all charm // blobs. rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search?type=charm&include=manifest"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) var resp params.SearchResponse err := json.Unmarshal(rec.Body.Bytes(), &resp) // V4 SPECIFIC // cs:riak will not be found because it is not visible to "everyone". // cs:multi-series will be expanded to 4 different results. c.Assert(resp.Results, gc.HasLen, len(exportTestCharms)+3-1) // Now remove one of the blobs. The list should still // work, but only return a single result. entity, err := s.store.FindEntity(newResolvedURL("~charmers/precise/wordpress-23", 23), nil) c.Assert(err, gc.IsNil) err = s.store.BlobStore.Remove(entity.BlobName) c.Assert(err, gc.IsNil) // Now search again - we should get one result less // (and the error will be logged). // Register a logger that so that we can check the logging output. // It will be automatically removed later because IsolatedMgoESSuite // uses LoggingSuite. 
var tw loggo.TestWriter err = loggo.RegisterWriter("test-log", &tw, loggo.DEBUG) c.Assert(err, gc.IsNil) rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search?type=charm&include=manifest"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) resp = params.SearchResponse{} err = json.Unmarshal(rec.Body.Bytes(), &resp) // V4 SPECIFIC // cs:riak will not be found because it is not visible to "everyone". // cs:multi-series will be expanded to 4 different results. // cs:wordpress will not be found because it has no manifest. c.Assert(resp.Results, gc.HasLen, len(exportTestCharms)+3-2) c.Assert(tw.Log(), jc.LogMatches, []string{"cannot retrieve metadata for cs:precise/wordpress-23: cannot open archive data for cs:precise/wordpress-23: .*"}) } func (s *SearchSuite) TestSorting(c *gc.C) { tests := []struct { about string query string results []*router.ResolvedURL }{{ about: "name ascending", query: "sort=name", results: []*router.ResolvedURL{ // V4 SPECIFIC router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "name descending", query: "sort=-name", results: []*router.ResolvedURL{ // V4 SPECIFIC exportTestBundles["wordpress-simple"], exportTestCharms["wordpress"], exportTestCharms["varnish"], exportTestCharms["mysql"], router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), }, }, { about: "series ascending", query: "sort=series,name", results: []*router.ResolvedURL{ // V4 
SPECIFIC exportTestBundles["wordpress-simple"], exportTestCharms["wordpress"], router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), exportTestCharms["mysql"], exportTestCharms["varnish"], router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), }, }, { about: "series descending", query: "sort=-series&sort=name", results: []*router.ResolvedURL{ // V4 SPECIFIC router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "owner ascending", query: "sort=owner,name", results: []*router.ResolvedURL{ // V4 SPECIFIC router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], exportTestCharms["varnish"], exportTestCharms["mysql"], }, }, { about: "owner descending", query: "sort=-owner&sort=name", results: []*router.ResolvedURL{ // V4 SPECIFIC exportTestCharms["mysql"], exportTestCharms["varnish"], router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }} for i, test := range tests { c.Logf("test %d. 
%s", i, test.about)
		rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
			Handler: s.srv,
			URL:     storeURL("search?" + test.query),
		})
		var sr params.SearchResponse
		err := json.Unmarshal(rec.Body.Bytes(), &sr)
		c.Assert(err, gc.IsNil)
		// Not using assertResultSet(c, sr, test.results) as it does sort internally
		c.Assert(sr.Results, gc.HasLen, len(test.results), gc.Commentf("expected %#v", test.results))
		c.Logf("results: %s", rec.Body.Bytes())
		for i := range test.results {
			// BUG FIX: the Commentf format string used %d with no
			// argument, so a failure printed "element %!d(MISSING)".
			// Supply the index so the mismatching element is identified.
			c.Assert(sr.Results[i].Id.String(), gc.Equals, test.results[i].PreferredURL().String(), gc.Commentf("element %d", i))
		}
	}
}

// TestSortUnsupportedField checks that sorting on an unknown field is
// rejected with ErrBadRequest and a descriptive message.
func (s *SearchSuite) TestSortUnsupportedField(c *gc.C) {
	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
		Handler: s.srv,
		URL:     storeURL("search?sort=foo"),
	})
	var e params.Error
	err := json.Unmarshal(rec.Body.Bytes(), &e)
	c.Assert(err, gc.IsNil)
	c.Assert(e.Code, gc.Equals, params.ErrBadRequest)
	c.Assert(e.Message, gc.Equals, "invalid sort field: unrecognized sort parameter \"foo\"")
}

func (s *SearchSuite) TestDownloadsBoost(c *gc.C) {
	// TODO (frankban): remove this call when removing the legacy counts logic.
patchLegacyDownloadCountsEnabled(s.AddCleanup, false) charmDownloads := map[string]int{ "mysql": 0, "wordpress": 1, "varnish": 8, } for n, cnt := range charmDownloads { url := newResolvedURL("cs:~downloads-test/trusty/x-1", -1) url.URL.Name = n s.addPublicCharm(c, getSearchCharm(n), url) for i := 0; i < cnt; i++ { err := s.store.IncrementDownloadCounts(url) c.Assert(err, gc.IsNil) } } err := s.esSuite.ES.RefreshIndex(s.esSuite.TestIndex) c.Assert(err, gc.IsNil) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search?owner=downloads-test"), }) var sr params.SearchResponse err = json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, 3) c.Assert(sr.Results[0].Id.Name, gc.Equals, "varnish") c.Assert(sr.Results[1].Id.Name, gc.Equals, "wordpress") c.Assert(sr.Results[2].Id.Name, gc.Equals, "mysql") } // TODO(mhilton) remove this test when removing legacy counts logic. func (s *SearchSuite) TestLegacyStatsUpdatesSearch(c *gc.C) { patchLegacyDownloadCountsEnabled(s.AddCleanup, true) doc, err := s.store.ES.GetSearchDocument(charm.MustParseURL("~openstack-charmers/trusty/mysql-7")) c.Assert(err, gc.IsNil) c.Assert(doc.TotalDownloads, gc.Equals, int64(0)) s.assertPutAsAdmin(c, "~openstack-charmers/trusty/mysql-7/meta/extra-info/"+params.LegacyDownloadStats, 57) doc, err = s.store.ES.GetSearchDocument(charm.MustParseURL("~openstack-charmers/trusty/mysql-7")) c.Assert(err, gc.IsNil) c.Assert(doc.TotalDownloads, gc.Equals, int64(57)) } func (s *SearchSuite) TestSearchWithAdminCredentials(c *gc.C) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search"), Username: testUsername, Password: testPassword, }) c.Assert(rec.Code, gc.Equals, http.StatusOK) expected := []*router.ResolvedURL{ // V4 SPECIFIC router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), 
router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), exportTestCharms["mysql"], exportTestCharms["wordpress"], exportTestCharms["riak"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], } var sr params.SearchResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) assertResultSet(c, sr, expected) } func (s *SearchSuite) TestSearchWithUserMacaroon(c *gc.C) { m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ checkers.DeclaredCaveat("username", "test-user"), }) c.Assert(err, gc.IsNil) macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m}) c.Assert(err, gc.IsNil) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search"), Cookies: []*http.Cookie{macaroonCookie}, }) c.Assert(rec.Code, gc.Equals, http.StatusOK) expected := []*router.ResolvedURL{ // V4 SPECIFIC router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), exportTestCharms["mysql"], exportTestCharms["wordpress"], exportTestCharms["riak"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], } var sr params.SearchResponse err = json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) assertResultSet(c, sr, expected) } func (s *SearchSuite) TestSearchWithUserInGroups(c *gc.C) { m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ checkers.DeclaredCaveat(v4.UsernameAttr, "bob"), }) c.Assert(err, gc.IsNil) macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m}) c.Assert(err, gc.IsNil) s.idM.groups = map[string][]string{ "bob": {"test-user", "test-user2"}, } rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search"), Cookies: 
[]*http.Cookie{macaroonCookie}, }) c.Assert(rec.Code, gc.Equals, http.StatusOK) expected := []*router.ResolvedURL{ // V4 SPECIFIC router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), exportTestCharms["mysql"], exportTestCharms["wordpress"], exportTestCharms["riak"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], } var sr params.SearchResponse err = json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) assertResultSet(c, sr, expected) } func (s *SearchSuite) TestSearchWithBadAdminCredentialsAndACookie(c *gc.C) { m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ checkers.DeclaredCaveat("username", "test-user"), }) c.Assert(err, gc.IsNil) macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m}) c.Assert(err, gc.IsNil) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search"), Cookies: []*http.Cookie{macaroonCookie}, Username: testUsername, Password: "bad-password", }) c.Assert(rec.Code, gc.Equals, http.StatusOK) expected := []*router.ResolvedURL{ // V4 SPECIFIC router.MustNewResolvedURL("cs:~charmers/trusty/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/utopic/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/vivid/multi-series-0", 0), router.MustNewResolvedURL("cs:~charmers/wily/multi-series-0", 0), exportTestCharms["mysql"], exportTestCharms["wordpress"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], } var sr params.SearchResponse err = json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) assertResultSet(c, sr, expected) } func assertResultSet(c *gc.C, sr params.SearchResponse, expected []*router.ResolvedURL) { sort.Sort(searchResultById(sr.Results)) sort.Sort(resolvedURLByPreferredURL(expected)) 
c.Assert(sr.Results, gc.HasLen, len(expected), gc.Commentf("expected %#v", expected)) for i := range expected { c.Assert(sr.Results[i].Id.String(), gc.Equals, expected[i].PreferredURL().String(), gc.Commentf("element %d")) } } type searchResultById []params.EntityResult func (s searchResultById) Len() int { return len(s) } func (s searchResultById) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s searchResultById) Less(i, j int) bool { return s[i].Id.String() < s[j].Id.String() } type resolvedURLByPreferredURL []*router.ResolvedURL func (s resolvedURLByPreferredURL) Len() int { return len(s) } func (s resolvedURLByPreferredURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s resolvedURLByPreferredURL) Less(i, j int) bool { return s[i].PreferredURL().String() < s[j].PreferredURL().String() } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/archive.go0000664000175000017500000000311612672604603025512 0ustar marcomarco// Copyright 2016 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v4 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" import ( "net/http" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable/internal/router" ) // serveArchive returns a handler for /archive that falls back to v5ServeArchive // for all operations not handled by v4. 
func (h ReqHandler) serveArchive(v5ServeArchive router.IdHandler) router.IdHandler {
	// GET requests are served by the v4-specific archive getter; any
	// other method is delegated unchanged to the v5 handler.
	get := h.ResolvedIdHandler(h.serveGetArchive)
	return func(id *charm.URL, w http.ResponseWriter, req *http.Request) error {
		if req.Method == "GET" {
			return get(id, w, req)
		}
		return v5ServeArchive(id, w, req)
	}
}

// serveGetArchive handles GET id/archive for v4: it authorizes the
// request (including terms) and then streams the pre-v5 form of the
// entity's archive blob.
func (h ReqHandler) serveGetArchive(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error {
	_, err := h.AuthorizeEntityAndTerms(req, []*router.ResolvedURL{id})
	if err != nil {
		// errgo.Any preserves the error's cause for the caller.
		return errgo.Mask(err, errgo.Any)
	}
	blob, err := h.Store.OpenBlobPreV5(id)
	if err != nil {
		return errgo.Mask(err, errgo.Is(params.ErrNotFound))
	}
	defer blob.Close()
	h.SendEntityArchive(id, w, req, blob)
	return nil
}

// GET id/archive/path
// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idarchivepath
func (h ReqHandler) serveArchiveFile(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error {
	blob, err := h.Store.OpenBlobPreV5(id)
	if err != nil {
		return errgo.Notef(err, "cannot open archive data for %v", id)
	}
	defer blob.Close()
	return h.ServeBlobFile(w, req, id, blob)
}
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/auth_test.go0000664000175000017500000011025412672604603026073 0ustar marcomarco// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" import ( "encoding/json" "fmt" "io" "net/http" "net/url" "os" "sort" "strings" "sync" "time" jc "github.com/juju/testing/checkers" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" "gopkg.in/juju/charmstore.v5-unstable/internal/v4" ) func (s *commonSuite) AssertEndpointAuth(c *gc.C, p httptesting.JSONCallParams) { s.testNonMacaroonAuth(c, p) s.testMacaroonAuth(c, p) } func (s *commonSuite) testNonMacaroonAuth(c *gc.C, p httptesting.JSONCallParams) { p.Handler = s.noMacaroonSrv // Check that the request succeeds when provided with the // correct credentials. p.Username = "test-user" p.Password = "test-password" httptesting.AssertJSONCall(c, p) // Check that auth fails with no creds provided. p.Username = "" p.Password = "" p.ExpectStatus = http.StatusUnauthorized p.ExpectBody = params.Error{ Message: "authentication failed: missing HTTP auth header", Code: params.ErrUnauthorized, } httptesting.AssertJSONCall(c, p) // Check that auth fails with the wrong username provided. p.Username = "wrong" p.Password = "test-password" p.ExpectStatus = http.StatusUnauthorized p.ExpectBody = params.Error{ Message: "invalid user name or password", Code: params.ErrUnauthorized, } httptesting.AssertJSONCall(c, p) // Check that auth fails with the wrong password provided. 
p.Username = "test-user" p.Password = "test-password-wrong" p.ExpectStatus = http.StatusUnauthorized p.ExpectBody = params.Error{ Message: "invalid user name or password", Code: params.ErrUnauthorized, } httptesting.AssertJSONCall(c, p) } func (s *commonSuite) testMacaroonAuth(c *gc.C, p httptesting.JSONCallParams) { // Make a test third party caveat discharger. var checkedCaveats []string var mu sync.Mutex var dischargeError error s.discharge = func(cond string, arg string) ([]checkers.Caveat, error) { mu.Lock() defer mu.Unlock() checkedCaveats = append(checkedCaveats, cond+" "+arg) if dischargeError != nil { return nil, dischargeError } return []checkers.Caveat{ checkers.DeclaredCaveat("username", "bob"), }, nil } p.Handler = s.srv client := httpbakery.NewHTTPClient() cookieJar := &cookieJar{CookieJar: client.Jar} client.Jar = cookieJar p.Do = bakeryDo(client) // Check that the call succeeds with simple auth. c.Log("simple auth sucess") p.Username = "test-user" p.Password = "test-password" httptesting.AssertJSONCall(c, p) c.Assert(checkedCaveats, gc.HasLen, 0) c.Assert(cookieJar.cookieURLs, gc.HasLen, 0) // Check that the call gives us the correct // "authentication denied response" without simple auth // and uses the third party checker // and that a cookie is stored at the correct location. // TODO when we allow admin access via macaroon creds, // change this test to expect success. c.Log("macaroon unauthorized error") p.Username, p.Password = "", "" p.ExpectStatus = http.StatusUnauthorized p.ExpectBody = params.Error{ Message: `unauthorized: access denied for user "bob"`, Code: params.ErrUnauthorized, } httptesting.AssertJSONCall(c, p) sort.Strings(checkedCaveats) c.Assert(checkedCaveats, jc.DeepEquals, []string{ "is-authenticated-user ", }) checkedCaveats = nil c.Assert(cookieJar.cookieURLs, gc.DeepEquals, []string{"http://somehost/"}) // Check that the call fails with incorrect simple auth info. 
c.Log("simple auth error")
	p.Password = "bad-password"
	p.ExpectStatus = http.StatusUnauthorized
	p.ExpectBody = params.Error{
		Message: "authentication failed: missing HTTP auth header",
		Code:    params.ErrUnauthorized,
	}
	// NOTE(review): no AssertJSONCall is issued for the "simple auth
	// error" expectations above; they are only carried into the next
	// call after being partially overwritten — confirm this is intended.

	// Check that it fails when the discharger refuses the discharge.
	c.Log("macaroon discharge error")
	client = httpbakery.NewHTTPClient()
	dischargeError = fmt.Errorf("go away")
	p.Do = bakeryDo(client) // clear cookies
	p.Password = ""
	p.Username = ""
	p.ExpectError = `cannot get discharge from "https://[^"]*": third party refused discharge: cannot discharge: go away`
	httptesting.AssertJSONCall(c, p)
}

// cookieJar wraps an http.CookieJar, recording a normalized form of
// each URL that cookies are stored against so tests can assert on the
// cookie path independently of the test server's address.
type cookieJar struct {
	cookieURLs []string
	http.CookieJar
}

// SetCookies records where the cookie is stored (with the host
// rewritten to "somehost") and panics if any cookie other than the
// "macaroon-authn" cookie is set.
func (j *cookieJar) SetCookies(url *url.URL, cookies []*http.Cookie) {
	url1 := *url
	url1.Host = "somehost"
	for _, cookie := range cookies {
		if cookie.Path != "" {
			url1.Path = cookie.Path
		}
		if cookie.Name != "macaroon-authn" {
			panic("unexpected cookie name: " + cookie.Name)
		}
	}
	j.cookieURLs = append(j.cookieURLs, url1.String())
	j.CookieJar.SetCookies(url, cookies)
}

// noInteraction fails any bakery interaction attempt; used where a test
// must not require user interaction.
func noInteraction(*url.URL) error {
	return fmt.Errorf("unexpected interaction required")
}

// dischargedAuthCookie retrieves and discharges an authentication macaroon cookie. It adds the provided
// first-party caveats before discharging the macaroon.
func dischargedAuthCookie(c *gc.C, srv http.Handler, caveats ...string) *http.Cookie { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: srv, URL: storeURL("macaroon"), Method: "GET", }) var m macaroon.Macaroon err := json.Unmarshal(rec.Body.Bytes(), &m) c.Assert(err, gc.IsNil) for _, cav := range caveats { err := m.AddFirstPartyCaveat(cav) c.Assert(err, gc.IsNil) } client := httpbakery.NewClient() ms, err := client.DischargeAll(&m) c.Assert(err, gc.IsNil) macaroonCookie, err := httpbakery.NewCookie(ms) c.Assert(err, gc.IsNil) return macaroonCookie } type authSuite struct { commonSuite } var _ = gc.Suite(&authSuite{}) func (s *authSuite) SetUpSuite(c *gc.C) { s.enableIdentity = true s.commonSuite.SetUpSuite(c) } var readAuthorizationTests = []struct { // about holds the test description. about string // username holds the authenticated user name returned by the discharger. // If empty, an anonymous user is returned. username string // groups holds group names the user is member of, as returned by the // discharger. groups []string // unpublishedReadPerm stores a list of users with read permissions on // on the unpublished entities. unpublishedReadPerm []string // developmentReadPerm stores a list of users with read permissions on the development channel. developmentReadPerm []string // stableReadPerm stores a list of users with read permissions on the stable channel. stableReadPerm []string // channels contains a list of channels, to which the entity belongs. channels []params.Channel // expectStatus is the expected HTTP response status. // Defaults to 200 status OK. expectStatus int // expectBody holds the expected body of the HTTP response. If nil, // the body is not checked and the response is assumed to be ok. 
expectBody interface{} }{{ about: "anonymous users are authorized", unpublishedReadPerm: []string{params.Everyone}, }, { about: "everyone is authorized", username: "dalek", unpublishedReadPerm: []string{params.Everyone}, }, { about: "everyone and a specific user", username: "dalek", unpublishedReadPerm: []string{params.Everyone, "janeway"}, }, { about: "specific user authorized", username: "who", unpublishedReadPerm: []string{"who"}, }, { about: "multiple specific users authorized", username: "picard", unpublishedReadPerm: []string{"kirk", "picard", "sisko"}, }, { about: "nobody authorized", username: "picard", expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "picard"`, }, }, { about: "access denied for user", username: "kirk", unpublishedReadPerm: []string{"picard", "sisko"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "everyone is authorized (user is member of groups)", username: "dalek", groups: []string{"group1", "group2"}, unpublishedReadPerm: []string{params.Everyone}, }, { about: "everyone and a specific group", username: "dalek", groups: []string{"group2", "group3"}, unpublishedReadPerm: []string{params.Everyone, "group1"}, }, { about: "specific group authorized", username: "who", groups: []string{"group1", "group42", "group2"}, unpublishedReadPerm: []string{"group42"}, }, { about: "multiple specific groups authorized", username: "picard", groups: []string{"group2"}, unpublishedReadPerm: []string{"kirk", "group0", "group2"}, }, { about: "no group authorized", username: "picard", groups: []string{"group1", "group2"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "picard"`, }, }, { about: "access denied for group", username: "kirk", groups: 
[]string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through development channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, developmentReadPerm: []string{"group1"}, channels: []params.Channel{params.DevelopmentChannel}, }, { about: "access provided through development channel, but charm not published", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, developmentReadPerm: []string{"group1"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through stable channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, developmentReadPerm: []string{"group12"}, stableReadPerm: []string{"group2"}, channels: []params.Channel{params.DevelopmentChannel, params.StableChannel}, }, { about: "access provided through stable channel, but charm not published", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, developmentReadPerm: []string{"group12"}, stableReadPerm: []string{"group2"}, channels: []params.Channel{params.DevelopmentChannel}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through development channel, but charm on stable channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, 
unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, developmentReadPerm: []string{"group1"}, stableReadPerm: []string{"group11"}, channels: []params.Channel{ params.DevelopmentChannel, params.StableChannel, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through unpublished ACL, but charm on stable channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group1"}, stableReadPerm: []string{"group11"}, channels: []params.Channel{ params.DevelopmentChannel, params.StableChannel, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through unpublished ACL, but charm on development channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group1"}, developmentReadPerm: []string{"group11"}, channels: []params.Channel{ params.DevelopmentChannel, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }} func dischargeForUser(username string) func(_, _ string) ([]checkers.Caveat, error) { return func(_, _ string) ([]checkers.Caveat, error) { return []checkers.Caveat{ checkers.DeclaredCaveat(v4.UsernameAttr, username), }, nil } } func (s *authSuite) TestReadAuthorization(c *gc.C) { for i, test := range readAuthorizationTests { c.Logf("test %d: %s", i, test.about) s.discharge = dischargeForUser(test.username) s.idM.groups = map[string][]string{ test.username: test.groups, } // Add a charm to the store, used for testing. 
rurl := newResolvedURL("~charmers/utopic/wordpress-42", -1) err := s.store.AddCharmWithArchive(rurl, storetesting.Charms.CharmDir("wordpress")) c.Assert(err, gc.IsNil) // publish the charm on any required channels. if len(test.channels) > 0 { err := s.store.Publish(rurl, test.channels...) c.Assert(err, gc.IsNil) } // Change the ACLs for the testing charm. err = s.store.SetPerms(&rurl.URL, "unpublished.read", test.unpublishedReadPerm...) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&rurl.URL, "development.read", test.developmentReadPerm...) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&rurl.URL, "stable.read", test.stableReadPerm...) c.Assert(err, gc.IsNil) // Define an helper function used to send requests and check responses. doRequest := func(path string, expectStatus int, expectBody interface{}) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL(path), }) if expectStatus == 0 { expectStatus = http.StatusOK } c.Assert(rec.Code, gc.Equals, expectStatus, gc.Commentf("body: %s", rec.Body)) if expectBody != nil { c.Assert(rec.Body.String(), jc.JSONEquals, expectBody) } } // Perform meta and id requests. // Note that we use the full URL so that we test authorization specifically // on that entity without trying to look up the entity in the stable channel. doRequest("~charmers/utopic/wordpress-42/meta/archive-size", test.expectStatus, test.expectBody) doRequest("~charmers/utopic/wordpress-42/expand-id", test.expectStatus, test.expectBody) // Remove all entities from the store. _, err = s.store.DB.Entities().RemoveAll(nil) c.Assert(err, gc.IsNil) } } var writeAuthorizationTests = []struct { // about holds the test description. about string // username holds the authenticated user name returned by the discharger. // If empty, an anonymous user is returned. username string // groups holds group names the user is member of, as returned by the // discharger. 
groups []string // writePerm stores a list of users with write permissions. unpublishedWritePerm []string // developmentWritePerm stores a list of users with write permissions on the development channel. developmentWritePerm []string // stableWritePerm stores a list of users with write permissions on the stable channel. stableWritePerm []string // channels contains a list of channels, to which the entity belongs. channels []params.Channel // expectStatus is the expected HTTP response status. // Defaults to 200 status OK. expectStatus int // expectBody holds the expected body of the HTTP response. If nil, // the body is not checked and the response is assumed to be ok. expectBody interface{} }{{ about: "anonymous users are not authorized", unpublishedWritePerm: []string{"who"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: "unauthorized: no username declared", }, }, { about: "specific user authorized to write", username: "dalek", unpublishedWritePerm: []string{"dalek"}, }, { about: "multiple users authorized", username: "sisko", unpublishedWritePerm: []string{"kirk", "picard", "sisko"}, }, { about: "no users authorized", username: "who", expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "who"`, }, }, { about: "specific user unauthorized", username: "kirk", unpublishedWritePerm: []string{"picard", "sisko", "janeway"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access granted for group", username: "picard", groups: []string{"group1", "group2"}, unpublishedWritePerm: []string{"group2"}, }, { about: "multiple groups authorized", username: "picard", groups: []string{"group1", "group2"}, unpublishedWritePerm: []string{"kirk", "group0", "group1", "group2"}, }, { about: "no group authorized", username: 
"picard", groups: []string{"group1", "group2"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "picard"`, }, }, { about: "access denied for group", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through development channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, developmentWritePerm: []string{"group1"}, channels: []params.Channel{params.DevelopmentChannel}, }, { about: "access provided through development channel, but charm not published", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, developmentWritePerm: []string{"group1"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through stable channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, developmentWritePerm: []string{"group12"}, stableWritePerm: []string{"group2"}, channels: []params.Channel{params.DevelopmentChannel, params.StableChannel}, }, { about: "access provided through stable channel, but charm not published", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, developmentWritePerm: []string{"group12"}, stableWritePerm: []string{"group2"}, channels: []params.Channel{params.DevelopmentChannel}, expectStatus: http.StatusUnauthorized, 
expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through development channel, but charm on stable channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, developmentWritePerm: []string{"group1"}, stableWritePerm: []string{"group11"}, channels: []params.Channel{ params.DevelopmentChannel, params.StableChannel, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through unpublished ACL, but charm on stable channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group1"}, stableWritePerm: []string{"group11"}, channels: []params.Channel{ params.DevelopmentChannel, params.StableChannel, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through unpublished ACL, but charm on development channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group1"}, developmentWritePerm: []string{"group11"}, channels: []params.Channel{ params.DevelopmentChannel, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }} func (s *authSuite) TestWriteAuthorization(c *gc.C) { for i, test := range writeAuthorizationTests { c.Logf("test %d: %s", i, test.about) s.discharge = dischargeForUser(test.username) s.idM.groups = map[string][]string{ test.username: test.groups, } // Add a charm to the store, used for testing. 
rurl := newResolvedURL("~charmers/utopic/wordpress-42", -1) err := s.store.AddCharmWithArchive(rurl, storetesting.Charms.CharmDir("wordpress")) c.Assert(err, gc.IsNil) // publish the charm on any required channels. if len(test.channels) > 0 { err := s.store.Publish(rurl, test.channels...) c.Assert(err, gc.IsNil) } // Change the ACLs for the testing charm. err = s.store.SetPerms(&rurl.URL, "unpublished.write", test.unpublishedWritePerm...) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&rurl.URL, "development.write", test.developmentWritePerm...) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&rurl.URL, "stable.write", test.stableWritePerm...) c.Assert(err, gc.IsNil) makeRequest := func(path string, expectStatus int, expectBody interface{}) { client := httpbakery.NewHTTPClient() rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, Do: bakeryDo(client), URL: storeURL(path), Method: "PUT", Header: http.Header{"Content-Type": {"application/json"}}, Body: strings.NewReader("42"), }) if expectStatus == 0 { expectStatus = http.StatusOK } c.Assert(rec.Code, gc.Equals, expectStatus, gc.Commentf("body: %s", rec.Body)) if expectBody != nil { c.Assert(rec.Body.String(), jc.JSONEquals, expectBody) } } // Perform a meta PUT request to the URLs. // Note that we use the full URL so that we test authorization specifically // on that entity without trying to look up the entity in the stable channel. makeRequest("~charmers/utopic/wordpress-42/meta/extra-info/key", test.expectStatus, test.expectBody) // Remove all entities from the store. _, err = s.store.DB.Entities().RemoveAll(nil) c.Assert(err, gc.IsNil) } } var uploadEntityAuthorizationTests = []struct { // about holds the test description. about string // username holds the authenticated user name returned by the discharger. // If empty, an anonymous user is returned. username string // groups holds group names the user is member of, as returned by the // discharger. 
groups []string // id holds the id of the entity to be uploaded. id string // promulgated holds whether the corresponding promulgated entity must be // already present in the charm store before performing the upload. promulgated bool // writeAcls can be used to set customized write ACLs for the published // entity before performing the upload. If empty, default ACLs are used. writeAcls []string // expectStatus is the expected HTTP response status. // Defaults to 200 status OK. expectStatus int // expectBody holds the expected body of the HTTP response. If nil, // the body is not checked and the response is assumed to be ok. expectBody interface{} }{{ about: "user owned entity", username: "who", id: "~who/utopic/django", }, { about: "group owned entity", username: "dalek", groups: []string{"group1", "group2"}, id: "~group1/utopic/django", }, { about: "specific group", username: "dalek", groups: []string{"group42"}, id: "~group42/utopic/django", }, { about: "promulgated entity", username: "sisko", groups: []string{"charmers", "group2"}, id: "~charmers/utopic/django", promulgated: true, }, { about: "unauthorized: promulgated entity", username: "sisko", groups: []string{"group1", "group2"}, id: "~charmers/utopic/django", promulgated: true, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "sisko"`, }, }, { about: "unauthorized: anonymous user", id: "~who/utopic/django", expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: "unauthorized: no username declared", }, }, { about: "unauthorized: anonymous user and promulgated entity", id: "~charmers/utopic/django", promulgated: true, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: "unauthorized: no username declared", }, }, { about: "unauthorized: user does not match", username: "kirk", id: "~picard/utopic/django", expectStatus: 
http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "unauthorized: group does not match", username: "kirk", groups: []string{"group1", "group2", "group3"}, id: "~group0/utopic/django", expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "unauthorized: specific group and promulgated entity", username: "janeway", groups: []string{"group1"}, id: "~charmers/utopic/django", promulgated: true, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "janeway"`, }, }, { about: "unauthorized: published entity no published permissions", username: "picard", id: "~picard/wily/django", writeAcls: []string{"kirk"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "picard"`, }, }} func (s *authSuite) TestUploadEntityAuthorization(c *gc.C) { for i, test := range uploadEntityAuthorizationTests { c.Logf("test %d: %s", i, test.about) s.discharge = dischargeForUser(test.username) s.idM.groups = map[string][]string{ test.username: test.groups, } // Prepare the expected status. expectStatus := test.expectStatus if expectStatus == 0 { expectStatus = http.StatusOK } // Add a pre-existing entity if required. if test.promulgated || len(test.writeAcls) != 0 { id := charm.MustParseURL(test.id).WithRevision(0) revision := -1 if test.promulgated { revision = 1 } rurl := newResolvedURL(id.String(), revision) s.store.AddCharmWithArchive(rurl, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) if len(test.writeAcls) != 0 { s.store.SetPerms(&rurl.URL, "unpublished.write", test.writeAcls...) } } // Try to upload the entity. 
body, hash, size := archiveInfo(c, "wordpress") defer body.Close() client := httpbakery.NewHTTPClient() rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, Do: bakeryDo(client), URL: storeURL(test.id + "/archive?hash=" + hash), Method: "POST", ContentLength: size, Header: http.Header{ "Content-Type": {"application/zip"}, }, Body: body, }) c.Assert(rec.Code, gc.Equals, expectStatus, gc.Commentf("body: %s", rec.Body)) if test.expectBody != nil { c.Assert(rec.Body.String(), jc.JSONEquals, test.expectBody) } // Remove all entities from the store. _, err := s.store.DB.Entities().RemoveAll(nil) c.Assert(err, gc.IsNil) _, err = s.store.DB.BaseEntities().RemoveAll(nil) c.Assert(err, gc.IsNil) } } type readSeekCloser interface { io.ReadCloser io.Seeker } // archiveInfo prepares a zip archive of an entity and return a reader for the // archive, its blob hash and size. func archiveInfo(c *gc.C, name string) (r readSeekCloser, hashSum string, size int64) { ch := storetesting.Charms.CharmArchive(c.MkDir(), name) f, err := os.Open(ch.Path) c.Assert(err, gc.IsNil) hash, size := hashOf(f) _, err = f.Seek(0, 0) c.Assert(err, gc.IsNil) return f, hash, size } var isEntityCaveatTests = []struct { url string expectError string }{{ url: "~charmers/utopic/wordpress-42/archive", }, { url: "~charmers/utopic/wordpress-42/meta/hash", }, { url: "wordpress/archive", }, { url: "wordpress/meta/hash", }, { url: "utopic/wordpress-10/archive", }, { url: "utopic/wordpress-10/meta/hash", }, { url: "~charmers/utopic/wordpress-41/archive", expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation on entity cs:~charmers/utopic/wordpress-41 not allowed`, }, { url: "~charmers/utopic/wordpress-41/meta/hash", expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation on entity cs:~charmers/utopic/wordpress-41 not allowed`, }, { url: "utopic/wordpress-9/archive", expectError: 
`verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation on entity cs:utopic/wordpress-9 not allowed`, }, { url: "utopic/wordpress-9/meta/hash", expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation on entity cs:utopic/wordpress-9 not allowed`, }, { url: "log", expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation does not involve any of the allowed entities cs:~charmers/utopic/wordpress-42`, }} func (s *authSuite) TestIsEntityCaveat(c *gc.C) { s.discharge = func(_, _ string) ([]checkers.Caveat, error) { return []checkers.Caveat{{ Condition: "is-entity cs:~charmers/utopic/wordpress-42", }, checkers.DeclaredCaveat(v4.UsernameAttr, "bob"), }, nil } // Add a charm to the store, used for testing. s.addPublicCharm(c, storetesting.NewCharm(nil), newResolvedURL("~charmers/utopic/wordpress-41", 9)) s.addPublicCharm(c, storetesting.NewCharm(nil), newResolvedURL("~charmers/utopic/wordpress-42", 10)) // Change the ACLs for charms we've just uploaded, otherwise // no authorization checking will take place. err := s.store.SetPerms(charm.MustParseURL("cs:~charmers/wordpress"), "stable.read", "bob") c.Assert(err, gc.IsNil) for i, test := range isEntityCaveatTests { c.Logf("test %d: %s", i, test.url) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL(test.url), Method: "GET", }) if test.expectError != "" { c.Assert(rec.Code, gc.Equals, http.StatusUnauthorized) var respErr httpbakery.Error err := json.Unmarshal(rec.Body.Bytes(), &respErr) c.Assert(err, gc.IsNil) c.Assert(respErr.Message, gc.Matches, test.expectError) continue } c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.Bytes())) } } func (s *authSuite) TestDelegatableMacaroon(c *gc.C) { // Create a new server with a third party discharger. 
s.discharge = dischargeForUser("bob") // First check that we get a macaraq error when using a vanilla http do // request with both bakery protocol. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("delegatable-macaroon"), Header: http.Header{"Bakery-Protocol-Version": {"1"}}, ExpectBody: httptesting.BodyAsserter(func(c *gc.C, m json.RawMessage) { // Allow any body - the next check will check that it's a valid macaroon. }), ExpectStatus: http.StatusUnauthorized, }) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("delegatable-macaroon"), ExpectBody: httptesting.BodyAsserter(func(c *gc.C, m json.RawMessage) { // Allow any body - the next check will check that it's a valid macaroon. }), ExpectStatus: http.StatusProxyAuthRequired, }) client := httpbakery.NewHTTPClient() now := time.Now() var gotBody json.RawMessage httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("delegatable-macaroon"), ExpectBody: httptesting.BodyAsserter(func(c *gc.C, m json.RawMessage) { gotBody = m }), Do: bakeryDo(client), ExpectStatus: http.StatusOK, }) c.Assert(gotBody, gc.NotNil) var m macaroon.Macaroon err := json.Unmarshal(gotBody, &m) c.Assert(err, gc.IsNil) caveats := m.Caveats() foundExpiry := false for _, cav := range caveats { cond, arg, err := checkers.ParseCaveat(cav.Id) c.Assert(err, gc.IsNil) switch cond { case checkers.CondTimeBefore: t, err := time.Parse(time.RFC3339Nano, arg) c.Assert(err, gc.IsNil) c.Assert(t, jc.TimeBetween(now.Add(v4.DelegatableMacaroonExpiry), now.Add(v4.DelegatableMacaroonExpiry+time.Second))) foundExpiry = true } } c.Assert(foundExpiry, jc.IsTrue) // Now check that we can use the obtained macaroon to do stuff // as the declared user. 
rurl := newResolvedURL("~charmers/utopic/wordpress-41", 9) err = s.store.AddCharmWithArchive( rurl, storetesting.Charms.CharmDir("wordpress")) c.Assert(err, gc.IsNil) err = s.store.Publish(rurl, params.StableChannel) c.Assert(err, gc.IsNil) // Change the ACLs for the testing charm. err = s.store.SetPerms(charm.MustParseURL("cs:~charmers/wordpress"), "stable.read", "bob") c.Assert(err, gc.IsNil) // First check that we require authorization to access the charm. rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("~charmers/utopic/wordpress/meta/id-name"), Method: "GET", }) c.Assert(rec.Code, gc.Equals, http.StatusProxyAuthRequired) // Then check that the request succeeds if we provide the delegatable // macaroon. client = httpbakery.NewHTTPClient() u, err := url.Parse("http://127.0.0.1") c.Assert(err, gc.IsNil) err = httpbakery.SetCookie(client.Jar, u, macaroon.Slice{&m}) c.Assert(err, gc.IsNil) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("~charmers/utopic/wordpress/meta/id-name"), ExpectBody: params.IdNameResponse{ Name: "wordpress", }, ExpectStatus: http.StatusOK, Do: bakeryDo(client), }) } func (s *authSuite) TestDelegatableMacaroonWithBasicAuth(c *gc.C) { // First check that we get a macaraq error when using a vanilla http do // request. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Username: testUsername, Password: testPassword, URL: storeURL("delegatable-macaroon"), ExpectBody: params.Error{ Code: params.ErrForbidden, Message: "delegatable macaroon is not obtainable using admin credentials", }, ExpectStatus: http.StatusForbidden, }) } type errorTransport string func (e errorTransport) RoundTrip(*http.Request) (*http.Response, error) { return nil, errgo.New(string(e)) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/defaulticon_test.go0000664000175000017500000000104512672604603027424 0ustar marcomarco// Copyright 2014 Canonical Ltd. 
// Licensed under the AGPLv3, see LICENCE file for details. package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" import ( "strings" gc "gopkg.in/check.v1" "gopkg.in/juju/charmstore.v5-unstable/internal/v4" ) type iconSuite struct{} var _ = gc.Suite(&iconSuite{}) func (s *iconSuite) TestValidXML(c *gc.C) { // The XML declaration must be included in the first line of the icon. hasXMLPrefix := strings.HasPrefix(v4.DefaultIcon, " 0 { return params.TagsResponse{entity.CharmMeta.Tags} } return params.TagsResponse{entity.CharmMeta.Categories} }), checkURL: newResolvedURL("~charmers/utopic/category-2", 2), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, jc.DeepEquals, params.TagsResponse{ Tags: []string{"openstack", "storage"}, }) }, }, { name: "id-user", get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { return params.IdUserResponse{url.PreferredURL().User}, nil }, checkURL: newResolvedURL("cs:~bob/utopic/wordpress-2", -1), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, gc.Equals, params.IdUserResponse{"bob"}) }, }, { name: "id-series", get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { return params.IdSeriesResponse{url.URL.Series}, nil }, checkURL: newResolvedURL("~charmers/utopic/category-2", 2), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, gc.Equals, params.IdSeriesResponse{"utopic"}) }, }, { name: "id-name", get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { return params.IdNameResponse{url.URL.Name}, nil }, checkURL: newResolvedURL("~charmers/utopic/category-2", 2), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, gc.Equals, params.IdNameResponse{"category"}) }, }, { name: "id-revision", get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { return params.IdRevisionResponse{url.PreferredURL().Revision}, nil }, checkURL: 
newResolvedURL("~charmers/utopic/category-2", 2), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, gc.Equals, params.IdRevisionResponse{2}) }, }, { name: "id", get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { id := url.PreferredURL() return params.IdResponse{ Id: id, User: id.User, Series: id.Series, Name: id.Name, Revision: id.Revision, }, nil }, checkURL: newResolvedURL("~charmers/utopic/category-2", 2), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, jc.DeepEquals, params.IdResponse{ Id: charm.MustParseURL("cs:utopic/category-2"), User: "", Series: "utopic", Name: "category", Revision: 2, }) }, }, { name: "promulgated", get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { e, err := store.FindBaseEntity(&url.URL, nil) if err != nil { return nil, err } return params.PromulgatedResponse{ Promulgated: bool(e.Promulgated), }, nil }, checkURL: newResolvedURL("cs:~bob/utopic/wordpress-2", -1), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, gc.Equals, params.PromulgatedResponse{Promulgated: false}) }, }, { name: "supported-series", get: entityGetter(func(entity *mongodoc.Entity) interface{} { if entity.URL.Series == "bundle" { return nil } return params.SupportedSeriesResponse{ SupportedSeries: entity.SupportedSeries, } }), checkURL: newResolvedURL("~charmers/utopic/category-2", 2), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, jc.DeepEquals, params.SupportedSeriesResponse{ SupportedSeries: []string{"utopic"}, }) }, }} // TestEndpointGet tries to ensure that the endpoint // test data getters correspond with reality. 
func (s *APISuite) TestEndpointGet(c *gc.C) { s.addTestEntities(c) for i, ep := range metaEndpoints { c.Logf("test %d: %s\n", i, ep.name) data, err := ep.get(s.store, ep.checkURL) c.Assert(err, gc.IsNil) ep.assertCheckData(c, data) } } func (s *APISuite) TestAllMetaEndpointsTested(c *gc.C) { // Make sure that we're testing all the metadata // endpoints that we need to. s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-23", 23)) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("precise/wordpress-23/meta"), }) c.Logf("meta response body: %s", rec.Body) var list []string err := json.Unmarshal(rec.Body.Bytes(), &list) c.Assert(err, gc.IsNil) listNames := make(map[string]bool) for _, name := range list { c.Assert(listNames[name], gc.Equals, false, gc.Commentf("name %s", name)) listNames[name] = true } testNames := make(map[string]bool) for _, test := range metaEndpoints { if strings.Contains(test.name, "/") { continue } testNames[test.name] = true } testNames["terms"] = true c.Assert(testNames, jc.DeepEquals, listNames) } var testEntities = []*router.ResolvedURL{ // A stock charm. newResolvedURL("cs:~charmers/precise/wordpress-23", 23), // Another stock charm, to satisfy the bundle's requirements. newResolvedURL("cs:~charmers/precise/mysql-5", 5), // A stock bundle. newResolvedURL("cs:~charmers/bundle/wordpress-simple-42", 42), // A charm with some actions. newResolvedURL("cs:~charmers/precise/dummy-10", 10), // A charm with some tags. newResolvedURL("cs:~charmers/utopic/category-2", 2), // A charm with a different user. newResolvedURL("cs:~bob/utopic/wordpress-2", -1), } func (s *APISuite) addTestEntities(c *gc.C) []*router.ResolvedURL { for _, e := range testEntities { if e.URL.Series == "bundle" { s.addPublicBundleFromRepo(c, e.URL.Name, e, true) } else { s.addPublicCharmFromRepo(c, e.URL.Name, e) } // Associate some extra-info data with the entity. 
key := e.URL.Path() + "/meta/extra-info/key" commonkey := e.URL.Path() + "/meta/common-info/key" s.assertPutAsAdmin(c, key, "value "+e.URL.String()) s.assertPutAsAdmin(c, commonkey, "value "+e.URL.String()) } return testEntities } func (s *APISuite) TestMetaEndpointsSingle(c *gc.C) { urls := s.addTestEntities(c) for i, ep := range metaEndpoints { c.Logf("test %d. %s", i, ep.name) tested := false for _, url := range urls { charmId := strings.TrimPrefix(url.String(), "cs:") path := charmId + "/meta/" + ep.name expectData, err := ep.get(s.store, url) c.Assert(err, gc.IsNil) c.Logf(" expected data for %q: %#v", url, expectData) if isNull(expectData) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(path), ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Message: params.ErrMetadataNotFound.Error(), Code: params.ErrMetadataNotFound, }, }) continue } tested = true c.Logf(" path %q: %#v", url, path) s.assertGet(c, path, expectData) } if !tested { c.Errorf("endpoint %q is null for all endpoints, so is not properly tested", ep.name) } } } func (s *APISuite) TestMetaPerm(c *gc.C) { s.discharge = dischargeForUser("charmers") for _, u := range []*router.ResolvedURL{ newResolvedURL("~charmers/precise/wordpress-23", 23), newResolvedURL("~charmers/precise/wordpress-24", 24), newResolvedURL("~charmers/trusty/wordpress-1", 1), } { err := s.store.AddCharmWithArchive(u, storetesting.NewCharm(nil)) c.Assert(err, gc.IsNil) } s.doAsUser("charmers", func() { s.assertGet(c, "wordpress/meta/perm?channel=unpublished", params.PermResponse{ Read: []string{"charmers"}, Write: []string{"charmers"}, }) }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, params.DevelopmentChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, }) 
s.doAsUser("charmers", func() { // Change the read perms to only include a specific user and the // published write perms to include an "admin" user. // Because the entity isn't published yet, the unpublished channel ACLs // will be changed. s.assertPut(c, "precise/wordpress-23/meta/perm/read", []string{"bob"}) s.assertPut(c, "precise/wordpress-23/meta/perm/write", []string{"admin"}) // charmers no longer has permission. s.assertGetIsUnauthorized(c, "precise/wordpress-23/meta/perm", `unauthorized: access denied for user "charmers"`) }) // The permissions are only for bob now, so act as bob. s.doAsUser("bob", func() { // Check that the perms have changed for all revisions and series. for i, u := range []string{"precise/wordpress-23", "precise/wordpress-24", "trusty/wordpress-1"} { c.Logf("id %d: %q", i, u) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL(u + "/meta/perm"), ExpectBody: params.PermResponse{ Read: []string{"bob"}, Write: []string{"admin"}, }, }) } }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, }) // Publish one of the revisions to development, then PUT to meta/perm // and check that the development ACLs have changed. err := s.store.Publish(newResolvedURL("~charmers/precise/wordpress-23", 23), params.DevelopmentChannel) c.Assert(err, gc.IsNil) s.doAsUser("bob", func() { // Check that we aren't allowed to put to the newly published entity as bob. 
s.assertPutIsUnauthorized(c, "~charmers/precise/wordpress/meta/perm/read?channel=development", []string{}, `unauthorized: access denied for user "bob"`) }) s.doAsUser("charmers", func() { s.discharge = dischargeForUser("charmers") s.assertPut(c, "precise/wordpress-23/meta/perm/read", []string{"bob", "charlie"}) s.assertGetIsUnauthorized(c, "~charmers/precise/wordpress/meta/perm/read?channel=development", `unauthorized: access denied for user "charmers"`) }) s.doAsUser("bob", func() { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL("precise/wordpress-23/meta/perm"), ExpectBody: params.PermResponse{ Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, }) // The other revisions should still see the old ACLs. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL("precise/wordpress-24/meta/perm"), ExpectBody: params.PermResponse{ Read: []string{"bob"}, Write: []string{"admin"}, }, }) }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, }) // Publish wordpress-1 to stable and check that the stable ACLs // have changed. err = s.store.Publish(newResolvedURL("~charmers/trusty/wordpress-1", 1), params.StableChannel) c.Assert(err, gc.IsNil) // The stable permissions only allow charmers currently, so act as // charmers again. 
s.doAsUser("charmers", func() { s.assertPut(c, "trusty/wordpress-1/meta/perm/write", []string{"doris"}) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL("~charmers/trusty/wordpress-1/meta/perm"), ExpectBody: params.PermResponse{ Read: []string{"charmers"}, Write: []string{"doris"}, }, }) }) // The other revisions should still see the old ACLs. s.doAsUser("bob", func() { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL("precise/wordpress-24/meta/perm"), ExpectBody: params.PermResponse{ Read: []string{"bob"}, Write: []string{"admin"}, }, }) // The development-channel entity should still see the development ACLS. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL("precise/wordpress-23/meta/perm"), ExpectBody: params.PermResponse{ Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, }) }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"charmers"}, Write: []string{"doris"}, }, }) s.doAsUser("doris", func() { // Try restoring everyone's read permission on the charm. // Note: wordpress resolves to trusty/wordpress-1 here because // trusty is a later LTS series than precise. 
s.assertPut(c, "wordpress/meta/perm/read", []string{"bob", params.Everyone}) }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"bob", params.Everyone}, Write: []string{"doris"}, }, }) s.doAsUser("bob", func() { s.assertGet(c, "wordpress/meta/perm", params.PermResponse{ Read: []string{"bob", params.Everyone}, Write: []string{"doris"}, }) s.assertGet(c, "wordpress/meta/perm/read", []string{"bob", params.Everyone}) }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"bob", params.Everyone}, Write: []string{"doris"}, }, }) // Try deleting all permissions. s.doAsUser("doris", func() { s.assertPut(c, "wordpress/meta/perm/read", []string{}) s.assertPut(c, "wordpress/meta/perm/write", []string{}) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL("wordpress/meta/perm"), ExpectStatus: http.StatusUnauthorized, ExpectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "doris"`, }, }) }) // Now no-one except admin can do anything with trusty/wordpress-1. 
for _, user := range []string{"charmers", "bob", "charlie", "doris", "admin"} { s.doAsUser(user, func() { s.assertGetIsUnauthorized(c, "wordpress/meta/perm", fmt.Sprintf("unauthorized: access denied for user %q", user)) s.assertPutIsUnauthorized(c, "wordpress/meta/perm", []string{}, fmt.Sprintf("unauthorized: access denied for user %q", user)) }) } s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{}, Write: []string{}, }, }) // Try setting all permissions in one request. We need to be admin here. s.assertPutAsAdmin(c, "wordpress/meta/perm", params.PermRequest{ Read: []string{"bob"}, Write: []string{"admin"}, }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, }) // Try putting only read permissions. s.doAsUser("admin", func() { readRequest := struct { Read []string }{Read: []string{"joe"}} s.assertPut(c, "wordpress/meta/perm", readRequest) }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"joe"}, Write: []string{}, }, }) // Restore some write rights to the stable channel. s.assertPutAsAdmin(c, "trusty/wordpress-1/meta/perm/write", []string{"bob"}) // ~charmers/trusty/wordpress-1 has been published only to the // stable channel. 
If we specify a different channel in a perm PUT // request, we'll get an error because the channel isn't valid for // that entity. s.doAsUser("charmers", func() { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), Method: "PUT", JSONBody: params.PermRequest{ Read: []string{"foo"}, Write: []string{"bar"}, }, URL: storeURL("trusty/wordpress-1/meta/perm?channel=development"), ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Code: params.ErrNotFound, Message: `cs:trusty/wordpress-1 not found in development channel`, }, }) }) // Similarly, we should be able to specify a channel on read // to read a different channel. s.doAsUser("bob", func() { s.assertGet(c, "trusty/wordpress/meta/perm?channel=unpublished", params.PermResponse{ Read: []string{"bob"}, Write: []string{"admin"}, }) s.assertGet(c, "wordpress/meta/perm?channel=development", params.PermResponse{ Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }) }) // We can't write to a channel that the charm's not in. 
s.doAsUser("charmers", func() { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), Method: "PUT", JSONBody: []string{"arble"}, URL: storeURL("trusty/wordpress-1/meta/perm/read?channel=development"), ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Code: params.ErrNotFound, Message: `cs:trusty/wordpress-1 not found in development channel`, }, }) }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"joe"}, Write: []string{"bob"}, }, }) s.doAsUser("bob", func() { s.assertGet(c, "trusty/wordpress/meta/perm/read?channel=unpublished", []string{"bob"}) }) } // assertChannelACLs asserts that the ChannelACLs field of the base entity with the // given URL are as given. func (s *APISuite) assertChannelACLs(c *gc.C, url string, acls map[params.Channel]mongodoc.ACL) { e, err := s.store.FindBaseEntity(charm.MustParseURL(url), nil) c.Assert(err, gc.IsNil) c.Assert(e.ChannelACLs, jc.DeepEquals, acls) } func (s *APISuite) TestMetaPermPutUnauthorized(c *gc.C) { id := "precise/wordpress-23" s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/"+id, 23)) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.noMacaroonSrv, URL: storeURL("~charmers/" + id + "/meta/perm/read"), Method: "PUT", Header: http.Header{ "Content-Type": {"application/json"}, }, Body: strings.NewReader(`["some-user"]`), ExpectStatus: http.StatusUnauthorized, ExpectBody: params.Error{ Code: params.ErrUnauthorized, Message: "authentication failed: missing HTTP auth header", }, }) } func (s *APISuite) TestExtraInfo(c *gc.C) { id := "precise/wordpress-23" s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/"+id, 23)) s.checkInfo(c, "extra-info", id) s.checkInfo(c, "common-info", 
id) } func (s *APISuite) checkInfo(c *gc.C, path string, id string) { // Add one value and check that it's there. s.assertPutAsAdmin(c, id+"/meta/"+path+"/foo", "fooval") s.assertGet(c, id+"/meta/"+path+"/foo", "fooval") s.assertGet(c, id+"/meta/"+path, map[string]string{ "foo": "fooval", }) // Add another value and check that both values are there. s.assertPutAsAdmin(c, id+"/meta/"+path+"/bar", "barval") s.assertGet(c, id+"/meta/"+path+"/bar", "barval") s.assertGet(c, id+"/meta/"+path, map[string]string{ "foo": "fooval", "bar": "barval", }) // Overwrite a value and check that it's changed. s.assertPutAsAdmin(c, id+"/meta/"+path+"/foo", "fooval2") s.assertGet(c, id+"/meta/"+path+"/foo", "fooval2") s.assertGet(c, id+"/meta/"+path+"", map[string]string{ "foo": "fooval2", "bar": "barval", }) // Write several values at once. s.assertPutAsAdmin(c, id+"/meta/any", params.MetaAnyResponse{ Meta: map[string]interface{}{ path: map[string]string{ "foo": "fooval3", "baz": "bazval", }, path + "/frob": []int{1, 4, 6}, }, }) s.assertGet(c, id+"/meta/"+path, map[string]interface{}{ "foo": "fooval3", "baz": "bazval", "bar": "barval", "frob": []int{1, 4, 6}, }) // Delete a single value. s.assertPutAsAdmin(c, id+"/meta/"+path+"/foo", nil) s.assertGet(c, id+"/meta/"+path, map[string]interface{}{ "baz": "bazval", "bar": "barval", "frob": []int{1, 4, 6}, }) // Delete a value and add some values at the same time. 
s.assertPutAsAdmin(c, id+"/meta/any", params.MetaAnyResponse{ Meta: map[string]interface{}{ path: map[string]interface{}{ "baz": nil, "bar": nil, "dazzle": "x", "fizzle": "y", }, }, }) s.assertGet(c, id+"/meta/"+path, map[string]interface{}{ "frob": []int{1, 4, 6}, "dazzle": "x", "fizzle": "y", }) } var extraInfoBadPutRequestsTests = []struct { about string key string body interface{} contentType string expectStatus int expectBody params.Error }{{ about: "key with extra element", key: "foo/bar", body: "hello", expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: "bad key for $1", }, }, { about: "key with a dot", key: "foo.bar", body: "hello", expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: "bad key for $1", }, }, { about: "key with a dollar", key: "foo$bar", body: "hello", expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: "bad key for $1", }, }, { about: "multi key with extra element", key: "", body: map[string]string{ "foo/bar": "value", }, expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: "bad key for $1", }, }, { about: "multi key with dot", key: "", body: map[string]string{ ".bar": "value", }, expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: "bad key for $1", }, }, { about: "multi key with dollar", key: "", body: map[string]string{ "$bar": "value", }, expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: "bad key for $1", }, }, { about: "multi key with bad map", key: "", body: "bad", expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Message: `cannot unmarshal $1 body: json: cannot unmarshal string into Go value of type map[string]*json.RawMessage`, }, }} func (s *APISuite) TestExtraInfoBadPutRequests(c *gc.C) { s.addPublicCharmFromRepo(c, 
"wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) path := "precise/wordpress-23/meta/" for i, test := range extraInfoBadPutRequestsTests { c.Logf("test %d: %s", i, test.about) contentType := test.contentType if contentType == "" { contentType = "application/json" } extraBodyMessage := strings.Replace(test.expectBody.Message, "$1", "extra-info", -1) commonBodyMessage := strings.Replace(test.expectBody.Message, "$1", "common-info", -1) test.expectBody.Message = extraBodyMessage httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(path + "extra-info/" + test.key), Method: "PUT", Header: http.Header{ "Content-Type": {contentType}, }, Username: testUsername, Password: testPassword, Body: strings.NewReader(mustMarshalJSON(test.body)), ExpectStatus: test.expectStatus, ExpectBody: test.expectBody, }) test.expectBody.Message = commonBodyMessage httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(path + "common-info/" + test.key), Method: "PUT", Header: http.Header{ "Content-Type": {contentType}, }, Username: testUsername, Password: testPassword, Body: strings.NewReader(mustMarshalJSON(test.body)), ExpectStatus: test.expectStatus, ExpectBody: test.expectBody, }) } } func (s *APISuite) TestExtraInfoPutUnauthorized(c *gc.C) { s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("precise/wordpress-23/meta/extra-info"), Method: "PUT", Header: http.Header{ "Content-Type": {"application/json"}, }, Body: strings.NewReader(mustMarshalJSON(map[string]string{ "bar": "value", })), ExpectStatus: http.StatusProxyAuthRequired, ExpectBody: dischargeRequiredBody, }) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("precise/wordpress-23/meta/extra-info"), Method: "PUT", Header: http.Header{ "Content-Type": {"application/json"}, 
"Bakery-Protocol-Version": {"1"}, }, Body: strings.NewReader(mustMarshalJSON(map[string]string{ "bar": "value", })), ExpectStatus: http.StatusUnauthorized, ExpectHeader: http.Header{ "WWW-Authenticate": {"Macaroon"}, }, ExpectBody: dischargeRequiredBody, }) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("precise/wordpress-23/meta/common-info"), Method: "PUT", Header: http.Header{ "Content-Type": {"application/json"}, }, Body: strings.NewReader(mustMarshalJSON(map[string]string{ "bar": "value", })), ExpectStatus: http.StatusProxyAuthRequired, ExpectBody: dischargeRequiredBody, }) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("precise/wordpress-23/meta/common-info"), Method: "PUT", Header: http.Header{ "Content-Type": {"application/json"}, "Bakery-Protocol-Version": {"1"}, }, Body: strings.NewReader(mustMarshalJSON(map[string]string{ "bar": "value", })), ExpectStatus: http.StatusUnauthorized, ExpectHeader: http.Header{ "WWW-Authenticate": {"Macaroon"}, }, ExpectBody: dischargeRequiredBody, }) } func (s *APISuite) TestCommonInfo(c *gc.C) { s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-23", 23)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-24", 24)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/trusty/wordpress-1", 1)) s.assertPutAsAdmin(c, "wordpress/meta/common-info/key", "something") s.assertGet(c, "wordpress/meta/common-info", map[string]string{ "key": "something", }) for i, u := range []string{"precise/wordpress-23", "precise/wordpress-24", "trusty/wordpress-1"} { c.Logf("id %d: %q", i, u) s.assertGet(c, u+"/meta/common-info", map[string]string{ "key": "something", }) e, err := s.store.FindBaseEntity(charm.MustParseURL(u), nil) c.Assert(err, gc.IsNil) c.Assert(e.CommonInfo, gc.DeepEquals, map[string][]byte{ "key": []byte("\"something\""), }) } } func isNull(v interface{}) bool { data, 
err := json.Marshal(v) if err != nil { panic(err) } return string(data) == "null" } func (s *APISuite) TestMetaEndpointsAny(c *gc.C) { rurls := s.addTestEntities(c) // We check the meta endpoint for both promulgated and non-promulgated // versions of each URL. urls := make([]*router.ResolvedURL, 0, len(rurls)*2) for _, rurl := range rurls { urls = append(urls, rurl) if rurl.PromulgatedRevision != -1 { rurl1 := *rurl rurl1.PromulgatedRevision = -1 urls = append(urls, &rurl1) } } for _, url := range urls { charmId := strings.TrimPrefix(url.String(), "cs:") var flags []string expectData := params.MetaAnyResponse{ Id: url.PreferredURL(), Meta: make(map[string]interface{}), } for _, ep := range metaEndpoints { flags = append(flags, "include="+ep.name) isBundle := url.URL.Series == "bundle" if ep.exclusive != 0 && isBundle != (ep.exclusive == bundleOnly) { // endpoint not relevant. continue } val, err := ep.get(s.store, url) c.Assert(err, gc.IsNil) if val != nil { expectData.Meta[ep.name] = val } } s.assertGet(c, charmId+"/meta/any?"+strings.Join(flags, "&"), expectData) } } func (s *APISuite) TestMetaAnyWithNoIncludesAndNoEntity(c *gc.C) { wordpressURL, _ := s.addPublicCharmFromRepo( c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23), ) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("precise/wordpress-1/meta/any"), ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Code: params.ErrNotFound, Message: `no matching charm or bundle for cs:precise/wordpress-1`, }, }) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("meta/any?id=precise/wordpress-23&id=precise/wordpress-1"), ExpectStatus: http.StatusOK, ExpectBody: map[string]interface{}{ "precise/wordpress-23": params.MetaAnyResponse{ Id: wordpressURL.PreferredURL(), }, }, }) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("precise/wordpress-23/meta/any"), ExpectStatus: 
http.StatusOK, ExpectBody: params.MetaAnyResponse{ Id: wordpressURL.PreferredURL(), }, }) } // In this test we rely on the charm.v2 testing repo package and // dummy charm that has actions included. func (s *APISuite) TestMetaCharmActions(c *gc.C) { url, dummy := s.addPublicCharmFromRepo(c, "dummy", newResolvedURL("cs:~charmers/precise/dummy-10", 10)) s.assertGet(c, "precise/dummy-10/meta/charm-actions", dummy.Actions()) s.assertGet(c, "precise/dummy-10/meta/any?include=charm-actions", params.MetaAnyResponse{ Id: url.PreferredURL(), Meta: map[string]interface{}{ "charm-actions": dummy.Actions(), }, }, ) } // V4 SPECIFIC func (s *APISuite) TestMetaCharmMetadataElidesSeriesFromMultiSeriesCharm(c *gc.C) { _, ch := s.addPublicCharmFromRepo(c, "multi-series", newResolvedURL("cs:~charmers/multi-series-10", 10)) expectMeta := *ch.Meta() c.Assert(expectMeta.Series, gc.Not(gc.HasLen), 0) expectMeta.Series = nil s.assertGet(c, "multi-series/meta/charm-metadata", &expectMeta) } func (s *APISuite) TestBulkMeta(c *gc.C) { // We choose an arbitrary set of ids and metadata here, just to smoke-test // whether the meta/any logic is hooked up correctly. // Detailed tests for this feature are in the router package. _, wordpress := s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) _, mysql := s.addPublicCharmFromRepo(c, "mysql", newResolvedURL("cs:~charmers/precise/mysql-10", 10)) s.assertGet(c, "meta/charm-metadata?id=precise/wordpress-23&id=precise/mysql-10", map[string]*charm.Meta{ "precise/wordpress-23": wordpress.Meta(), "precise/mysql-10": mysql.Meta(), }, ) } func (s *APISuite) TestBulkMetaAny(c *gc.C) { // We choose an arbitrary set of metadata here, just to smoke-test // whether the meta/any logic is hooked up correctly. // Detailed tests for this feature are in the router package. 
wordpressURL, wordpress := s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) mysqlURL, mysql := s.addPublicCharmFromRepo(c, "mysql", newResolvedURL("cs:~charmers/precise/mysql-10", 10)) s.assertGet(c, "meta/any?include=charm-metadata&include=charm-config&id=precise/wordpress-23&id=precise/mysql-10", map[string]params.MetaAnyResponse{ "precise/wordpress-23": { Id: wordpressURL.PreferredURL(), Meta: map[string]interface{}{ "charm-config": wordpress.Config(), "charm-metadata": wordpress.Meta(), }, }, "precise/mysql-10": { Id: mysqlURL.PreferredURL(), Meta: map[string]interface{}{ "charm-config": mysql.Config(), "charm-metadata": mysql.Meta(), }, }, }, ) } var metaCharmTagsTests = []struct { about string tags []string categories []string expectTags []string }{{ about: "tags only", tags: []string{"foo", "bar"}, expectTags: []string{"foo", "bar"}, }, { about: "categories only", categories: []string{"foo", "bar"}, expectTags: []string{"foo", "bar"}, }, { about: "tags and categories", categories: []string{"foo", "bar"}, tags: []string{"tag1", "tag2"}, expectTags: []string{"tag1", "tag2"}, }, { about: "no tags or categories", }} func (s *APISuite) TestMetaCharmTags(c *gc.C) { url := newResolvedURL("~charmers/precise/wordpress-0", -1) for i, test := range metaCharmTagsTests { c.Logf("%d: %s", i, test.about) url.URL.Revision = i s.addPublicCharm(c, storetesting.NewCharm(&charm.Meta{ Tags: test.tags, Categories: test.categories, }), url) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(url.URL.Path() + "/meta/tags"), ExpectStatus: http.StatusOK, ExpectBody: params.TagsResponse{test.expectTags}, }) } } func (s *APISuite) TestPromulgatedMetaCharmTags(c *gc.C) { url := newResolvedURL("~charmers/precise/wordpress-0", 0) for i, test := range metaCharmTagsTests { c.Logf("%d: %s", i, test.about) url.URL.Revision = i url.PromulgatedRevision = i s.addPublicCharm(c, storetesting.NewCharm(&charm.Meta{ 
Tags: test.tags, Categories: test.categories, }), url) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(url.URL.Path() + "/meta/tags"), ExpectStatus: http.StatusOK, ExpectBody: params.TagsResponse{test.expectTags}, }) } } func (s *APISuite) TestBundleTags(c *gc.C) { url := newResolvedURL("~charmers/bundle/wordpress-simple-2", -1) s.addPublicBundle(c, storetesting.NewBundle(&charm.BundleData{ Tags: []string{"foo", "bar"}, Services: map[string]*charm.ServiceSpec{ "wordpress": { Charm: "wordpress", }, }, }), url, true) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(url.URL.Path() + "/meta/tags"), ExpectStatus: http.StatusOK, ExpectBody: params.TagsResponse{[]string{"foo", "bar"}}, }) } func (s *APISuite) TestPromulgatedBundleTags(c *gc.C) { url := newResolvedURL("~charmers/bundle/wordpress-simple-2", 2) s.addPublicBundle(c, storetesting.NewBundle(&charm.BundleData{ Tags: []string{"foo", "bar"}, Services: map[string]*charm.ServiceSpec{ "wordpress": { Charm: "wordpress", }, }, }), url, true) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(url.URL.Path() + "/meta/tags"), ExpectStatus: http.StatusOK, ExpectBody: params.TagsResponse{[]string{"foo", "bar"}}, }) } type testMetaCharm struct { meta *charm.Meta charm.Charm } func (c *testMetaCharm) Meta() *charm.Meta { return c.meta } func (s *APISuite) TestIdsAreResolved(c *gc.C) { // This is just testing that ResolveURL is actually // passed to the router. Given how Router is // defined, and the ResolveURL tests, this should // be sufficient to "join the dots". 
_, wordpress := s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) s.assertGet(c, "wordpress/meta/charm-metadata", wordpress.Meta()) } func (s *APISuite) TestMetaCharmNotFound(c *gc.C) { for i, ep := range metaEndpoints { c.Logf("test %d: %s", i, ep.name) expected := params.Error{ Message: `no matching charm or bundle for cs:precise/wordpress-23`, Code: params.ErrNotFound, } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("precise/wordpress-23/meta/" + ep.name), ExpectStatus: http.StatusNotFound, ExpectBody: expected, }) expected.Message = `no matching charm or bundle for cs:wordpress` httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("wordpress/meta/" + ep.name), ExpectStatus: http.StatusNotFound, ExpectBody: expected, }) } } var resolveURLTests = []struct { url string expect *router.ResolvedURL notFound bool }{{ url: "wordpress", expect: newResolvedURL("cs:~charmers/trusty/wordpress-25", 25), }, { url: "precise/wordpress", expect: newResolvedURL("cs:~charmers/precise/wordpress-24", 24), }, { url: "utopic/bigdata", expect: newResolvedURL("cs:~charmers/utopic/bigdata-10", 10), }, { url: "~charmers/precise/wordpress", expect: newResolvedURL("cs:~charmers/precise/wordpress-24", -1), }, { url: "~charmers/precise/wordpress-99", notFound: true, }, { url: "~charmers/wordpress", expect: newResolvedURL("cs:~charmers/trusty/wordpress-25", -1), }, { url: "~charmers/wordpress-24", notFound: true, }, { url: "~bob/wordpress", expect: newResolvedURL("cs:~bob/trusty/wordpress-1", -1), }, { url: "~bob/precise/wordpress", expect: newResolvedURL("cs:~bob/precise/wordpress-2", -1), }, { url: "bigdata", expect: newResolvedURL("cs:~charmers/utopic/bigdata-10", 10), }, { url: "wordpress-24", notFound: true, }, { url: "bundlelovin", expect: newResolvedURL("cs:~charmers/bundle/bundlelovin-10", 10), }, { url: "wordpress-26", notFound: true, }, { url: "foo", notFound: 
true, }, { url: "trusty/bigdata", notFound: true, }, { url: "~bob/wily/django-47", notFound: true, }, { url: "~bob/django", notFound: true, }, { url: "wily/django", notFound: true, }, { url: "django", notFound: true, }, { url: "~bob/trusty/haproxy-0", notFound: true, }, { url: "~bob/haproxy", notFound: true, }, { url: "trusty/haproxy", notFound: true, }, { url: "haproxy", notFound: true, }, { // V4 SPECIFIC url: "~bob/multi-series", expect: newResolvedURLWithPreferredSeries("cs:~bob/multi-series-0", -1, "trusty"), }, { // V4 SPECIFIC url: "~bob/utopic/multi-series", expect: newResolvedURLWithPreferredSeries("cs:~bob/multi-series-0", -1, "utopic"), }} func (s *APISuite) TestResolveURL(c *gc.C) { s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-24", 24)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-24", 24)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-25", 25)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/utopic/wordpress-10", 10)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/saucy/bigdata-99", 99)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/utopic/bigdata-10", 10)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~bob/trusty/wordpress-1", -1)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~bob/precise/wordpress-2", -1)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~bob/precise/other-2", -1)) s.addPublicBundleFromRepo(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/bundlelovin-10", 10), true) s.addPublicBundleFromRepo(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/wordpress-simple-10", 10), true) s.addPublicCharmFromRepo(c, "multi-series", newResolvedURL("cs:~bob/multi-series-0", -1)) for i, test := range 
resolveURLTests { c.Logf("test %d: %s", i, test.url) url := charm.MustParseURL(test.url) rurl, err := v4.ResolveURL(entitycache.New(&v5.StoreWithChannel{ Store: s.store, Channel: params.UnpublishedChannel, }), url) if test.notFound { c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) c.Assert(err, gc.ErrorMatches, `no matching charm or bundle for .*`) c.Assert(rurl, gc.IsNil) continue } c.Assert(err, gc.IsNil) c.Assert(rurl, jc.DeepEquals, test.expect) } } var serveExpandIdTests = []struct { about string url string expect []params.ExpandedId err string }{{ about: "fully qualified URL", url: "~charmers/trusty/wordpress-47", expect: []params.ExpandedId{ // V4 SPECIFIC {Id: "cs:~charmers/utopic/wordpress-42"}, {Id: "cs:~charmers/trusty/wordpress-47"}, {Id: "cs:~charmers/trusty/wordpress-5"}, {Id: "cs:~charmers/utopic/wordpress-5"}, {Id: "cs:~charmers/vivid/wordpress-5"}, {Id: "cs:~charmers/wily/wordpress-5"}, }, }, { about: "promulgated URL", url: "trusty/wordpress-47", expect: []params.ExpandedId{ // V4 SPECIFIC {Id: "cs:utopic/wordpress-42"}, {Id: "cs:trusty/wordpress-47"}, {Id: "cs:trusty/wordpress-49"}, {Id: "cs:utopic/wordpress-49"}, {Id: "cs:vivid/wordpress-49"}, {Id: "cs:wily/wordpress-49"}, }, }, { about: "non-promulgated charm", url: "~bob/precise/builder", expect: []params.ExpandedId{ {Id: "cs:~bob/precise/builder-5"}, }, }, { about: "partial URL", url: "haproxy", expect: []params.ExpandedId{ {Id: "cs:trusty/haproxy-1"}, {Id: "cs:precise/haproxy-1"}, }, }, { about: "revision with series matches bundles (and multi-series charms) only", url: "mongo-0", expect: []params.ExpandedId{ {Id: "cs:bundle/mongo-0"}, }, }, { about: "single result", url: "bundle/mongo-0", expect: []params.ExpandedId{ {Id: "cs:bundle/mongo-0"}, }, }, { about: "fully qualified URL with no entities found", url: "~charmers/precise/no-such-42", err: `no matching charm or bundle for cs:~charmers/precise/no-such-42`, }, { about: "partial URL with no entities found", url: "no-such", err: 
`no matching charm or bundle for cs:no-such`, }} func (s *APISuite) TestServeExpandId(c *gc.C) { // Add a bunch of entities in the database. // Note that expand-id only cares about entity identifiers, // so it is ok to reuse the same charm for all the entities. s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/utopic/wordpress-42", 42)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-47", 47)) err := s.store.AddCharmWithArchive(newResolvedURL("cs:~charmers/trusty/wordpress-48", 48), storetesting.NewCharm(nil)) c.Assert(err, gc.IsNil) err = s.store.Publish(newResolvedURL("cs:~charmers/trusty/wordpress-48", 48), params.DevelopmentChannel) c.Assert(err, gc.IsNil) s.addPublicCharmFromRepo(c, "multi-series", newResolvedURL("cs:~charmers/wordpress-5", 49)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/haproxy-1", 1)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/haproxy-1", 1)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~bob/precise/builder-5", -1)) s.addPublicBundleFromRepo(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/mongo-0", 0), true) s.addPublicBundleFromRepo(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/wordpress-simple-0", 0), true) for i, test := range serveExpandIdTests { c.Logf("test %d: %s", i, test.about) storeURL := storeURL(test.url + "/expand-id") var expectStatus int var expectBody interface{} if test.err == "" { expectStatus = http.StatusOK expectBody = test.expect } else { expectStatus = http.StatusNotFound expectBody = params.Error{ Code: params.ErrNotFound, Message: test.err, } } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL, ExpectStatus: expectStatus, ExpectBody: expectBody, }) } } var serveMetaRevisionInfoTests = []struct { about string url string expect params.RevisionInfoResponse err string }{{ about: "fully qualified url", url: 
"trusty/wordpress-42", expect: params.RevisionInfoResponse{ []*charm.URL{ charm.MustParseURL("cs:trusty/wordpress-43"), charm.MustParseURL("cs:trusty/wordpress-42"), charm.MustParseURL("cs:trusty/wordpress-41"), charm.MustParseURL("cs:trusty/wordpress-9"), }, }, }, { about: "partial url uses a default series", url: "wordpress", expect: params.RevisionInfoResponse{ []*charm.URL{ charm.MustParseURL("cs:trusty/wordpress-43"), charm.MustParseURL("cs:trusty/wordpress-42"), charm.MustParseURL("cs:trusty/wordpress-41"), charm.MustParseURL("cs:trusty/wordpress-9"), }, }, }, { about: "non-promulgated URL gives non-promulgated revisions (~charmers)", url: "~charmers/trusty/cinder", expect: params.RevisionInfoResponse{ []*charm.URL{ charm.MustParseURL("cs:~charmers/trusty/cinder-6"), charm.MustParseURL("cs:~charmers/trusty/cinder-5"), charm.MustParseURL("cs:~charmers/trusty/cinder-4"), charm.MustParseURL("cs:~charmers/trusty/cinder-3"), charm.MustParseURL("cs:~charmers/trusty/cinder-2"), charm.MustParseURL("cs:~charmers/trusty/cinder-1"), charm.MustParseURL("cs:~charmers/trusty/cinder-0"), }, }, }, { about: "non-promulgated URL gives non-promulgated revisions (~openstack-charmers)", url: "~openstack-charmers/trusty/cinder", expect: params.RevisionInfoResponse{ []*charm.URL{ charm.MustParseURL("cs:~openstack-charmers/trusty/cinder-1"), charm.MustParseURL("cs:~openstack-charmers/trusty/cinder-0"), }, }, }, { about: "promulgated URL gives promulgated revisions", url: "trusty/cinder", expect: params.RevisionInfoResponse{ []*charm.URL{ charm.MustParseURL("cs:trusty/cinder-5"), charm.MustParseURL("cs:trusty/cinder-4"), charm.MustParseURL("cs:trusty/cinder-3"), charm.MustParseURL("cs:trusty/cinder-2"), charm.MustParseURL("cs:trusty/cinder-1"), charm.MustParseURL("cs:trusty/cinder-0"), }, }, }, { about: "multi-series charm expands to all revisions of that charm", url: "multi-series", expect: params.RevisionInfoResponse{ // V4 SPECIFIC []*charm.URL{ 
charm.MustParseURL("cs:trusty/multi-series-41"), charm.MustParseURL("cs:trusty/multi-series-40"), }, }, }, { about: "multi-series charm with series specified", url: "trusty/multi-series", expect: params.RevisionInfoResponse{ // V4 SPECIFIC []*charm.URL{ charm.MustParseURL("cs:trusty/multi-series-41"), charm.MustParseURL("cs:trusty/multi-series-40"), }, }, }, { about: "multi-series charm with non-promulgated URL", url: "~charmers/multi-series", expect: params.RevisionInfoResponse{ // V4 SPECIFIC []*charm.URL{ charm.MustParseURL("cs:~charmers/trusty/multi-series-2"), charm.MustParseURL("cs:~charmers/trusty/multi-series-1"), }, }, }, { about: "multi-series charm with non-promulgated URL and series specified", url: "~charmers/utopic/multi-series", expect: params.RevisionInfoResponse{ // V4 SPECIFIC []*charm.URL{ charm.MustParseURL("cs:~charmers/utopic/multi-series-2"), charm.MustParseURL("cs:~charmers/utopic/multi-series-1"), }, }, }, { about: "mixed multi/single series charm, latest rev", url: "mixed", expect: params.RevisionInfoResponse{ // V4 SPECIFIC []*charm.URL{ charm.MustParseURL("cs:trusty/mixed-43"), charm.MustParseURL("cs:trusty/mixed-42"), charm.MustParseURL("cs:trusty/mixed-41"), charm.MustParseURL("cs:trusty/mixed-40"), }, }, }, { about: "mixed multi/single series charm with series", url: "trusty/mixed-40", expect: params.RevisionInfoResponse{ // V4 SPECIFIC []*charm.URL{ charm.MustParseURL("cs:trusty/mixed-43"), charm.MustParseURL("cs:trusty/mixed-42"), charm.MustParseURL("cs:trusty/mixed-41"), charm.MustParseURL("cs:trusty/mixed-40"), }, }, }, { about: "no entities found", url: "precise/no-such-33", err: `no matching charm or bundle for cs:precise/no-such-33`, }} func (s *APISuite) TestServeMetaRevisionInfo(c *gc.C) { s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mysql-41", 41)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mysql-42", 42)) s.addPublicCharmFromRepo(c, "wordpress", 
newResolvedURL("cs:~charmers/trusty/wordpress-9", 9)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-41", 41)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-42", 42)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-43", 43)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-42", 42)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-0", -1)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-1", -1)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-2", 0)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-3", 1)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~openstack-charmers/trusty/cinder-0", 2)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~openstack-charmers/trusty/cinder-1", 3)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-4", -1)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-5", 4)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-6", 5)) s.addPublicCharmFromRepo(c, "multi-series", newResolvedURL("cs:~charmers/multi-series-1", 40)) s.addPublicCharmFromRepo(c, "multi-series", newResolvedURL("cs:~charmers/multi-series-2", 41)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mixed-1", 40)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mixed-2", 41)) s.addPublicCharmFromRepo(c, "multi-series", newResolvedURL("cs:~charmers/mixed-3", 42)) s.addPublicCharmFromRepo(c, "multi-series", newResolvedURL("cs:~charmers/mixed-4", 43)) for i, test := range serveMetaRevisionInfoTests { c.Logf("test %d: %s", i, test.about) storeURL := storeURL(test.url + "/meta/revision-info") var 
expectStatus int var expectBody interface{} if test.err == "" { expectStatus = http.StatusOK expectBody = test.expect } else { expectStatus = http.StatusNotFound expectBody = params.Error{ Code: params.ErrNotFound, Message: test.err, } } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL, ExpectStatus: expectStatus, ExpectBody: expectBody, }) } } var metaStatsTests = []struct { // about describes the test. about string // url is the entity id to use when making the meta/stats request. url string // downloads maps entity ids to a numeric key/value pair where the key is // the number of days in the past when the entity was downloaded and the // value is the number of downloads performed that day. downloads map[string]map[int]int // expectResponse is the expected response from the meta/stats endpoint. expectResponse params.StatsResponse }{{ about: "no downloads", url: "trusty/mysql-0", downloads: map[string]map[int]int{"trusty/mysql-0": {}}, }, { about: "single download", url: "utopic/django-42", downloads: map[string]map[int]int{ "utopic/django-42": {0: 1}, }, expectResponse: params.StatsResponse{ ArchiveDownloadCount: 1, ArchiveDownload: params.StatsCount{ Total: 1, Day: 1, Week: 1, Month: 1, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: 1, Day: 1, Week: 1, Month: 1, }, }, }, { about: "single download a long time ago", url: "utopic/django-42", downloads: map[string]map[int]int{ "utopic/django-42": {100: 1}, }, expectResponse: params.StatsResponse{ ArchiveDownloadCount: 1, ArchiveDownload: params.StatsCount{ Total: 1, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: 1, }, }, }, { about: "some downloads this month", url: "utopic/wordpress-47", downloads: map[string]map[int]int{ "utopic/wordpress-47": {20: 2, 25: 5}, }, expectResponse: params.StatsResponse{ ArchiveDownloadCount: 2 + 5, ArchiveDownload: params.StatsCount{ Total: 2 + 5, Month: 2 + 5, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: 2 + 5, 
Month: 2 + 5, }, }, }, { about: "multiple recent downloads", url: "utopic/django-42", downloads: map[string]map[int]int{ "utopic/django-42": {100: 1, 12: 3, 8: 5, 4: 10, 2: 1, 0: 3}, }, expectResponse: params.StatsResponse{ ArchiveDownloadCount: 1 + 3 + 5 + 10 + 1 + 3, ArchiveDownload: params.StatsCount{ Total: 1 + 3 + 5 + 10 + 1 + 3, Day: 3, Week: 10 + 1 + 3, Month: 3 + 5 + 10 + 1 + 3, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: 1 + 3 + 5 + 10 + 1 + 3, Day: 3, Week: 10 + 1 + 3, Month: 3 + 5 + 10 + 1 + 3, }, }, }, { about: "sparse downloads", url: "utopic/django-42", downloads: map[string]map[int]int{ "utopic/django-42": {200: 3, 27: 4, 3: 5}, }, expectResponse: params.StatsResponse{ ArchiveDownloadCount: 3 + 4 + 5, ArchiveDownload: params.StatsCount{ Total: 3 + 4 + 5, Week: 5, Month: 4 + 5, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: 3 + 4 + 5, Week: 5, Month: 4 + 5, }, }, }, { about: "bundle downloads", url: "bundle/django-simple-2", downloads: map[string]map[int]int{ "bundle/django-simple-2": {200: 3, 27: 4, 3: 5}, }, expectResponse: params.StatsResponse{ ArchiveDownloadCount: 3 + 4 + 5, ArchiveDownload: params.StatsCount{ Total: 3 + 4 + 5, Week: 5, Month: 4 + 5, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: 3 + 4 + 5, Week: 5, Month: 4 + 5, }, }, }, { about: "different charms", url: "trusty/rails-47", downloads: map[string]map[int]int{ "utopic/rails-47": {200: 3, 27: 4, 3: 5}, "trusty/rails-47": {20: 2, 6: 10}, "trusty/mysql-0": {200: 1, 14: 2, 1: 7}, }, expectResponse: params.StatsResponse{ ArchiveDownloadCount: 2 + 10, ArchiveDownload: params.StatsCount{ Total: 2 + 10, Week: 10, Month: 2 + 10, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: 2 + 10, Week: 10, Month: 2 + 10, }, }, }, { about: "different revisions of the same charm", url: "precise/rails-1", downloads: map[string]map[int]int{ "precise/rails-0": {300: 1, 200: 2}, "precise/rails-1": {100: 5, 10: 3, 2: 7}, "precise/rails-2": {6: 10, 0: 9}, }, 
expectResponse: params.StatsResponse{ ArchiveDownloadCount: 5 + 3 + 7, ArchiveDownload: params.StatsCount{ Total: 5 + 3 + 7, Week: 7, Month: 3 + 7, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: (1 + 2) + (5 + 3 + 7) + (10 + 9), Day: 0 + 0 + 9, Week: 0 + 7 + (10 + 9), Month: 0 + (3 + 7) + (10 + 9), }, }, }, { about: "downloads only in an old revision", url: "trusty/wordpress-2", downloads: map[string]map[int]int{ "precise/wordpress-2": {2: 2, 0: 1}, "trusty/wordpress-0": {100: 10}, "trusty/wordpress-2": {}, }, expectResponse: params.StatsResponse{ ArchiveDownloadAllRevisions: params.StatsCount{ Total: 10, }, }, }, { about: "downloads only in newer revision", url: "utopic/wordpress-0", downloads: map[string]map[int]int{ "utopic/wordpress-0": {}, "utopic/wordpress-1": {31: 7, 10: 1, 3: 2, 0: 1}, "utopic/wordpress-2": {6: 9, 0: 2}, }, expectResponse: params.StatsResponse{ ArchiveDownloadAllRevisions: params.StatsCount{ Total: (7 + 1 + 2 + 1) + (9 + 2), Day: 1 + 2, Week: (2 + 1) + (9 + 2), Month: (1 + 2 + 1) + (9 + 2), }, }, }, { about: "non promulgated charms", url: "~who/utopic/django-0", downloads: map[string]map[int]int{ "utopic/django-0": {100: 1, 10: 2, 1: 3, 0: 4}, "~who/utopic/django-0": {2: 5}, }, expectResponse: params.StatsResponse{ ArchiveDownloadCount: 5, ArchiveDownload: params.StatsCount{ Total: 5, Week: 5, Month: 5, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: 5, Week: 5, Month: 5, }, }, }} func (s *APISuite) TestMetaStats(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } // TODO (frankban): remove this call when removing the legacy counts logic. 
patchLegacyDownloadCountsEnabled(s.AddCleanup, false) today := time.Now() for i, test := range metaStatsTests { c.Logf("test %d: %s", i, test.about) for id, downloadsPerDay := range test.downloads { url := &router.ResolvedURL{ URL: *charm.MustParseURL(id), PromulgatedRevision: -1, } if url.URL.User == "" { url.URL.User = "charmers" url.PromulgatedRevision = url.URL.Revision } // Add the required entities to the database. if url.URL.Series == "bundle" { s.addPublicBundleFromRepo(c, "wordpress-simple", url, true) } else { s.addPublicCharmFromRepo(c, "wordpress", url) } // Simulate the entity was downloaded at the specified dates. for daysAgo, downloads := range downloadsPerDay { date := today.AddDate(0, 0, -daysAgo) key := []string{params.StatsArchiveDownload, url.URL.Series, url.URL.Name, url.URL.User, strconv.Itoa(url.URL.Revision)} for i := 0; i < downloads; i++ { err := s.store.IncCounterAtTime(key, date) c.Assert(err, gc.IsNil) } if url.PromulgatedRevision > -1 { key := []string{params.StatsArchiveDownloadPromulgated, url.URL.Series, url.URL.Name, "", strconv.Itoa(url.PromulgatedRevision)} for i := 0; i < downloads; i++ { err := s.store.IncCounterAtTime(key, date) c.Assert(err, gc.IsNil) } } } } // Ensure the meta/stats response reports the correct downloads count. s.assertGet(c, test.url+"/meta/stats", test.expectResponse) // Clean up the collections. _, err := s.store.DB.Entities().RemoveAll(nil) c.Assert(err, gc.IsNil) _, err = s.store.DB.StatCounters().RemoveAll(nil) c.Assert(err, gc.IsNil) } } var metaStatsWithLegacyDownloadCountsTests = []struct { about string count string expectValue int64 expectError string }{{ about: "no extra-info", }, { about: "zero downloads", count: "0", }, { about: "some downloads", count: "47", expectValue: 47, }, { about: "invalid value", count: "invalid", expectError: "cannot unmarshal extra-info value: invalid character 'i' looking for beginning of value", }} // Tests meta/stats with LegacyDownloadCountsEnabled set to true. 
// TODO (frankban): remove this test case when removing the legacy counts // logic. func (s *APISuite) TestMetaStatsWithLegacyDownloadCounts(c *gc.C) { patchLegacyDownloadCountsEnabled(s.AddCleanup, true) id, _ := s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/utopic/wordpress-42", 42)) url := storeURL("utopic/wordpress-42/meta/stats") for i, test := range metaStatsWithLegacyDownloadCountsTests { c.Logf("test %d: %s", i, test.about) // Update the entity extra info if required. if test.count != "" { extraInfo := map[string][]byte{ params.LegacyDownloadStats: []byte(test.count), } err := s.store.UpdateEntity(id, bson.D{{ "$set", bson.D{{"extrainfo", extraInfo}}, }}) c.Assert(err, gc.IsNil) } var expectBody interface{} var expectStatus int if test.expectError == "" { // Ensure the downloads count is correctly returned. expectBody = params.StatsResponse{ ArchiveDownloadCount: test.expectValue, ArchiveDownload: params.StatsCount{ Total: test.expectValue, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: test.expectValue, }, } expectStatus = http.StatusOK } else { // Ensure an error is returned. expectBody = params.Error{ Message: test.expectError, } expectStatus = http.StatusInternalServerError } // Perform the request. 
		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
			Handler:      s.srv,
			URL:          url,
			ExpectStatus: expectStatus,
			ExpectBody:   expectBody,
		})
	}
}

// publishSpec describes a single charm publication used to populate
// the store at a known time.
type publishSpec struct {
	id   *router.ResolvedURL // charm id to add to the store
	time string              // publication time in "2006-01-02 15:04" format
	acl  []string            // optional read ACL to set on the entity
}

// published returns the params.Published value equivalent to the
// spec, panicking if the spec's time string cannot be parsed.
func (p publishSpec) published() params.Published {
	t, err := time.Parse("2006-01-02 15:04", p.time)
	if err != nil {
		panic(err)
	}
	return params.Published{&p.id.URL, t}
}

// publishedCharms holds the charms published at known (far-future)
// times, listed in ascending publication-time order.
var publishedCharms = []publishSpec{{
	id:   newResolvedURL("cs:~charmers/precise/wordpress-1", 1),
	time: "5432-10-12 00:00",
}, {
	id:   newResolvedURL("cs:~charmers/precise/mysql-1", 1),
	time: "5432-10-12 13:00",
}, {
	id:   newResolvedURL("cs:~charmers/precise/wordpress-2", 2),
	time: "5432-10-12 23:59",
}, {
	id:   newResolvedURL("cs:~charmers/precise/mysql-2", 2),
	time: "5432-10-13 00:00",
}, {
	id:   newResolvedURL("cs:~charmers/precise/mysql-5", 5),
	time: "5432-10-13 10:00",
}, {
	id:   newResolvedURL("cs:~charmers/precise/wordpress-3", 3),
	time: "5432-10-14 01:00",
}, {
	id:   newResolvedURL("cs:~charmers/precise/django-0", -1),
	time: "5432-10-14 02:00",
	acl:  []string{"charmers"},
}}

// changesPublishedTests specifies the expected results of the
// changes/published endpoint for various start/stop/limit arguments.
var changesPublishedTests = []struct {
	args string
	// expect holds indexes into publishedCharms
	// of the expected indexes returned by charms/published
	expect []int
}{{
	args:   "",
	expect: []int{5, 4, 3, 2, 1, 0},
}, {
	args:   "?start=5432-10-13",
	expect: []int{5, 4, 3},
}, {
	args:   "?stop=5432-10-13",
	expect: []int{4, 3, 2, 1, 0},
}, {
	args:   "?start=5432-10-13&stop=5432-10-13",
	expect: []int{4, 3},
}, {
	args:   "?start=5432-10-12&stop=5432-10-13",
	expect: []int{4, 3, 2, 1, 0},
}, {
	args:   "?start=5432-10-13&stop=5432-10-12",
	expect: []int{},
}, {
	args:   "?limit=3",
	expect: []int{5, 4, 3},
}, {
	args:   "?start=5432-10-12&stop=5432-10-13&limit=2",
	expect: []int{4, 3},
}}

// TestChangesPublished checks that changes/published returns the
// expected publications for each set of query arguments above.
func (s *APISuite) TestChangesPublished(c *gc.C) {
	s.publishCharmsAtKnownTimes(c, publishedCharms)
	for i, test := range changesPublishedTests {
		c.Logf("test %d: %q", i, test.args)
		expect := make([]params.Published, len(test.expect))
		for j, index := range
test.expect { expect[j] = publishedCharms[index].published() } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("changes/published") + test.args, ExpectBody: expect, }) } } func (s *APISuite) TestChangesPublishedAdmin(c *gc.C) { s.publishCharmsAtKnownTimes(c, publishedCharms) expect := make([]params.Published, len(publishedCharms)) for i := range expect { expect[i] = publishedCharms[len(publishedCharms)-(i+1)].published() } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Username: testUsername, Password: testPassword, URL: storeURL("changes/published"), ExpectBody: expect, }) } var changesPublishedErrorsTests = []struct { args string expect params.Error status int }{{ args: "?limit=0", expect: params.Error{ Code: params.ErrBadRequest, Message: "invalid 'limit' value", }, status: http.StatusBadRequest, }, { args: "?limit=-1", expect: params.Error{ Code: params.ErrBadRequest, Message: "invalid 'limit' value", }, status: http.StatusBadRequest, }, { args: "?limit=-9999", expect: params.Error{ Code: params.ErrBadRequest, Message: "invalid 'limit' value", }, status: http.StatusBadRequest, }, { args: "?start=baddate", expect: params.Error{ Code: params.ErrBadRequest, Message: `invalid 'start' value "baddate": parsing time "baddate" as "2006-01-02": cannot parse "baddate" as "2006"`, }, status: http.StatusBadRequest, }, { args: "?stop=baddate", expect: params.Error{ Code: params.ErrBadRequest, Message: `invalid 'stop' value "baddate": parsing time "baddate" as "2006-01-02": cannot parse "baddate" as "2006"`, }, status: http.StatusBadRequest, }} func (s *APISuite) TestChangesPublishedErrors(c *gc.C) { s.publishCharmsAtKnownTimes(c, publishedCharms) for i, test := range changesPublishedErrorsTests { c.Logf("test %d: %q", i, test.args) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("changes/published") + test.args, ExpectStatus: test.status, ExpectBody: test.expect, }) } 
} func (s *APISuite) TestPublish(c *gc.C) { // V4 SPECIFIC httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Method: "PUT", URL: storeURL("wordpress/publish"), Do: bakeryDo(nil), JSONBody: params.PublishRequest{}, ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Code: params.ErrNotFound, Message: `not found`, }, }) } // publishCharmsAtKnownTimes populates the store with // a range of charms with known time stamps. func (s *APISuite) publishCharmsAtKnownTimes(c *gc.C, charms []publishSpec) { for _, ch := range publishedCharms { id, _ := s.addPublicCharmFromRepo(c, "wordpress", ch.id) t := ch.published().PublishTime err := s.store.UpdateEntity(id, bson.D{{"$set", bson.D{{"uploadtime", t}}}}) c.Assert(err, gc.IsNil) if len(ch.acl) > 0 { err := s.store.SetPerms(&id.URL, "unpublished.read", ch.acl...) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&id.URL, "stable.read", ch.acl...) c.Assert(err, gc.IsNil) } } } var debugPprofTests = []struct { path string match string }{{ path: "debug/pprof/", match: `(?s).*profiles:.*heap.*`, }, { path: "debug/pprof/goroutine?debug=2", match: "(?s)goroutine [0-9]+.*", }, { path: "debug/pprof/cmdline", match: ".+charmstore.+", }} func (s *APISuite) TestDebugPprof(c *gc.C) { for i, test := range debugPprofTests { c.Logf("test %d: %s", i, test.path) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, Header: basicAuthHeader(testUsername, testPassword), URL: storeURL(test.path), }) c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.String())) c.Assert(rec.Body.String(), gc.Matches, test.match) } } func (s *APISuite) TestDebugPprofFailsWithoutAuth(c *gc.C) { for i, test := range debugPprofTests { c.Logf("test %d: %s", i, test.path) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(test.path), ExpectStatus: http.StatusProxyAuthRequired, ExpectBody: dischargeRequiredBody, }) } } func (s *APISuite) TestHash256Laziness(c 
*gc.C) { // TODO frankban: remove this test after updating entities in the // production db with their SHA256 hash value. Entities are updated by // running the cshash256 command. id, _ := s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~who/precise/wordpress-0", -1)) // Retrieve the SHA256 hash. entity, err := s.store.FindEntity(id, charmstore.FieldSelector("blobhash256")) c.Assert(err, gc.IsNil) c.Assert(entity.BlobHash256, gc.Not(gc.Equals), "") } var urlChannelResolvingEntities = []struct { id *router.ResolvedURL channel params.Channel }{{ id: newResolvedURL("~charmers/precise/wordpress-0", 0), channel: params.StableChannel, }, { id: newResolvedURL("~charmers/precise/wordpress-1", 1), channel: params.DevelopmentChannel, }, { id: newResolvedURL("~charmers/precise/wordpress-2", 2), channel: params.UnpublishedChannel, }, { id: newResolvedURL("~charmers/trusty/mysql-0", 0), channel: params.UnpublishedChannel, }} var urlChannelResolvingTests = []struct { url string channel params.Channel expectURL string expectStatus int expectError params.Error }{{ url: "wordpress", expectURL: "cs:precise/wordpress-0", }, { url: "wordpress", channel: params.StableChannel, expectURL: "cs:precise/wordpress-0", }, { url: "wordpress", channel: params.DevelopmentChannel, expectURL: "cs:precise/wordpress-1", }, { url: "wordpress", channel: params.UnpublishedChannel, expectURL: "cs:precise/wordpress-2", }, { url: "~charmers/precise/wordpress", channel: params.StableChannel, expectURL: "cs:~charmers/precise/wordpress-0", }, { url: "~charmers/precise/wordpress-2", channel: params.StableChannel, expectStatus: http.StatusNotFound, expectError: params.Error{ Message: `cs:~charmers/precise/wordpress-2 not found in stable channel`, Code: params.ErrNotFound, }, }, { url: "mysql", expectStatus: http.StatusNotFound, expectError: params.Error{ Message: `no matching charm or bundle for cs:mysql`, Code: params.ErrNotFound, }, }, { url: "mysql", channel: "unknown", expectStatus: 
http.StatusBadRequest, expectError: params.Error{ Message: `invalid channel "unknown" specified in request`, Code: params.ErrBadRequest, }, }} func (s *APISuite) TestURLChannelResolving(c *gc.C) { s.discharge = dischargeForUser("charmers") for _, add := range urlChannelResolvingEntities { err := s.store.AddCharmWithArchive(add.id, storetesting.NewCharm(nil)) c.Assert(err, gc.IsNil) if add.channel != params.UnpublishedChannel { err = s.store.Publish(add.id, add.channel) c.Assert(err, gc.IsNil) } } for i, test := range urlChannelResolvingTests { path := test.url + "/meta/any" if test.channel != "" { path += "?channel=" + string(test.channel) } c.Logf("test %d: %v", i, test.url) if test.expectError.Message != "" { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL(path), ExpectStatus: test.expectStatus, ExpectBody: test.expectError, }) } else { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL(path), ExpectBody: params.MetaAnyResponse{ Id: charm.MustParseURL(test.expectURL), }, }) } } } func basicAuthHeader(username, password string) http.Header { // It's a pity we have to jump through this hoop. 
	// Build a throwaway request purely so that net/http encodes the
	// Authorization header for us.
	req := &http.Request{
		Header: make(http.Header),
	}
	req.SetBasicAuth(username, password)
	return req.Header
}

// entityFieldGetter returns a metadata value getter that reads the
// named field from the entity document via reflection. It panics if
// the entity has no field with that name.
func entityFieldGetter(fieldName string) metaEndpointExpectedValueGetter {
	return entityGetter(func(entity *mongodoc.Entity) interface{} {
		field := reflect.ValueOf(entity).Elem().FieldByName(fieldName)
		if !field.IsValid() {
			panic(errgo.Newf("entity has no field %q", fieldName))
		}
		return field.Interface()
	})
}

// entityGetter returns a metadata value getter that finds the entity
// for the given resolved URL and applies get to the resulting document.
func entityGetter(get func(*mongodoc.Entity) interface{}) metaEndpointExpectedValueGetter {
	return func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) {
		doc, err := store.FindEntity(url, nil)
		if err != nil {
			return nil, errgo.Mask(err)
		}
		return get(doc), nil
	}
}

// zipGetter returns a metadata value getter that reads the entity's
// archive blob into memory, opens it as a zip archive and applies get
// to the resulting zip reader.
func zipGetter(get func(*zip.Reader) interface{}) metaEndpointExpectedValueGetter {
	return func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) {
		doc, err := store.FindEntity(url, charmstore.FieldSelector("blobname"))
		if err != nil {
			return nil, errgo.Mask(err)
		}
		blob, size, err := store.BlobStore.Open(doc.BlobName)
		if err != nil {
			return nil, errgo.Mask(err)
		}
		defer blob.Close()
		content, err := ioutil.ReadAll(blob)
		if err != nil {
			return nil, errgo.Mask(err)
		}
		r, err := zip.NewReader(bytes.NewReader(content), size)
		if err != nil {
			return nil, errgo.Mask(err)
		}
		return get(r), nil
	}
}

// entitySizeChecker asserts that an archive-size response reports a
// non-zero size.
func entitySizeChecker(c *gc.C, data interface{}) {
	response := data.(*params.ArchiveSizeResponse)
	c.Assert(response.Size, gc.Not(gc.Equals), int64(0))
}

// assertPutNonAdmin PUTs val to the given URL without admin
// credentials and asserts a successful, empty response.
func (s *APISuite) assertPutNonAdmin(c *gc.C, url string, val interface{}) {
	s.assertPut0(c, url, val, false)
}

// assertPut PUTs val to the given URL with admin credentials and
// asserts a successful, empty response.
func (s *APISuite) assertPut(c *gc.C, url string, val interface{}) {
	s.assertPut0(c, url, val, true)
}

// assertPut0 marshals val as JSON and PUTs it to the given store URL,
// optionally with admin basic-auth credentials, asserting a 200
// response with an empty body.
func (s *APISuite) assertPut0(c *gc.C, url string, val interface{}, asAdmin bool) {
	body, err := json.Marshal(val)
	c.Assert(err, gc.IsNil)
	p := httptesting.DoRequestParams{
		Handler: s.srv,
		URL:     storeURL(url),
		Method:  "PUT",
		Do:      bakeryDo(nil),
		Header: http.Header{
			"Content-Type": {"application/json"},
		},
		Body:
bytes.NewReader(body), } if asAdmin { p.Username = testUsername p.Password = testPassword } rec := httptesting.DoRequest(c, p) c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.String())) c.Assert(rec.Body.String(), gc.HasLen, 0) } func (s *APISuite) assertGet(c *gc.C, url string, expectVal interface{}) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL(url), ExpectBody: expectVal, }) } func (s *APISuite) addLog(c *gc.C, log *mongodoc.Log) { err := s.store.DB.Logs().Insert(log) c.Assert(err, gc.Equals, nil) } func mustMarshalJSON(val interface{}) string { data, err := json.Marshal(val) if err != nil { panic(fmt.Errorf("cannot marshal %#v: %v", val, err)) } return string(data) } func (s *APISuite) TestMacaroon(c *gc.C) { var checkedCaveats []string var mu sync.Mutex var dischargeError error s.discharge = func(cond string, arg string) ([]checkers.Caveat, error) { mu.Lock() defer mu.Unlock() checkedCaveats = append(checkedCaveats, cond+" "+arg) return []checkers.Caveat{checkers.DeclaredCaveat("username", "who")}, dischargeError } rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("macaroon"), Method: "GET", }) c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.String())) var m macaroon.Macaroon err := json.Unmarshal(rec.Body.Bytes(), &m) c.Assert(err, gc.IsNil) c.Assert(m.Location(), gc.Equals, "charmstore") client := httpbakery.NewClient() ms, err := client.DischargeAll(&m) c.Assert(err, gc.IsNil) sort.Strings(checkedCaveats) c.Assert(checkedCaveats, jc.DeepEquals, []string{ "is-authenticated-user ", }) macaroonCookie, err := httpbakery.NewCookie(ms) c.Assert(err, gc.IsNil) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("log"), Do: bakeryDo(nil), Cookies: []*http.Cookie{macaroonCookie}, ExpectStatus: http.StatusUnauthorized, ExpectBody: params.Error{ Code: 
params.ErrUnauthorized, Message: `unauthorized: access denied for user "who"`, }, }) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.noMacaroonSrv, URL: storeURL("log"), ExpectStatus: http.StatusUnauthorized, ExpectBody: params.Error{ Message: "authentication failed: missing HTTP auth header", Code: params.ErrUnauthorized, }, }) } func (s *APISuite) TestWhoAmIFailWithNoMacaroon(c *gc.C) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.noMacaroonSrv, URL: storeURL("whoami"), Do: bakeryDo(nil), ExpectStatus: http.StatusUnauthorized, ExpectBody: params.Error{ Code: params.ErrUnauthorized, Message: "authentication failed: missing HTTP auth header", }, }) } func (s *APISuite) TestWhoAmIReturnsNameAndGroups(c *gc.C) { s.discharge = dischargeForUser("who") s.idM.groups = map[string][]string{ "who": {"foo", "bar"}, } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("whoami"), Do: bakeryDo(nil), ExpectStatus: http.StatusOK, ExpectBody: params.WhoAmIResponse{ User: "who", Groups: []string{"foo", "bar"}, }, }) } var promulgateTests = []struct { about string entities []*mongodoc.Entity baseEntities []*mongodoc.BaseEntity id string useHTTPDo bool method string caveats []checkers.Caveat groups map[string][]string body io.Reader username string password string expectStatus int expectBody interface{} expectEntities []*mongodoc.Entity expectBaseEntities []*mongodoc.BaseEntity expectPromulgate bool expectUser string }{{ about: "unpromulgate base entity", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, id: "~charmers/wordpress", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), username: testUsername, password: testPassword, expectStatus: http.StatusOK, expectEntities: 
[]*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, expectUser: "admin", }, { about: "promulgate base entity", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, id: "~charmers/wordpress", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), username: testUsername, password: testPassword, expectStatus: http.StatusOK, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithACLs(params.StableChannel, mongodoc.ACL{ Write: []string{v4.PromulgatorsGroup}, }).WithPromulgated(true).Build(), }, expectPromulgate: true, expectUser: "admin", }, { about: "unpromulgate base entity not found", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, id: "~charmers/mysql", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), username: testUsername, password: testPassword, expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: `no matching charm or bundle for cs:~charmers/mysql`, }, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, }, { about: "promulgate base entity not 
found", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, id: "~charmers/mysql", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), username: testUsername, password: testPassword, expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: `no matching charm or bundle for cs:~charmers/mysql`, }, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, }, { about: "bad method", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, id: "~charmers/wordpress", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), username: testUsername, password: testPassword, method: "POST", expectStatus: http.StatusMethodNotAllowed, expectBody: params.Error{ Code: params.ErrMethodNotAllowed, Message: "POST not allowed", }, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, }, { about: "bad JSON", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, id: "~charmers/wordpress", body: bytes.NewReader([]byte("tru")), username: testUsername, password: testPassword, expectStatus: 
http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: "bad request: invalid character ' ' in literal true (expecting 'e')", }, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, }, { about: "unpromulgate base entity with macaroon", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, id: "~charmers/wordpress", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), caveats: []checkers.Caveat{ checkers.DeclaredCaveat(v4.UsernameAttr, v4.PromulgatorsGroup), }, expectStatus: http.StatusOK, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, expectUser: v4.PromulgatorsGroup, }, { about: "promulgate base entity with macaroon", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, id: "~charmers/wordpress", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), caveats: []checkers.Caveat{ checkers.DeclaredCaveat(v4.UsernameAttr, v4.PromulgatorsGroup), }, expectStatus: http.StatusOK, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ 
storetesting.NewBaseEntity("~charmers/wordpress").WithACLs(params.StableChannel, mongodoc.ACL{ Write: []string{v4.PromulgatorsGroup}, }).WithPromulgated(true).Build(), }, expectPromulgate: true, expectUser: v4.PromulgatorsGroup, }, { about: "promulgate base entity with group macaroon", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, id: "~charmers/wordpress", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), caveats: []checkers.Caveat{ checkers.DeclaredCaveat(v4.UsernameAttr, "bob"), }, groups: map[string][]string{ "bob": {v4.PromulgatorsGroup, "yellow"}, }, expectStatus: http.StatusOK, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithACLs(params.StableChannel, mongodoc.ACL{ Write: []string{v4.PromulgatorsGroup}, }).WithPromulgated(true).Build(), }, expectPromulgate: true, expectUser: "bob", }, { about: "no authorisation", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, useHTTPDo: true, id: "~charmers/wordpress", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), expectStatus: http.StatusProxyAuthRequired, expectBody: dischargeRequiredBody, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, }, { about: "promulgate base entity with unauthorized user 
macaroon", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, id: "~charmers/wordpress", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), caveats: []checkers.Caveat{ checkers.DeclaredCaveat(v4.UsernameAttr, "bob"), }, groups: map[string][]string{ "bob": {"yellow"}, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Message: `unauthorized: access denied for user "bob"`, Code: params.ErrUnauthorized, }, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, }} func (s *APISuite) TestPromulgate(c *gc.C) { for i, test := range promulgateTests { c.Logf("%d. %s\n", i, test.about) _, err := s.store.DB.Entities().RemoveAll(nil) c.Assert(err, gc.IsNil) _, err = s.store.DB.BaseEntities().RemoveAll(nil) c.Assert(err, gc.IsNil) for _, e := range test.entities { err := s.store.DB.Entities().Insert(e) c.Assert(err, gc.IsNil) } for _, e := range test.baseEntities { err := s.store.DB.BaseEntities().Insert(e) c.Assert(err, gc.IsNil) } if test.method == "" { test.method = "PUT" } client := httpbakery.NewHTTPClient() s.discharge = func(_, _ string) ([]checkers.Caveat, error) { return test.caveats, nil } s.idM.groups = test.groups p := httptesting.JSONCallParams{ Handler: s.srv, // TODO avoid using channel=unpublished here URL: storeURL(test.id + "/promulgate?channel=unpublished"), Method: test.method, Body: test.body, Header: http.Header{"Content-Type": {"application/json"}}, Username: test.username, Password: test.password, ExpectStatus: test.expectStatus, ExpectBody: test.expectBody, } if !test.useHTTPDo { p.Do = bakeryDo(client) } httptesting.AssertJSONCall(c, p) n, err := s.store.DB.Entities().Count() c.Assert(err, gc.IsNil) c.Assert(n, 
gc.Equals, len(test.expectEntities)) for _, e := range test.expectEntities { storetesting.AssertEntity(c, s.store.DB.Entities(), e) } n, err = s.store.DB.BaseEntities().Count() c.Assert(err, gc.IsNil) c.Assert(n, gc.Equals, len(test.expectBaseEntities)) for _, e := range test.expectBaseEntities { storetesting.AssertBaseEntity(c, s.store.DB.BaseEntities(), e) } } } func (s *APISuite) TestEndpointRequiringBaseEntityWithPromulgatedId(c *gc.C) { // Add a promulgated charm. url := newResolvedURL("~charmers/precise/wordpress-23", 23) s.addPublicCharmFromRepo(c, "wordpress", url) // Unpromulgate the base entity err := s.store.SetPromulgated(url, false) c.Assert(err, gc.IsNil) // Check that we can still enquire about the promulgation status // of the entity when using its promulgated URL. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("precise/wordpress-23/meta/promulgated"), ExpectBody: params.PromulgatedResponse{ Promulgated: false, }, }) } func (s *APISuite) TestTooManyConcurrentRequests(c *gc.C) { // We don't have any control over the number of concurrent // connections allowed by s.srv, so we make our own // server here with custom config. config := charmstore.ServerParams{ MaxMgoSessions: 1, } db := s.Session.DB("charmstore") srv, err := charmstore.NewServer(db, nil, config, map[string]charmstore.NewAPIHandlerFunc{"v4": v4.NewAPIHandler}) c.Assert(err, gc.IsNil) defer srv.Close() // Get a store from the pool so that we'll be // at the concurrent request limit. 
store := srv.Pool().Store() defer store.Close() httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: srv, Do: bakeryDo(nil), URL: storeURL("debug/status"), ExpectStatus: http.StatusServiceUnavailable, ExpectBody: params.Error{ Message: "service unavailable: too many mongo sessions in use", Code: params.ErrServiceUnavailable, }, }) } // dischargeRequiredBody returns a httptesting.BodyAsserter that checks // that the response body contains a discharge required error holding a macaroon // with a third-party caveat addressed to expectedEntityLocation. var dischargeRequiredBody httptesting.BodyAsserter = func(c *gc.C, body json.RawMessage) { var response httpbakery.Error err := json.Unmarshal(body, &response) c.Assert(err, gc.IsNil) c.Assert(response.Code, gc.Equals, httpbakery.ErrDischargeRequired) c.Assert(response.Message, gc.Equals, "verification failed: no macaroon cookies in request") c.Assert(response.Info.Macaroon, gc.NotNil) for _, cav := range response.Info.Macaroon.Caveats() { if cav.Location != "" { return } } c.Fatalf("no third party caveat found in response macaroon; caveats %#v", response.Info.Macaroon.Caveats()) } func (s *APISuite) TestSetAuthCookie(c *gc.C) { m, err := macaroon.New([]byte("key"), "id", "location") c.Assert(err, jc.ErrorIsNil) ms := macaroon.Slice{m} rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("set-auth-cookie"), Method: "PUT", Header: http.Header{"Origin": []string{"https://1.2.3.4"}}, JSONBody: params.SetAuthCookie{ Macaroons: ms, }, }) // The request is successful. c.Assert(rec.Code, gc.Equals, http.StatusOK) // The response includes the CORS header for the specific request. c.Assert(rec.Header().Get("Access-Control-Allow-Origin"), gc.Equals, "https://1.2.3.4") // The response includes the macaroons cookie. 
resp := http.Response{Header: rec.Header()} cookies := resp.Cookies() c.Assert(len(cookies), gc.Equals, 1) expected, err := httpbakery.NewCookie(ms) expected.Path = "/" c.Assert(err, jc.ErrorIsNil) c.Assert(cookies[0].Value, gc.Equals, expected.Value) } func (s *APISuite) TestSetAuthCookieBodyError(c *gc.C) { m, err := macaroon.New([]byte("key"), "id", "location") c.Assert(err, jc.ErrorIsNil) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("set-auth-cookie"), Method: "PUT", JSONBody: macaroon.Slice{m}, ExpectStatus: http.StatusInternalServerError, ExpectBody: params.Error{ Message: "cannot unmarshal macaroons: json: cannot unmarshal array into Go value of type params.SetAuthCookie", }, }) } func (s *APISuite) TestSetAuthCookieMethodError(c *gc.C) { m, err := macaroon.New([]byte("key"), "id", "location") c.Assert(err, jc.ErrorIsNil) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("set-auth-cookie"), Method: "POST", JSONBody: macaroon.Slice{m}, ExpectStatus: http.StatusMethodNotAllowed, ExpectBody: params.Error{ Code: params.ErrMethodNotAllowed, Message: "POST not allowed", }, }) } // entityACLs returns the ACLs that apply to the entity with the given URL. func entityACLs(store *charmstore.Store, url *router.ResolvedURL) (mongodoc.ACL, error) { e, err := store.FindEntity(url, nil) if err != nil { return mongodoc.ACL{}, err } be, err := store.FindBaseEntity(&url.URL, nil) if err != nil { return mongodoc.ACL{}, err } ch := params.UnpublishedChannel if e.Stable { ch = params.StableChannel } else if e.Development { ch = params.DevelopmentChannel } return be.ChannelACLs[ch], nil } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/log_test.go0000664000175000017500000003511412672604603025714 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4"

import (
	"bytes"
	"encoding/json"
	"net/http"
	"time"

	jc "github.com/juju/testing/checkers"
	"github.com/juju/testing/httptesting"
	gc "gopkg.in/check.v1"
	"gopkg.in/juju/charm.v6-unstable"
	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"

	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
)

// logSuite tests the /log endpoint (retrieval and submission of log records).
type logSuite struct {
	commonSuite
}

var _ = gc.Suite(&logSuite{})

func (s *logSuite) SetUpSuite(c *gc.C) {
	// The log endpoint requires authentication, so enable the identity service.
	s.enableIdentity = true
	s.commonSuite.SetUpSuite(c)
}

// logResponses holds the fixture log records used throughout the suite,
// keyed by a short mnemonic name.
var logResponses = map[string]*params.LogResponse{
	"info1": {
		Data:  rawMessage("info data 1"),
		Level: params.InfoLevel,
		Type:  params.IngestionType,
	},
	"error1": {
		Data:  rawMessage("error data 1"),
		Level: params.ErrorLevel,
		Type:  params.IngestionType,
	},
	"info2": {
		Data:  rawMessage("info data 2"),
		Level: params.InfoLevel,
		Type:  params.IngestionType,
		URLs: []*charm.URL{
			charm.MustParseURL("precise/django"),
			charm.MustParseURL("django"),
			charm.MustParseURL("rails"),
		},
	},
	"warning1": {
		Data:  rawMessage("warning data 1"),
		Level: params.WarningLevel,
		Type:  params.IngestionType,
	},
	"error2": {
		Data:  rawMessage("error data 2"),
		Level: params.ErrorLevel,
		Type:  params.IngestionType,
		URLs: []*charm.URL{
			charm.MustParseURL("hadoop"),
		},
	},
	"info3": {
		Data:  rawMessage("info data 3"),
		Level: params.InfoLevel,
		Type:  params.IngestionType,
		URLs: []*charm.URL{
			charm.MustParseURL("trusty/django"),
			charm.MustParseURL("django"),
			charm.MustParseURL("utopic/hadoop"),
			charm.MustParseURL("hadoop"),
		},
	},
	"error3": {
		Data:  rawMessage("error data 3"),
		Level: params.ErrorLevel,
		Type:  params.IngestionType,
		URLs: []*charm.URL{
			charm.MustParseURL("utopic/hadoop"),
			charm.MustParseURL("hadoop"),
			charm.MustParseURL("precise/django"),
			charm.MustParseURL("django"),
		},
	},
	"stats": {
		Data:  rawMessage("statistics info data"),
		Level: params.InfoLevel,
		Type:  params.LegacyStatisticsType,
	},
}

// getLogsTests drives TestGetLogs: each case is a query string and the
// expected response records (most recently added first).
var getLogsTests = []struct {
	about       string
	querystring string
	expectBody  []*params.LogResponse
}{{
	about: "retrieve logs",
	expectBody: []*params.LogResponse{
		logResponses["stats"],
		logResponses["error3"],
		logResponses["info3"],
		logResponses["error2"],
		logResponses["warning1"],
		logResponses["info2"],
		logResponses["error1"],
		logResponses["info1"],
	},
}, {
	about:       "use limit",
	querystring: "?limit=2",
	expectBody: []*params.LogResponse{
		logResponses["stats"],
		logResponses["error3"],
	},
}, {
	about:       "use offset",
	querystring: "?skip=3",
	expectBody: []*params.LogResponse{
		logResponses["error2"],
		logResponses["warning1"],
		logResponses["info2"],
		logResponses["error1"],
		logResponses["info1"],
	},
}, {
	about:       "zero offset",
	querystring: "?skip=0",
	expectBody: []*params.LogResponse{
		logResponses["stats"],
		logResponses["error3"],
		logResponses["info3"],
		logResponses["error2"],
		logResponses["warning1"],
		logResponses["info2"],
		logResponses["error1"],
		logResponses["info1"],
	},
}, {
	about:       "use both limit and offset",
	querystring: "?limit=3&skip=1",
	expectBody: []*params.LogResponse{
		logResponses["error3"],
		logResponses["info3"],
		logResponses["error2"],
	},
}, {
	about:       "filter by level",
	querystring: "?level=info",
	expectBody: []*params.LogResponse{
		logResponses["stats"],
		logResponses["info3"],
		logResponses["info2"],
		logResponses["info1"],
	},
}, {
	about:       "filter by type",
	querystring: "?type=ingestion",
	expectBody: []*params.LogResponse{
		logResponses["error3"],
		logResponses["info3"],
		logResponses["error2"],
		logResponses["warning1"],
		logResponses["info2"],
		logResponses["error1"],
		logResponses["info1"],
	},
}, {
	about:       "filter by level with a limit",
	querystring: "?level=error&limit=2",
	expectBody: []*params.LogResponse{
		logResponses["error3"],
		logResponses["error2"],
	},
}, {
	about:       "filter by id",
	querystring: "?id=precise/django",
	expectBody: []*params.LogResponse{
		logResponses["error3"],
		logResponses["info2"],
	},
}, {
	about:       "multiple query",
	querystring: "?id=utopic/hadoop&limit=1&level=error",
	expectBody: []*params.LogResponse{
		logResponses["error3"],
	},
}, {
	about:       "empty response offset",
	querystring: "?id=utopic/hadoop&skip=10",
}, {
	about:       "empty response id not found",
	querystring: "?id=utopic/mysql",
}, {
	about:       "empty response level",
	querystring: "?id=trusty/rails&level=error",
}, {
	about:       "filter by type - legacyStatistics",
	querystring: "?type=legacyStatistics",
	expectBody: []*params.LogResponse{
		logResponses["stats"],
	},
}}

// paramsLogLevels maps API params log levels to internal mongodoc ones.
var paramsLogLevels = map[params.LogLevel]mongodoc.LogLevel{
	params.InfoLevel:    mongodoc.InfoLevel,
	params.WarningLevel: mongodoc.WarningLevel,
	params.ErrorLevel:   mongodoc.ErrorLevel,
}

// paramsLogTypes maps API params log types to internal mongodoc ones.
var paramsLogTypes = map[params.LogType]mongodoc.LogType{
	params.IngestionType:        mongodoc.IngestionType,
	params.LegacyStatisticsType: mongodoc.LegacyStatisticsType,
}

// TestGetLogs seeds the store with the fixture logs and runs the
// getLogsTests table against GET /log.
func (s *logSuite) TestGetLogs(c *gc.C) {
	// Add logs to the database.
	beforeAdding := time.Now().Add(-time.Second)
	for _, key := range []string{"info1", "error1", "info2", "warning1", "error2", "info3", "error3", "stats"} {
		resp := logResponses[key]
		err := s.store.AddLog(&resp.Data, paramsLogLevels[resp.Level], paramsLogTypes[resp.Type], resp.URLs)
		c.Assert(err, gc.IsNil)
	}
	afterAdding := time.Now().Add(time.Second)
	// Run the tests.
	for i, test := range getLogsTests {
		c.Logf("test %d: %s", i, test.about)
		rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
			Handler:  s.srv,
			URL:      storeURL("log" + test.querystring),
			Username: testUsername,
			Password: testPassword,
		})
		// Ensure the response is what we expect.
		c.Assert(rec.Code, gc.Equals, http.StatusOK)
		c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "application/json")
		// Decode the response.
		var logs []*params.LogResponse
		decoder := json.NewDecoder(rec.Body)
		err := decoder.Decode(&logs)
		c.Assert(err, gc.IsNil)
		// Check and then reset the response time so that the whole body
		// can be more easily compared later.
		for _, log := range logs {
			c.Assert(log.Time, jc.TimeBetween(beforeAdding, afterAdding))
			log.Time = time.Time{}
		}
		// Ensure the response includes the expected logs.
		c.Assert(logs, jc.DeepEquals, test.expectBody)
	}
}

// rawMessage JSON-encodes msg into a json.RawMessage, panicking on failure.
func rawMessage(msg string) json.RawMessage {
	message, err := json.Marshal(msg)
	if err != nil {
		panic(err)
	}
	return json.RawMessage(message)
}

// getLogsErrorsTests drives TestGetLogsErrors: invalid query parameters and
// the expected error responses.
var getLogsErrorsTests = []struct {
	about         string
	querystring   string
	expectStatus  int
	expectMessage string
	expectCode    params.ErrorCode
}{{
	about:         "invalid limit (negative number)",
	querystring:   "?limit=-100",
	expectStatus:  http.StatusBadRequest,
	expectMessage: "invalid limit value: value must be >= 1",
	expectCode:    params.ErrBadRequest,
}, {
	about:         "invalid limit (zero value)",
	querystring:   "?limit=0",
	expectStatus:  http.StatusBadRequest,
	expectMessage: "invalid limit value: value must be >= 1",
	expectCode:    params.ErrBadRequest,
}, {
	about:         "invalid limit (not a number)",
	querystring:   "?limit=foo",
	expectStatus:  http.StatusBadRequest,
	expectMessage: "invalid limit value: value must be a number",
	expectCode:    params.ErrBadRequest,
}, {
	about:         "invalid offset (negative number)",
	querystring:   "?skip=-100",
	expectStatus:  http.StatusBadRequest,
	expectMessage: "invalid skip value: value must be >= 0",
	expectCode:    params.ErrBadRequest,
}, {
	about:         "invalid offset (not a number)",
	querystring:   "?skip=bar",
	expectStatus:  http.StatusBadRequest,
	expectMessage: "invalid skip value: value must be a number",
	expectCode:    params.ErrBadRequest,
}, {
	about:         "invalid id",
	querystring:   "?id=no-such:reference",
	expectStatus:  http.StatusBadRequest,
	expectMessage: `invalid id value: charm or bundle URL has invalid schema: "no-such:reference"`,
	expectCode:    params.ErrBadRequest,
}, {
	about:         "invalid log level",
	querystring:   "?level=bar",
	expectStatus:  http.StatusBadRequest,
	expectMessage: "invalid log level value",
	expectCode:    params.ErrBadRequest,
}, {
	about:         "invalid log type",
	querystring:   "?type=no-such",
	expectStatus:  http.StatusBadRequest,
	expectMessage: "invalid log type value",
	expectCode:    params.ErrBadRequest,
}}

// TestGetLogsErrors runs the getLogsErrorsTests table against GET /log.
func (s *logSuite) TestGetLogsErrors(c *gc.C) {
	for i, test := range getLogsErrorsTests {
		c.Logf("test %d: %s", i, test.about)
		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
			Handler:      s.srv,
			URL:          storeURL("log" + test.querystring),
			Username:     testUsername,
			Password:     testPassword,
			ExpectStatus: test.expectStatus,
			ExpectBody: params.Error{
				Message: test.expectMessage,
				Code:    test.expectCode,
			},
		})
	}
}

// TestGetLogsErrorInvalidLog checks that a stored log record whose data is
// not valid JSON is silently skipped by GET /log.
func (s *logSuite) TestGetLogsErrorInvalidLog(c *gc.C) {
	// Add a non-parsable log message to the db directly.
	err := s.store.DB.Logs().Insert(mongodoc.Log{
		Data:  []byte("!"),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.IngestionType,
		Time:  time.Now(),
	})
	c.Assert(err, gc.IsNil)
	// The log is just ignored.
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:      s.srv,
		URL:          storeURL("log"),
		Username:     testUsername,
		Password:     testPassword,
		ExpectStatus: http.StatusOK,
		ExpectBody:   []params.LogResponse{},
	})
}

// TestPostLogs checks that a single posted log entry is stored, with charm
// URLs expanded to include their series-less base forms.
func (s *logSuite) TestPostLogs(c *gc.C) {
	// Prepare the request body.
	body := makeByteLogs(rawMessage("info data"), params.InfoLevel, params.IngestionType, []*charm.URL{
		charm.MustParseURL("trusty/django"),
		charm.MustParseURL("utopic/rails"),
	})
	// Send the request.
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:  s.srv,
		URL:      storeURL("log"),
		Method:   "POST",
		Username: testUsername,
		Password: testPassword,
		Header: http.Header{
			"Content-Type": {"application/json"},
		},
		Body:         bytes.NewReader(body),
		ExpectStatus: http.StatusOK,
	})
	// Ensure the log message has been added to the database.
	var doc mongodoc.Log
	err := s.store.DB.Logs().Find(nil).One(&doc)
	c.Assert(err, gc.IsNil)
	c.Assert(string(doc.Data), gc.Equals, `"info data"`)
	c.Assert(doc.Level, gc.Equals, mongodoc.InfoLevel)
	c.Assert(doc.Type, gc.Equals, mongodoc.IngestionType)
	c.Assert(doc.URLs, jc.DeepEquals, []*charm.URL{
		charm.MustParseURL("trusty/django"),
		charm.MustParseURL("django"),
		charm.MustParseURL("utopic/rails"),
		charm.MustParseURL("rails"),
	})
}

// TestPostLogsMultipleEntries checks that several log entries can be posted
// in one request and are all stored.
func (s *logSuite) TestPostLogsMultipleEntries(c *gc.C) {
	// Prepare the request body.
	infoData := rawMessage("info data")
	warningData := rawMessage("warning data")
	logs := []params.Log{{
		Data:  &infoData,
		Level: params.InfoLevel,
		Type:  params.IngestionType,
	}, {
		Data:  &warningData,
		Level: params.WarningLevel,
		Type:  params.IngestionType,
	}}
	body, err := json.Marshal(logs)
	c.Assert(err, gc.IsNil)
	// Send the request.
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:  s.srv,
		URL:      storeURL("log"),
		Method:   "POST",
		Username: testUsername,
		Password: testPassword,
		Header: http.Header{
			"Content-Type": {"application/json"},
		},
		Body:         bytes.NewReader(body),
		ExpectStatus: http.StatusOK,
	})
	// Ensure the log messages has been added to the database.
	var docs []mongodoc.Log
	err = s.store.DB.Logs().Find(nil).Sort("id").All(&docs)
	c.Assert(err, gc.IsNil)
	c.Assert(docs, gc.HasLen, 2)
	c.Assert(string(docs[0].Data), gc.Equals, string(infoData))
	c.Assert(docs[0].Level, gc.Equals, mongodoc.InfoLevel)
	c.Assert(string(docs[1].Data), gc.Equals, string(warningData))
	c.Assert(docs[1].Level, gc.Equals, mongodoc.WarningLevel)
}

// postLogsErrorsTests drives TestPostLogsErrors: malformed POST requests and
// the expected error responses.
var postLogsErrorsTests = []struct {
	about         string
	contentType   string
	body          []byte
	expectStatus  int
	expectMessage string
	expectCode    params.ErrorCode
}{{
	about:         "invalid content type",
	contentType:   "application/zip",
	expectStatus:  http.StatusBadRequest,
	expectMessage: `unexpected Content-Type "application/zip"; expected 'application/json'`,
	expectCode:    params.ErrBadRequest,
}, {
	about:         "invalid body",
	body:          []byte("!"),
	expectStatus:  http.StatusBadRequest,
	expectMessage: "cannot unmarshal body: invalid character '!' looking for beginning of value",
	expectCode:    params.ErrBadRequest,
}, {
	about:         "invalid log level",
	body:          makeByteLogs(rawMessage("message"), params.LogLevel(42), params.IngestionType, nil),
	expectStatus:  http.StatusBadRequest,
	expectMessage: "invalid log level",
	expectCode:    params.ErrBadRequest,
}, {
	about:         "invalid log type",
	body:          makeByteLogs(rawMessage("message"), params.WarningLevel, params.LogType(42), nil),
	expectStatus:  http.StatusBadRequest,
	expectMessage: "invalid log type",
	expectCode:    params.ErrBadRequest,
}}

// TestPostLogsErrors runs the postLogsErrorsTests table against POST /log.
func (s *logSuite) TestPostLogsErrors(c *gc.C) {
	url := storeURL("log")
	for i, test := range postLogsErrorsTests {
		c.Logf("test %d: %s", i, test.about)
		if test.contentType == "" {
			test.contentType = "application/json"
		}
		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
			Handler: s.srv,
			URL:     url,
			Method:  "POST",
			Header: http.Header{
				"Content-Type": {test.contentType},
			},
			Body:         bytes.NewReader(test.body),
			Username:     testUsername,
			Password:     testPassword,
			ExpectStatus: test.expectStatus,
			ExpectBody: params.Error{
				Message: test.expectMessage,
				Code:    test.expectCode,
			},
		})
	}
}

// TestGetLogsUnauthorizedError checks the authentication requirements of GET /log.
func (s *logSuite) TestGetLogsUnauthorizedError(c *gc.C) {
	s.AssertEndpointAuth(c, httptesting.JSONCallParams{
		URL:          storeURL("log"),
		ExpectStatus: http.StatusOK,
		ExpectBody:   []params.LogResponse{},
	})
}

// TestPostLogsUnauthorizedError checks that POST /log requires authentication.
func (s *logSuite) TestPostLogsUnauthorizedError(c *gc.C) {
	// Add a non-parsable log message to the db.
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler: s.noMacaroonSrv,
		URL:     storeURL("log"),
		Method:  "POST",
		Header: http.Header{
			"Content-Type": {"application/json"},
		},
		ExpectStatus: http.StatusUnauthorized,
		ExpectBody: params.Error{
			Message: "authentication failed: missing HTTP auth header",
			Code:    params.ErrUnauthorized,
		},
	})
}

// makeByteLogs marshals a single-entry []params.Log as JSON, panicking on failure.
func makeByteLogs(data json.RawMessage, logLevel params.LogLevel, logType params.LogType, urls []*charm.URL) []byte {
	logs := []params.Log{{
		Data:  &data,
		Level: logLevel,
		Type:  logType,
		URLs:  urls,
	}}
	b, err := json.Marshal(logs)
	if err != nil {
		panic(err)
	}
	return b
}
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/api.go0000664000175000017500000002705212672604603024647 0ustar marcomarco// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package v4 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4"

import (
	"net/http"
	"net/url"

	"github.com/juju/httprequest"
	"github.com/juju/loggo"
	"github.com/juju/mempool"
	"gopkg.in/errgo.v1"
	"gopkg.in/juju/charm.v6-unstable"
	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"

	"gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
	"gopkg.in/juju/charmstore.v5-unstable/internal/entitycache"
	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
	"gopkg.in/juju/charmstore.v5-unstable/internal/v5"
)

var logger = loggo.GetLogger("charmstore.internal.v4")

// Constants re-exported from the v5 implementation so that v4 callers
// need not import v5 directly.
const (
	PromulgatorsGroup         = v5.PromulgatorsGroup
	UsernameAttr              = v5.UsernameAttr
	DelegatableMacaroonExpiry = v5.DelegatableMacaroonExpiry
	DefaultIcon               = v5.DefaultIcon
	ArchiveCachePublicMaxAge  = v5.ArchiveCachePublicMaxAge
)

// reqHandlerPool holds a cache of ReqHandlers to save
// on allocation time. When a handler is done with,
// it is put back into the pool.
var reqHandlerPool = mempool.Pool{
	New: func() interface{} {
		return newReqHandler()
	},
}

// Handler is the v4 API handler. It embeds the v5 handler and
// overrides behavior where the two API versions differ.
type Handler struct {
	*v5.Handler
}

// ReqHandler handles a single v4 HTTP request. It embeds the v5
// per-request handler and shadows selected endpoints.
type ReqHandler struct {
	*v5.ReqHandler
}

// New returns a new v4 API handler backed by the given pool and
// server configuration, rooted at rootPath.
func New(pool *charmstore.Pool, config charmstore.ServerParams, rootPath string) Handler {
	return Handler{
		Handler: v5.New(pool, config, rootPath),
	}
}

// ServeHTTP implements http.Handler by acquiring a per-request
// handler, delegating to it, and releasing it afterwards.
func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	rh, err := h.NewReqHandler(req)
	if err != nil {
		router.WriteError(w, err)
		return
	}
	defer rh.Close()
	rh.ServeHTTP(w, req)
}

// NewAPIHandler returns a new Handler as an HTTPCloseHandler,
// suitable for registration with the charmstore server.
func NewAPIHandler(pool *charmstore.Pool, config charmstore.ServerParams, rootPath string) charmstore.HTTPCloseHandler {
	return New(pool, config, rootPath)
}

// The v4 resolvedURL function also requires SupportedSeries.
var requiredEntityFields = func() map[string]int {
	fields := make(map[string]int)
	for f := range v5.RequiredEntityFields {
		fields[f] = 1
	}
	fields["supportedseries"] = 1
	return fields
}()

// NewReqHandler returns an instance of a *ReqHandler
// suitable for handling the given HTTP request. After use, the ReqHandler.Close
// method should be called to close it.
//
// If no handlers are available, it returns an error with
// a charmstore.ErrTooManySessions cause.
func (h *Handler) NewReqHandler(req *http.Request) (ReqHandler, error) {
	// NOTE(review): the ParseForm error is deliberately ignored here;
	// a malformed form simply yields empty form values.
	req.ParseForm()
	// Validate all the values for channel, even though
	// most endpoints will only ever use the first one.
	// PUT to an archive is the notable exception.
	// TODO Why is the v4 API accepting a channel parameter anyway? We
	// should probably always use "stable".
	for _, ch := range req.Form["channel"] {
		if !v5.ValidChannels[params.Channel(ch)] {
			return ReqHandler{}, badRequestf(nil, "invalid channel %q specified in request", ch)
		}
	}
	store, err := h.Pool.RequestStore()
	if err != nil {
		if errgo.Cause(err) == charmstore.ErrTooManySessions {
			return ReqHandler{}, errgo.WithCausef(err, params.ErrServiceUnavailable, "")
		}
		return ReqHandler{}, errgo.Mask(err)
	}
	rh := reqHandlerPool.Get().(ReqHandler)
	rh.Handler = h.Handler
	rh.Store = &v5.StoreWithChannel{
		Store:   store,
		Channel: params.Channel(req.Form.Get("channel")),
	}
	rh.Cache = entitycache.New(rh.Store)
	rh.Cache.AddEntityFields(requiredEntityFields)
	rh.Cache.AddBaseEntityFields(v5.RequiredBaseEntityFields)
	return rh, nil
}

// newReqHandler builds a fresh ReqHandler with the v4 route table:
// it starts from the v5 routes, overrides the endpoints whose
// behavior differs in v4, and deletes the endpoints v4 does not expose.
// Used only by reqHandlerPool.
func newReqHandler() ReqHandler {
	h := ReqHandler{
		ReqHandler: new(v5.ReqHandler),
	}
	resolveId := h.ResolvedIdHandler
	authId := h.AuthIdHandler
	handlers := v5.RouterHandlers(h.ReqHandler)
	// v4-specific overrides.
	handlers.Global["search"] = router.HandleJSON(h.serveSearch)
	handlers.Meta["charm-related"] = h.EntityHandler(h.metaCharmRelated, "charmprovidedinterfaces", "charmrequiredinterfaces")
	handlers.Meta["charm-metadata"] = h.EntityHandler(h.metaCharmMetadata, "charmmeta")
	handlers.Meta["revision-info"] = router.SingleIncludeHandler(h.metaRevisionInfo)
	handlers.Meta["archive-size"] = h.EntityHandler(h.metaArchiveSize, "prev5blobsize")
	handlers.Meta["hash"] = h.EntityHandler(h.metaHash, "prev5blobhash")
	handlers.Meta["hash256"] = h.EntityHandler(h.metaHash256, "prev5blobhash256")
	handlers.Id["expand-id"] = resolveId(authId(h.serveExpandId))
	handlers.Id["archive"] = h.serveArchive(handlers.Id["archive"])
	handlers.Id["archive/"] = resolveId(authId(h.serveArchiveFile))
	// Delete new endpoints that we don't want to provide in v4.
	delete(handlers.Id, "publish")
	delete(handlers.Meta, "published")
	delete(handlers.Id, "resources")
	delete(handlers.Meta, "resources")
	h.Router = router.New(handlers, h)
	return h
}

// ResolveURL implements router.Context.ResolveURL,
// ensuring that any resulting ResolvedURL always
// has a non-empty PreferredSeries field.
func (h ReqHandler) ResolveURL(url *charm.URL) (*router.ResolvedURL, error) {
	return resolveURL(h.Cache, url)
}

// ResolveURLs resolves a batch of URLs, pre-warming the entity cache
// for all of them first. URLs that are not found produce a nil entry
// rather than an error.
func (h ReqHandler) ResolveURLs(urls []*charm.URL) ([]*router.ResolvedURL, error) {
	h.Cache.StartFetch(urls)
	rurls := make([]*router.ResolvedURL, len(urls))
	for i, url := range urls {
		var err error
		rurls[i], err = resolveURL(h.Cache, url)
		if err != nil && errgo.Cause(err) != params.ErrNotFound {
			return nil, err
		}
	}
	return rurls, nil
}
func resolveURL(cache *entitycache.Cache, url *charm.URL) (*router.ResolvedURL, error) { entity, err := cache.Entity(url, charmstore.FieldSelector("supportedseries")) if err != nil { return nil, errgo.Mask(err, errgo.Is(params.ErrNotFound)) } rurl := &router.ResolvedURL{ URL: *entity.URL, PromulgatedRevision: -1, } if url.User == "" { rurl.PromulgatedRevision = entity.PromulgatedRevision } if rurl.URL.Series != "" { return rurl, nil } if url.Series != "" { rurl.PreferredSeries = url.Series return rurl, nil } if len(entity.SupportedSeries) == 0 { return nil, errgo.Newf("entity %q has no supported series", &rurl.URL) } rurl.PreferredSeries = entity.SupportedSeries[0] return rurl, nil } // Close closes the ReqHandler. This should always be called when the // ReqHandler is done with. func (h ReqHandler) Close() { h.Store.Close() h.Cache.Close() h.Reset() reqHandlerPool.Put(h) } // StatsEnabled reports whether statistics should be gathered for // the given HTTP request. func StatsEnabled(req *http.Request) bool { return v5.StatsEnabled(req) } // GET id/meta/charm-metadata // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-metadata func (h ReqHandler) metaCharmMetadata(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { m := entity.CharmMeta if m != nil { m.Series = nil } return m, nil } // GET id/meta/revision-info // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetarevision-info func (h ReqHandler) metaRevisionInfo(id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { searchURL := id.PreferredURL() searchURL.Revision = -1 q := h.Store.EntitiesQuery(searchURL) if id.PromulgatedRevision != -1 { q = q.Sort("-promulgated-revision") } else { q = q.Sort("-revision") } var docs []*mongodoc.Entity if err := q.Select(bson.D{{"_id", 1}, {"promulgated-url", 1}, {"supportedseries", 1}}).All(&docs); err != nil { return "", 
errgo.Notef(err, "cannot get ids") } if len(docs) == 0 { return "", errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %s", id) } specifiedSeries := id.URL.Series if specifiedSeries == "" { specifiedSeries = id.PreferredSeries } var response params.RevisionInfoResponse expandMultiSeries(docs, func(series string, doc *mongodoc.Entity) error { if specifiedSeries != series { return nil } url := doc.PreferredURL(id.PromulgatedRevision != -1) url.Series = series response.Revisions = append(response.Revisions, url) return nil }) return &response, nil } // GET id/meta/archive-size // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaarchive-size func (h ReqHandler) metaArchiveSize(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return ¶ms.ArchiveSizeResponse{ Size: entity.PreV5BlobSize, }, nil } // GET id/meta/hash // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetahash func (h ReqHandler) metaHash(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return ¶ms.HashResponse{ Sum: entity.PreV5BlobHash, }, nil } // GET id/meta/hash256 // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetahash256 func (h ReqHandler) metaHash256(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return ¶ms.HashResponse{ Sum: entity.PreV5BlobHash256, }, nil } // GET id/expand-id // https://docs.google.com/a/canonical.com/document/d/1TgRA7jW_mmXoKH3JiwBbtPvQu7WiM6XMrz1wSrhTMXw/edit#bookmark=id.4xdnvxphb2si func (h ReqHandler) serveExpandId(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { baseURL := id.PreferredURL() baseURL.Revision = -1 baseURL.Series = "" // baseURL now represents the base URL of the given id; // it will be a promulgated URL iff the original URL was // specified without a 
user, which will cause EntitiesQuery // to return entities that match appropriately. // Retrieve all the entities with the same base URL. q := h.Store.EntitiesQuery(baseURL).Select(bson.D{{"_id", 1}, {"promulgated-url", 1}, {"supportedseries", 1}}) if id.PromulgatedRevision != -1 { q = q.Sort("-series", "-promulgated-revision") } else { q = q.Sort("-series", "-revision") } var docs []*mongodoc.Entity err := q.All(&docs) if err != nil && errgo.Cause(err) != mgo.ErrNotFound { return errgo.Mask(err) } // Collect all the expanded identifiers for each entity. response := make([]params.ExpandedId, 0, len(docs)) expandMultiSeries(docs, func(series string, doc *mongodoc.Entity) error { if err := h.AuthorizeEntity(charmstore.EntityResolvedURL(doc), req); err != nil { return nil } url := doc.PreferredURL(id.PromulgatedRevision != -1) url.Series = series response = append(response, params.ExpandedId{Id: url.String()}) return nil }) // Write the response in JSON format. return httprequest.WriteJSON(w, http.StatusOK, response) } // expandMultiSeries calls the provided append function once for every // supported series of each entry in the given entities slice. The series // argument will be passed as that series and the doc argument will point // to the entity. This function will only return an error if the append // function returns an error; such an error will be returned without // masking the cause. // // Note that the SupportedSeries field of the entities must have // been populated for this to work. func expandMultiSeries(entities []*mongodoc.Entity, append func(series string, doc *mongodoc.Entity) error) error { // TODO(rog) make this concurrent. 
for _, entity := range entities { if entity.URL.Series != "" { append(entity.URL.Series, entity) continue } for _, series := range entity.SupportedSeries { if err := append(series, entity); err != nil { return errgo.Mask(err, errgo.Any) } } } return nil } func badRequestf(underlying error, f string, a ...interface{}) error { err := errgo.WithCausef(underlying, params.ErrBadRequest, f, a...) err.(*errgo.Err).SetLocation(1) return err } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/relations.go0000664000175000017500000001317012672604603026072 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v4 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" import ( "net/http" "net/url" "gopkg.in/errgo.v1" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/mgo.v2/bson" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" "gopkg.in/juju/charmstore.v5-unstable/internal/router" ) // GET id/meta/charm-related[?include=meta[&include=meta…]] // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-related func (h ReqHandler) metaCharmRelated(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { if id.URL.Series == "bundle" { return nil, nil } // If the charm does not define any relation we can just return without // hitting the db. 
if len(entity.CharmProvidedInterfaces)+len(entity.CharmRequiredInterfaces) == 0 { return ¶ms.RelatedResponse{}, nil } q := h.Store.MatchingInterfacesQuery(entity.CharmProvidedInterfaces, entity.CharmRequiredInterfaces) fields := bson.D{ {"_id", 1}, {"supportedseries", 1}, {"charmrequiredinterfaces", 1}, {"charmprovidedinterfaces", 1}, {"promulgated-url", 1}, {"promulgated-revision", 1}, } var entities []*mongodoc.Entity if err := q.Select(fields).Sort("_id").All(&entities); err != nil { return nil, errgo.Notef(err, "cannot retrieve the related charms") } // If no entities are found there is no need for further processing the // results. if len(entities) == 0 { return ¶ms.RelatedResponse{}, nil } // Build the results, by grouping entities based on their relations' roles // and interfaces. includes := flags["include"] requires, err := h.getRelatedCharmsResponse(entity.CharmProvidedInterfaces, entities, func(e *mongodoc.Entity) []string { return e.CharmRequiredInterfaces }, includes, req) if err != nil { return nil, errgo.Notef(err, "cannot retrieve the charm requires") } provides, err := h.getRelatedCharmsResponse(entity.CharmRequiredInterfaces, entities, func(e *mongodoc.Entity) []string { return e.CharmProvidedInterfaces }, includes, req) if err != nil { return nil, errgo.Notef(err, "cannot retrieve the charm provides") } // Return the response. return ¶ms.RelatedResponse{ Requires: requires, Provides: provides, }, nil } type entityRelatedInterfacesGetter func(*mongodoc.Entity) []string // getRelatedCharmsResponse returns a response mapping interfaces to related // charms. 
// getRelatedCharmsResponse returns a response mapping interfaces to related
// charms. For instance:
//   map[string][]params.MetaAnyResponse{
//       "http": []params.MetaAnyResponse{
//           {Id: "cs:utopic/django-42", Meta: ...},
//           {Id: "cs:trusty/wordpress-47", Meta: ...},
//       },
//       "memcache": []params.MetaAnyResponse{
//           {Id: "cs:utopic/memcached-0", Meta: ...},
//       },
//   }
// Interfaces with no related charms are omitted from the map.
func (h ReqHandler) getRelatedCharmsResponse(
	ifaces []string,
	entities []*mongodoc.Entity,
	getInterfaces entityRelatedInterfacesGetter,
	includes []string,
	req *http.Request,
) (map[string][]params.EntityResult, error) {
	results := make(map[string][]params.EntityResult, len(ifaces))
	for _, iface := range ifaces {
		responses, err := h.getRelatedIfaceResponses(iface, entities, getInterfaces, includes, req)
		if err != nil {
			return nil, err
		}
		if len(responses) > 0 {
			results[iface] = responses
		}
	}
	return results, nil
}

// getRelatedIfaceResponses returns the metadata responses for the subset
// of entities whose interfaces (as selected by getInterfaces) contain
// the given interface.
func (h ReqHandler) getRelatedIfaceResponses(
	iface string,
	entities []*mongodoc.Entity,
	getInterfaces entityRelatedInterfacesGetter,
	includes []string,
	req *http.Request,
) ([]params.EntityResult, error) {
	// Build a list of responses including only entities which are related
	// to the given interface.
	usesInterface := func(e *mongodoc.Entity) bool {
		for _, entityIface := range getInterfaces(e) {
			if entityIface == iface {
				return true
			}
		}
		return false
	}
	resp, err := h.getMetadataForEntities(entities, includes, req, usesInterface)
	if err != nil {
		return nil, errgo.Mask(err)
	}
	return resp, nil
}

// getMetadataForEntities expands the entities across their supported
// series and collects metadata for each expanded id. Entities rejected
// by includeEntity (when non-nil), entities the requester may not read,
// and entities whose metadata cannot be retrieved are silently skipped
// (the latter is logged, so one broken entity does not fail the whole
// result set).
func (h ReqHandler) getMetadataForEntities(entities []*mongodoc.Entity, includes []string, req *http.Request, includeEntity func(*mongodoc.Entity) bool) ([]params.EntityResult, error) {
	response := make([]params.EntityResult, 0, len(entities))
	// Reject unknown metadata names up front.
	for _, inc := range includes {
		if h.Router.MetaHandler(inc) == nil {
			return nil, errgo.Newf("unrecognized metadata name %q", inc)
		}
	}
	err := expandMultiSeries(entities, func(series string, e *mongodoc.Entity) error {
		if includeEntity != nil && !includeEntity(e) {
			return nil
		}
		meta, err := h.getMetadataForEntity(e, includes, req)
		if err == errMetadataUnauthorized {
			return nil
		}
		if err != nil {
			// Unfortunately it is possible to get errors here due to
			// internal inconsistency, so rather than throwing away
			// all the search results, we just log the error and move on.
			logger.Errorf("cannot retrieve metadata for %v: %v", e.PreferredURL(true), err)
			return nil
		}
		id := e.PreferredURL(true)
		id.Series = series
		response = append(response, params.EntityResult{
			Id:   id,
			Meta: meta,
		})
		return nil
	})
	if err != nil {
		return nil, errgo.Mask(err)
	}
	return response, nil
}

// errMetadataUnauthorized is a sentinel used by getMetadataForEntity to
// signal that the current user may not read the entity.
var errMetadataUnauthorized = errgo.Newf("metadata unauthorized")
if err := h.AuthorizeEntity(rurl, req); err != nil { return nil, errMetadataUnauthorized } return h.Router.GetMetadata(rurl, includes, req) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/package_test.go0000664000175000017500000000046212672604603026524 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" import ( "testing" jujutesting "github.com/juju/testing" ) func TestPackage(t *testing.T) { jujutesting.MgoTestPackage(t, nil) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/list_test.go0000664000175000017500000004030412672604603026103 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" import ( "bytes" "encoding/json" "net/http" "sort" "strings" "github.com/juju/loggo" jc "github.com/juju/testing/checkers" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" ) type ListSuite struct { commonSuite } var _ = gc.Suite(&ListSuite{}) var exportListTestCharms = map[string]*router.ResolvedURL{ "wordpress": newResolvedURL("cs:~charmers/precise/wordpress-23", 23), "mysql": newResolvedURL("cs:~openstack-charmers/trusty/mysql-7", 7), "varnish": newResolvedURL("cs:~foo/trusty/varnish-1", -1), "riak": newResolvedURL("cs:~charmers/trusty/riak-67", 67), } var exportListTestBundles = map[string]*router.ResolvedURL{ "wordpress-simple": newResolvedURL("cs:~charmers/bundle/wordpress-simple-4", 4), } func (s *ListSuite) SetUpSuite(c *gc.C) { s.enableES = true 
s.enableIdentity = true s.commonSuite.SetUpSuite(c) } func (s *ListSuite) SetUpTest(c *gc.C) { s.commonSuite.SetUpTest(c) s.addCharmsToStore(c) // hide the riak charm err := s.store.SetPerms(charm.MustParseURL("cs:~charmers/riak"), "stable.read", "charmers", "test-user") c.Assert(err, gc.IsNil) err = s.store.UpdateSearch(newResolvedURL("~charmers/trusty/riak-0", 0)) c.Assert(err, gc.IsNil) err = s.esSuite.ES.RefreshIndex(s.esSuite.TestIndex) c.Assert(err, gc.IsNil) } func (s *ListSuite) addCharmsToStore(c *gc.C) { for name, id := range exportListTestCharms { s.addPublicCharm(c, getListCharm(name), id) } for name, id := range exportListTestBundles { s.addPublicBundle(c, getListBundle(name), id, false) } } func getListCharm(name string) *storetesting.Charm { ca := storetesting.Charms.CharmDir(name) meta := ca.Meta() meta.Categories = append(strings.Split(name, "-"), "bar") return storetesting.NewCharm(meta) } func getListBundle(name string) *storetesting.Bundle { ba := storetesting.Charms.BundleDir(name) data := ba.Data() data.Tags = append(strings.Split(name, "-"), "baz") return storetesting.NewBundle(data) } func (s *ListSuite) TestSuccessfulList(c *gc.C) { tests := []struct { about string query string results []*router.ResolvedURL }{{ about: "bare list", query: "", results: []*router.ResolvedURL{ exportTestBundles["wordpress-simple"], exportTestCharms["wordpress"], exportTestCharms["varnish"], exportTestCharms["mysql"], }, }, { about: "name filter list", query: "name=mysql", results: []*router.ResolvedURL{ exportTestCharms["mysql"], }, }, { about: "owner filter list", query: "owner=foo", results: []*router.ResolvedURL{ exportTestCharms["varnish"], }, }, { about: "series filter list", query: "series=trusty", results: []*router.ResolvedURL{ exportTestCharms["varnish"], exportTestCharms["mysql"], }, }, { about: "type filter list", query: "type=bundle", results: []*router.ResolvedURL{ exportTestBundles["wordpress-simple"], }, }, { about: "promulgated", query: 
"promulgated=1", results: []*router.ResolvedURL{ exportTestBundles["wordpress-simple"], exportTestCharms["wordpress"], exportTestCharms["mysql"], }, }, { about: "not promulgated", query: "promulgated=0", results: []*router.ResolvedURL{ exportTestCharms["varnish"], }, }, { about: "promulgated with owner", query: "promulgated=1&owner=openstack-charmers", results: []*router.ResolvedURL{ exportTestCharms["mysql"], }, }} for i, test := range tests { c.Logf("test %d. %s", i, test.about) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list?" + test.query), }) var sr params.ListResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, len(test.results)) c.Logf("results: %s", rec.Body.Bytes()) for i := range test.results { c.Assert(sr.Results[i].Id.String(), gc.Equals, test.results[i].PreferredURL().String(), gc.Commentf("element %d")) } } } func (s *ListSuite) TestMetadataFields(c *gc.C) { tests := []struct { about string query string meta map[string]interface{} }{{ about: "archive-size", query: "name=mysql&include=archive-size", meta: map[string]interface{}{ "archive-size": params.ArchiveSizeResponse{getListCharm("mysql").Size()}, }, }, { about: "bundle-metadata", query: "name=wordpress-simple&type=bundle&include=bundle-metadata", meta: map[string]interface{}{ "bundle-metadata": getListBundle("wordpress-simple").Data(), }, }, { about: "bundle-machine-count", query: "name=wordpress-simple&type=bundle&include=bundle-machine-count", meta: map[string]interface{}{ "bundle-machine-count": params.BundleCount{2}, }, }, { about: "bundle-unit-count", query: "name=wordpress-simple&type=bundle&include=bundle-unit-count", meta: map[string]interface{}{ "bundle-unit-count": params.BundleCount{2}, }, }, { about: "charm-actions", query: "name=wordpress&type=charm&include=charm-actions", meta: map[string]interface{}{ "charm-actions": getListCharm("wordpress").Actions(), }, }, { about: 
"charm-config", query: "name=wordpress&type=charm&include=charm-config", meta: map[string]interface{}{ "charm-config": getListCharm("wordpress").Config(), }, }, { about: "charm-related", query: "name=wordpress&type=charm&include=charm-related", meta: map[string]interface{}{ "charm-related": params.RelatedResponse{ Provides: map[string][]params.EntityResult{ "mysql": { { Id: exportTestCharms["mysql"].PreferredURL(), }, }, "varnish": { { Id: exportTestCharms["varnish"].PreferredURL(), }, }, }, }, }, }, { about: "multiple values", query: "name=wordpress&type=charm&include=charm-related&include=charm-config", meta: map[string]interface{}{ "charm-related": params.RelatedResponse{ Provides: map[string][]params.EntityResult{ "mysql": { { Id: exportTestCharms["mysql"].PreferredURL(), }, }, "varnish": { { Id: exportTestCharms["varnish"].PreferredURL(), }, }, }, }, "charm-config": getListCharm("wordpress").Config(), }, }} for i, test := range tests { c.Logf("test %d. %s", i, test.about) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list?" + test.query), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) var sr struct { Results []struct { Meta json.RawMessage } } err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, 1) c.Assert(string(sr.Results[0].Meta), jc.JSONEquals, test.meta) } } func (s *ListSuite) TestListIncludeError(c *gc.C) { // Perform a list for all charms, including the // manifest, which will try to retrieve all charm // blobs. rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list?type=charm&include=manifest"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) var resp params.ListResponse err := json.Unmarshal(rec.Body.Bytes(), &resp) // cs:riak will not be found because it is not visible to // "everyone". c.Assert(resp.Results, gc.HasLen, len(exportListTestCharms)-1) // Now remove one of the blobs. 
The list should still // work, but only return a single result. entity, err := s.store.FindEntity(newResolvedURL("~charmers/precise/wordpress-23", 23), nil) c.Assert(err, gc.IsNil) err = s.store.BlobStore.Remove(entity.BlobName) c.Assert(err, gc.IsNil) // Now list again - we should get one result less // (and the error will be logged). // Register a logger that so that we can check the logging output. // It will be automatically removed later because IsolatedMgoESSuite // uses LoggingSuite. var tw loggo.TestWriter err = loggo.RegisterWriter("test-log", &tw, loggo.DEBUG) c.Assert(err, gc.IsNil) rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list?type=charm&include=manifest"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) resp = params.ListResponse{} err = json.Unmarshal(rec.Body.Bytes(), &resp) // cs:riak will not be found because it is not visible to "everyone". // cs:wordpress will not be found because it has no manifest. c.Assert(resp.Results, gc.HasLen, len(exportListTestCharms)-2) c.Assert(tw.Log(), jc.LogMatches, []string{"cannot retrieve metadata for cs:precise/wordpress-23: cannot open archive data for cs:precise/wordpress-23: .*"}) } func (s *ListSuite) TestSortingList(c *gc.C) { tests := []struct { about string query string results []*router.ResolvedURL }{{ about: "name ascending", query: "sort=name", results: []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "name descending", query: "sort=-name", results: []*router.ResolvedURL{ exportTestBundles["wordpress-simple"], exportTestCharms["wordpress"], exportTestCharms["varnish"], exportTestCharms["mysql"], }, }, { about: "series ascending", query: "sort=series,name", results: []*router.ResolvedURL{ exportTestBundles["wordpress-simple"], exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], }, }, { about: "series 
descending", query: "sort=-series&sort=name", results: []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "owner ascending", query: "sort=owner,name", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], exportTestCharms["varnish"], exportTestCharms["mysql"], }, }, { about: "owner descending", query: "sort=-owner&sort=name", results: []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }} for i, test := range tests { c.Logf("test %d. %s", i, test.about) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list?" + test.query), }) var sr params.ListResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, len(test.results), gc.Commentf("expected %#v", test.results)) c.Logf("results: %s", rec.Body.Bytes()) for i := range test.results { c.Assert(sr.Results[i].Id.String(), gc.Equals, test.results[i].PreferredURL().String(), gc.Commentf("element %d")) } } } func (s *ListSuite) TestSortUnsupportedListField(c *gc.C) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list?sort=text"), }) var e params.Error err := json.Unmarshal(rec.Body.Bytes(), &e) c.Assert(err, gc.IsNil) c.Assert(e.Code, gc.Equals, params.ErrBadRequest) c.Assert(e.Message, gc.Equals, "invalid sort field: unrecognized sort parameter \"text\"") } func (s *ListSuite) TestGetLatestRevisionOnly(c *gc.C) { id := newResolvedURL("cs:~charmers/precise/wordpress-24", 24) s.addPublicCharm(c, getListCharm("wordpress"), id) testresults := []*router.ResolvedURL{ exportTestBundles["wordpress-simple"], id, exportTestCharms["varnish"], exportTestCharms["mysql"], } rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: 
s.srv, URL: storeURL("list"), }) var sr params.ListResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, 4, gc.Commentf("expected %#v", testresults)) c.Logf("results: %s", rec.Body.Bytes()) for i := range testresults { c.Assert(sr.Results[i].Id.String(), gc.Equals, testresults[i].PreferredURL().String(), gc.Commentf("element %d")) } testresults = []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["varnish"], id, exportTestBundles["wordpress-simple"], } rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list?sort=name"), }) err = json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, 4, gc.Commentf("expected %#v", testresults)) c.Logf("results: %s", rec.Body.Bytes()) for i := range testresults { c.Assert(sr.Results[i].Id.String(), gc.Equals, testresults[i].PreferredURL().String(), gc.Commentf("element %d")) } } func (s *ListSuite) assertPut(c *gc.C, url string, val interface{}) { body, err := json.Marshal(val) c.Assert(err, gc.IsNil) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(url), Method: "PUT", Header: http.Header{ "Content-Type": {"application/json"}, }, Username: testUsername, Password: testPassword, Body: bytes.NewReader(body), }) c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("headers: %v, body: %s", rec.HeaderMap, rec.Body.String())) c.Assert(rec.Body.String(), gc.HasLen, 0) } func (s *ListSuite) TestListWithAdminCredentials(c *gc.C) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list"), Username: testUsername, Password: testPassword, }) c.Assert(rec.Code, gc.Equals, http.StatusOK) expected := []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["wordpress"], exportTestCharms["riak"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], } var sr params.ListResponse err := 
json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) assertListResultSet(c, sr, expected) } func (s *ListSuite) TestListWithUserMacaroon(c *gc.C) { m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ checkers.DeclaredCaveat("username", "test-user"), }) c.Assert(err, gc.IsNil) macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m}) c.Assert(err, gc.IsNil) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list"), Cookies: []*http.Cookie{macaroonCookie}, }) c.Assert(rec.Code, gc.Equals, http.StatusOK) expected := []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["wordpress"], exportTestCharms["riak"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], } var sr params.ListResponse err = json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) assertListResultSet(c, sr, expected) } func (s *ListSuite) TestSearchWithBadAdminCredentialsAndACookie(c *gc.C) { m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ checkers.DeclaredCaveat("username", "test-user"), }) c.Assert(err, gc.IsNil) macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m}) c.Assert(err, gc.IsNil) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list"), Cookies: []*http.Cookie{macaroonCookie}, Username: testUsername, Password: "bad-password", }) c.Assert(rec.Code, gc.Equals, http.StatusOK) expected := []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["wordpress"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], } var sr params.ListResponse err = json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) assertListResultSet(c, sr, expected) } func assertListResultSet(c *gc.C, sr params.ListResponse, expected []*router.ResolvedURL) { sort.Sort(listResultById(sr.Results)) sort.Sort(resolvedURLByPreferredURL(expected)) c.Assert(sr.Results, gc.HasLen, len(expected), gc.Commentf("expected %#v", 
expected)) for i := range expected { c.Assert(sr.Results[i].Id.String(), gc.Equals, expected[i].PreferredURL().String(), gc.Commentf("element %d")) } } type listResultById []params.EntityResult func (s listResultById) Len() int { return len(s) } func (s listResultById) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s listResultById) Less(i, j int) bool { return s[i].Id.String() < s[j].Id.String() } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/common_test.go0000664000175000017500000003155212672604603026425 0ustar marcomarcopackage v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" import ( "bytes" "encoding/json" "io" "net/http" "net/http/httptest" "time" "github.com/juju/loggo" jujutesting "github.com/juju/testing" "github.com/juju/testing/httptesting" "github.com/julienschmidt/httprouter" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/bakerytest" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/mgo.v2" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" "gopkg.in/juju/charmstore.v5-unstable/internal/v4" ) var mgoLogger = loggo.GetLogger("mgo") func init() { mgo.SetLogger(mgoLog{}) } type mgoLog struct{} func (mgoLog) Output(calldepth int, s string) error { mgoLogger.LogCallf(calldepth+1, loggo.INFO, "%s", s) return nil } type commonSuite struct { jujutesting.IsolatedMgoSuite // srv holds the store HTTP handler. srv *charmstore.Server // srvParams holds the parameters that the // srv handler was started with srvParams charmstore.ServerParams // noMacaroonSrv holds the store HTTP handler // for an instance of the store without identity // enabled. If enableIdentity is false, this is // the same as srv. 
noMacaroonSrv *charmstore.Server // noMacaroonSrvParams holds the parameters that the // noMacaroonSrv handler was started with noMacaroonSrvParams charmstore.ServerParams // store holds an instance of *charm.Store // that can be used to access the charmstore database // directly. store *charmstore.Store // esSuite is set only when enableES is set to true. esSuite *storetesting.ElasticSearchSuite // discharge holds the function that will be used // to check third party caveats by the mock // discharger. This will be ignored if enableIdentity was // not true before commonSuite.SetUpTest is invoked. // // It may be set by tests to influence the behavior of the // discharger. discharge func(cav, arg string) ([]checkers.Caveat, error) discharger *bakerytest.Discharger idM *idM idMServer *httptest.Server // The following fields may be set before // SetUpSuite is invoked on commonSuite // and influences how the suite sets itself up. // enableIdentity holds whether the charmstore server // will be started with a configured identity service. enableIdentity bool // enableES holds whether the charmstore server will be // started with Elastic Search enabled. enableES bool // maxMgoSessions specifies the value that will be given // to config.MaxMgoSessions when calling charmstore.NewServer. 
maxMgoSessions int } func (s *commonSuite) SetUpSuite(c *gc.C) { s.IsolatedMgoSuite.SetUpSuite(c) if s.enableES { s.esSuite = new(storetesting.ElasticSearchSuite) s.esSuite.SetUpSuite(c) } } func (s *commonSuite) TearDownSuite(c *gc.C) { if s.esSuite != nil { s.esSuite.TearDownSuite(c) } } func (s *commonSuite) SetUpTest(c *gc.C) { s.IsolatedMgoSuite.SetUpTest(c) if s.esSuite != nil { s.esSuite.SetUpTest(c) } if s.enableIdentity { s.idM = newIdM() s.idMServer = httptest.NewServer(s.idM) } s.startServer(c) } func (s *commonSuite) TearDownTest(c *gc.C) { s.store.Pool().Close() s.store.Close() s.srv.Close() s.noMacaroonSrv.Close() if s.esSuite != nil { s.esSuite.TearDownTest(c) } if s.discharger != nil { s.discharger.Close() s.idMServer.Close() } s.IsolatedMgoSuite.TearDownTest(c) } // startServer creates a new charmstore server. func (s *commonSuite) startServer(c *gc.C) { config := charmstore.ServerParams{ AuthUsername: testUsername, AuthPassword: testPassword, StatsCacheMaxAge: time.Nanosecond, MaxMgoSessions: s.maxMgoSessions, } keyring := bakery.NewPublicKeyRing() if s.enableIdentity { s.discharge = func(_, _ string) ([]checkers.Caveat, error) { return nil, errgo.New("no discharge") } discharger := bakerytest.NewDischarger(nil, func(_ *http.Request, cond string, arg string) ([]checkers.Caveat, error) { return s.discharge(cond, arg) }) config.IdentityLocation = discharger.Location() config.IdentityAPIURL = s.idMServer.URL pk, err := httpbakery.PublicKeyForLocation(http.DefaultClient, discharger.Location()) c.Assert(err, gc.IsNil) err = keyring.AddPublicKeyForLocation(discharger.Location(), true, pk) c.Assert(err, gc.IsNil) } config.PublicKeyLocator = keyring var si *charmstore.SearchIndex if s.enableES { si = &charmstore.SearchIndex{ Database: s.esSuite.ES, Index: s.esSuite.TestIndex, } } db := s.Session.DB("charmstore") var err error s.srv, err = charmstore.NewServer(db, si, config, map[string]charmstore.NewAPIHandlerFunc{"v4": v4.NewAPIHandler}) c.Assert(err, 
gc.IsNil) s.srvParams = config if s.enableIdentity { config.IdentityLocation = "" config.PublicKeyLocator = nil config.IdentityAPIURL = "" s.noMacaroonSrv, err = charmstore.NewServer(db, si, config, map[string]charmstore.NewAPIHandlerFunc{"v4": v4.NewAPIHandler}) c.Assert(err, gc.IsNil) } else { s.noMacaroonSrv = s.srv } s.noMacaroonSrvParams = config s.store = s.srv.Pool().Store() } func (s *commonSuite) addPublicCharmFromRepo(c *gc.C, charmName string, rurl *router.ResolvedURL) (*router.ResolvedURL, charm.Charm) { return s.addPublicCharm(c, storetesting.Charms.CharmDir(charmName), rurl) } func (s *commonSuite) addPublicCharm(c *gc.C, ch charm.Charm, rurl *router.ResolvedURL) (*router.ResolvedURL, charm.Charm) { err := s.store.AddCharmWithArchive(rurl, ch) c.Assert(err, gc.IsNil) s.setPublic(c, rurl) return rurl, ch } func (s *commonSuite) setPublic(c *gc.C, rurl *router.ResolvedURL) { err := s.store.SetPerms(&rurl.URL, "stable.read", params.Everyone) c.Assert(err, gc.IsNil) err = s.store.Publish(rurl, params.StableChannel) c.Assert(err, gc.IsNil) } func (s *commonSuite) addPublicBundleFromRepo(c *gc.C, bundleName string, rurl *router.ResolvedURL, addRequiredCharms bool) (*router.ResolvedURL, charm.Bundle) { return s.addPublicBundle(c, storetesting.Charms.BundleDir(bundleName), rurl, addRequiredCharms) } func (s *commonSuite) addPublicBundle(c *gc.C, bundle charm.Bundle, rurl *router.ResolvedURL, addRequiredCharms bool) (*router.ResolvedURL, charm.Bundle) { if addRequiredCharms { s.addRequiredCharms(c, bundle) } err := s.store.AddBundleWithArchive(rurl, bundle) c.Assert(err, gc.IsNil) s.setPublic(c, rurl) return rurl, bundle } // addCharms adds all the given charms to s.store. The // map key is the id of the charm. 
func (s *commonSuite) addCharms(c *gc.C, charms map[string]charm.Charm) { for id, ch := range charms { s.addPublicCharm(c, storetesting.NewCharm(ch.Meta()), mustParseResolvedURL(id)) } } // setPerms sets the stable channel read permissions of a set of // entities. The map key is the is the id of each entity; its associated // value is its read ACL. func (s *commonSuite) setPerms(c *gc.C, readACLs map[string][]string) { for url, acl := range readACLs { err := s.store.SetPerms(charm.MustParseURL(url), "stable.read", acl...) c.Assert(err, gc.IsNil) } } // handler returns a request handler that can be // used to invoke private methods. The caller // is responsible for calling Put on the returned handler. func (s *commonSuite) handler(c *gc.C) v4.ReqHandler { h := v4.New(s.store.Pool(), s.srvParams, "") defer h.Close() rh, err := h.NewReqHandler(new(http.Request)) c.Assert(err, gc.IsNil) // It would be nice if we could call s.AddCleanup here // to call rh.Put when the test has completed, but // unfortunately CleanupSuite.TearDownTest runs // after MgoSuite.TearDownTest, so that's not an option. return rh } func storeURL(path string) string { return "/v4/" + path } // addRequiredCharms adds any charms required by the given // bundle that are not already in the store. func (s *commonSuite) addRequiredCharms(c *gc.C, bundle charm.Bundle) { for _, svc := range bundle.Data().Services { u := charm.MustParseURL(svc.Charm) if _, err := s.store.FindBestEntity(u, params.StableChannel, nil); err == nil { continue } if u.Revision == -1 { u.Revision = 0 } var rurl router.ResolvedURL rurl.URL = *u chDir, err := charm.ReadCharmDir(storetesting.Charms.CharmDirPath(u.Name)) ch := charm.Charm(chDir) if err != nil { // The charm doesn't exist in the local charm repo; make one up. 
ch = storetesting.NewCharm(nil) } if len(ch.Meta().Series) == 0 && u.Series == "" { rurl.URL.Series = "trusty" } if u.User == "" { rurl.URL.User = "charmers" rurl.PromulgatedRevision = rurl.URL.Revision } else { rurl.PromulgatedRevision = -1 } c.Logf("adding charm %v %d required by bundle to fulfil %v", &rurl.URL, rurl.PromulgatedRevision, svc.Charm) s.addPublicCharm(c, ch, &rurl) } } func (s *commonSuite) assertPut(c *gc.C, url string, val interface{}) { s.assertPut0(c, url, val, false) } func (s *commonSuite) assertPutAsAdmin(c *gc.C, url string, val interface{}) { s.assertPut0(c, url, val, true) } func (s *commonSuite) assertPut0(c *gc.C, url string, val interface{}, asAdmin bool) { body, err := json.Marshal(val) c.Assert(err, gc.IsNil) p := httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(url), Method: "PUT", Do: bakeryDo(nil), Header: http.Header{ "Content-Type": {"application/json"}, }, Body: bytes.NewReader(body), } if asAdmin { p.Username = testUsername p.Password = testPassword } httptesting.AssertJSONCall(c, p) } func (s *commonSuite) assertGet(c *gc.C, url string, expectVal interface{}) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL(url), ExpectBody: expectVal, }) } // assertGetIsUnauthorized asserts that a GET to the given URL results // in an ErrUnauthorized response with the given error message. func (s *commonSuite) assertGetIsUnauthorized(c *gc.C, url, expectMessage string) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), Method: "GET", URL: storeURL(url), ExpectStatus: http.StatusUnauthorized, ExpectBody: params.Error{ Code: params.ErrUnauthorized, Message: expectMessage, }, }) } // assertGetIsUnauthorized asserts that a PUT to the given URL with the // given body value results in an ErrUnauthorized response with the given // error message. 
func (s *commonSuite) assertPutIsUnauthorized(c *gc.C, url string, val interface{}, expectMessage string) { body, err := json.Marshal(val) c.Assert(err, gc.IsNil) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(url), Method: "PUT", Do: bakeryDo(nil), Header: http.Header{ "Content-Type": {"application/json"}, }, Body: bytes.NewReader(body), ExpectStatus: http.StatusUnauthorized, ExpectBody: params.Error{ Code: params.ErrUnauthorized, Message: expectMessage, }, }) } // doAsUser calls the given function, discharging any authorization // request as the given user name. func (s *commonSuite) doAsUser(user string, f func()) { old := s.discharge s.discharge = dischargeForUser(user) defer func() { s.discharge = old }() f() } func bakeryDo(client *http.Client) func(*http.Request) (*http.Response, error) { if client == nil { client = httpbakery.NewHTTPClient() } bclient := httpbakery.NewClient() bclient.Client = client return func(req *http.Request) (*http.Response, error) { if req.Body != nil { body := req.Body.(io.ReadSeeker) req.Body = nil return bclient.DoWithBody(req, body) } return bclient.Do(req) } } type idM struct { // groups may be set to determine the mapping // from user to groups for that user. groups map[string][]string // body may be set to cause serveGroups to return // an arbitrary HTTP response body. body string // contentType is the contentType to use when body is not "" contentType string // status may be set to indicate the HTTP status code // when body is not nil. 
status int router *httprouter.Router } func newIdM() *idM { idM := &idM{ groups: make(map[string][]string), router: httprouter.New(), } idM.router.GET("/v1/u/:user/groups", idM.serveGroups) idM.router.GET("/v1/u/:user/idpgroups", idM.serveGroups) return idM } func (idM *idM) ServeHTTP(w http.ResponseWriter, req *http.Request) { idM.router.ServeHTTP(w, req) } func (idM *idM) serveGroups(w http.ResponseWriter, req *http.Request, p httprouter.Params) { if idM.body != "" { if idM.contentType != "" { w.Header().Set("Content-Type", idM.contentType) } if idM.status != 0 { w.WriteHeader(idM.status) } w.Write([]byte(idM.body)) return } u := p.ByName("user") if u == "" { panic("no user") } w.Header().Set("Content-Type", "application/json") enc := json.NewEncoder(w) if err := enc.Encode(idM.groups[u]); err != nil { panic(err) } } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/stats_test.go0000664000175000017500000003665512672604603026304 0ustar marcomarco// Copyright 2012 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" import ( "encoding/json" "net/http" "net/url" "strings" "time" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" "gopkg.in/juju/charmstore.v5-unstable/internal/v4" ) type StatsSuite struct { commonSuite } var _ = gc.Suite(&StatsSuite{}) func (s *StatsSuite) TestServerStatsStatus(c *gc.C) { tests := []struct { path string status int message string code params.ErrorCode }{{ path: "stats/counter/", status: http.StatusForbidden, message: "forbidden", code: params.ErrForbidden, }, { path: "stats/counter/*", status: http.StatusForbidden, message: "unknown key", code: params.ErrForbidden, }, { path: "stats/counter/any/", status: http.StatusNotFound, message: "invalid key", code: params.ErrNotFound, }, { path: "stats/", status: http.StatusNotFound, message: "not found", code: params.ErrNotFound, }, { path: "stats/any", status: http.StatusNotFound, message: "not found", code: params.ErrNotFound, }, { path: "stats/counter/any?by=fortnight", status: http.StatusBadRequest, message: `invalid 'by' value "fortnight"`, code: params.ErrBadRequest, }, { path: "stats/counter/any?start=tomorrow", status: http.StatusBadRequest, message: `invalid 'start' value "tomorrow": parsing time "tomorrow" as "2006-01-02": cannot parse "tomorrow" as "2006"`, code: params.ErrBadRequest, }, { path: "stats/counter/any?stop=3", status: http.StatusBadRequest, message: `invalid 'stop' value "3": parsing time "3" as "2006-01-02": cannot parse "3" as "2006"`, code: params.ErrBadRequest, }} for i, test := range tests { c.Logf("test %d. 
%s", i, test.path) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(test.path), ExpectStatus: test.status, ExpectBody: params.Error{ Message: test.message, Code: test.code, }, }) } } func (s *StatsSuite) TestServerStatsUpdate(c *gc.C) { ref := charm.MustParseURL("~charmers/precise/wordpress-23") tests := []struct { path string status int body params.StatsUpdateRequest expectBody map[string]interface{} previousMonth bool }{{ path: "stats/update", status: http.StatusOK, body: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now(), CharmReference: charm.MustParseURL("~charmers/wordpress"), }}}, }, { path: "stats/update", status: http.StatusOK, body: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now(), CharmReference: ref, }}, }, }, { path: "stats/update", status: http.StatusOK, body: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now().AddDate(0, -1, 0), CharmReference: ref, }}, }, previousMonth: true, }} s.addPublicCharm(c, storetesting.Charms.CharmDir("wordpress"), newResolvedURL("~charmers/precise/wordpress-23", 23)) var countsBefore, countsAfter charmstore.AggregatedCounts for i, test := range tests { c.Logf("test %d. 
%s", i, test.path) var err error _, countsBefore, err = s.store.ArchiveDownloadCounts(ref, true) c.Assert(err, gc.IsNil) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(test.path), Method: "PUT", Username: testUsername, Password: testPassword, JSONBody: test.body, }) c.Assert(rec.Code, gc.Equals, test.status) _, countsAfter, err = s.store.ArchiveDownloadCounts(ref, true) c.Assert(err, gc.IsNil) c.Assert(countsAfter.Total-countsBefore.Total, gc.Equals, int64(1)) if test.previousMonth { c.Assert(countsAfter.LastDay-countsBefore.LastDay, gc.Equals, int64(0)) } else { c.Assert(countsAfter.LastDay-countsBefore.LastDay, gc.Equals, int64(1)) } } } func (s *StatsSuite) TestServerStatsArchiveDownloadOnPromulgatedEntity(c *gc.C) { ref := charm.MustParseURL("~charmers/precise/wordpress-23") path := "/stats/counter/archive-download:*" rurl := newResolvedURL("~charmers/precise/wordpress-23", 23) s.addPublicCharm(c, storetesting.Charms.CharmDir("wordpress"), rurl) s.store.SetPromulgated(rurl, true) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(path), Method: "GET", }) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.String(), gc.Equals, `[{"Count":0}]`) rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("stats/update"), Method: "PUT", Username: testUsername, Password: testPassword, JSONBody: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now(), CharmReference: ref, }}}, }) c.Assert(rec.Code, gc.Equals, http.StatusOK) rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(path), Method: "GET", }) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.String(), gc.Equals, `[{"Count":1}]`) } func (s *StatsSuite) TestServerStatsUpdateErrors(c *gc.C) { ref := charm.MustParseURL("~charmers/precise/wordpress-23") tests := []struct { path string status int body 
params.StatsUpdateRequest expectMessage string expectCode params.ErrorCode partialUpdate bool }{{ path: "stats/update", status: http.StatusInternalServerError, body: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now(), CharmReference: charm.MustParseURL("~charmers/precise/unknown-23"), }}, }, expectMessage: `cannot find entity for url cs:~charmers/precise/unknown-23: no matching charm or bundle for cs:~charmers/precise/unknown-23`, }, { path: "stats/update", status: http.StatusInternalServerError, body: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now(), CharmReference: charm.MustParseURL("~charmers/precise/unknown-23"), }, { Timestamp: time.Now(), CharmReference: charm.MustParseURL("~charmers/precise/wordpress-23"), }}, }, expectMessage: `cannot find entity for url cs:~charmers/precise/unknown-23: no matching charm or bundle for cs:~charmers/precise/unknown-23`, partialUpdate: true, }} s.addPublicCharm(c, storetesting.Charms.CharmDir("wordpress"), newResolvedURL("~charmers/precise/wordpress-23", 23)) for i, test := range tests { c.Logf("test %d. 
%s", i, test.path) var countsBefore charmstore.AggregatedCounts if test.partialUpdate { var err error _, countsBefore, err = s.store.ArchiveDownloadCounts(ref, true) c.Assert(err, gc.IsNil) } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(test.path), Method: "PUT", Username: testUsername, Password: testPassword, JSONBody: test.body, ExpectStatus: test.status, ExpectBody: params.Error{ Message: test.expectMessage, Code: test.expectCode, }, }) if test.partialUpdate { _, countsAfter, err := s.store.ArchiveDownloadCounts(ref, true) c.Assert(err, gc.IsNil) c.Assert(countsAfter.Total-countsBefore.Total, gc.Equals, int64(1)) c.Assert(countsAfter.LastDay-countsBefore.LastDay, gc.Equals, int64(1)) } } } func (s *StatsSuite) TestServerStatsUpdateNonAdmin(c *gc.C) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("stats/update"), Method: "PUT", JSONBody: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now(), CharmReference: charm.MustParseURL("~charmers/precise/wordpress-23"), }}, }, ExpectStatus: http.StatusUnauthorized, ExpectBody: ¶ms.Error{ Message: "authentication failed: missing HTTP auth header", Code: params.ErrUnauthorized, }, }) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("stats/update"), Method: "PUT", Username: "brad", Password: "pitt", JSONBody: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now(), CharmReference: charm.MustParseURL("~charmers/precise/wordpress-23"), }}, }, ExpectStatus: http.StatusUnauthorized, ExpectBody: ¶ms.Error{ Message: "invalid user name or password", Code: params.ErrUnauthorized, }, }) } func (s *StatsSuite) TestStatsCounter(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } for _, key := range [][]string{{"a", "b"}, {"a", "b"}, {"a", "c"}, {"a"}} { err := s.store.IncCounter(key) c.Assert(err, gc.IsNil) } var 
all []interface{} err := s.store.DB.StatCounters().Find(nil).All(&all) c.Assert(err, gc.IsNil) data, err := json.Marshal(all) c.Assert(err, gc.IsNil) c.Logf("%s", data) expected := map[string]int64{ "a:b": 2, "a:b:*": 0, "a:*": 3, "a": 1, "a:b:c": 0, } for counter, n := range expected { c.Logf("test %q", counter) url := storeURL("stats/counter/" + counter) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: url, ExpectBody: []params.Statistic{{ Count: n, }}, }) } } func (s *StatsSuite) TestStatsCounterList(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } incs := [][]string{ {"a"}, {"a", "b"}, {"a", "b", "c"}, {"a", "b", "c"}, {"a", "b", "d"}, {"a", "b", "e"}, {"a", "f", "g"}, {"a", "f", "h"}, {"a", "i"}, {"j", "k"}, } for _, key := range incs { err := s.store.IncCounter(key) c.Assert(err, gc.IsNil) } tests := []struct { key string result []params.Statistic }{{ key: "a", result: []params.Statistic{{ Key: "a", Count: 1, }}, }, { key: "a:*", result: []params.Statistic{{ Key: "a:b:*", Count: 4, }, { Key: "a:f:*", Count: 2, }, { Key: "a:b", Count: 1, }, { Key: "a:i", Count: 1, }}, }, { key: "a:b:*", result: []params.Statistic{{ Key: "a:b:c", Count: 2, }, { Key: "a:b:d", Count: 1, }, { Key: "a:b:e", Count: 1, }}, }, { key: "a:*", result: []params.Statistic{{ Key: "a:b:*", Count: 4, }, { Key: "a:f:*", Count: 2, }, { Key: "a:b", Count: 1, }, { Key: "a:i", Count: 1, }}, }} for i, test := range tests { c.Logf("test %d: %s", i, test.key) url := storeURL("stats/counter/" + test.key + "?list=1") httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: url, ExpectBody: test.result, }) } } func (s *StatsSuite) TestStatsCounterBy(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } incs := []struct { key []string day int }{ {[]string{"a"}, 1}, {[]string{"a"}, 1}, {[]string{"b"}, 1}, {[]string{"a", "b"}, 1}, {[]string{"a", "c"}, 1}, {[]string{"a"}, 3}, 
{[]string{"a", "b"}, 3}, {[]string{"b"}, 9}, {[]string{"b"}, 9}, {[]string{"a", "c", "d"}, 9}, {[]string{"a", "c", "e"}, 9}, {[]string{"a", "c", "f"}, 9}, } day := func(i int) time.Time { return time.Date(2012, time.May, i, 0, 0, 0, 0, time.UTC) } for i, inc := range incs { t := day(inc.day) // Ensure each entry is unique by adding // a sufficient increment for each test. t = t.Add(time.Duration(i) * charmstore.StatsGranularity) err := s.store.IncCounterAtTime(inc.key, t) c.Assert(err, gc.IsNil) } tests := []struct { request charmstore.CounterRequest result []params.Statistic }{{ request: charmstore.CounterRequest{ Key: []string{"a"}, Prefix: false, List: false, By: charmstore.ByDay, }, result: []params.Statistic{{ Date: "2012-05-01", Count: 2, }, { Date: "2012-05-03", Count: 1, }}, }, { request: charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: false, By: charmstore.ByDay, }, result: []params.Statistic{{ Date: "2012-05-01", Count: 2, }, { Date: "2012-05-03", Count: 1, }, { Date: "2012-05-09", Count: 3, }}, }, { request: charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: false, By: charmstore.ByDay, Start: time.Date(2012, 5, 2, 0, 0, 0, 0, time.UTC), }, result: []params.Statistic{{ Date: "2012-05-03", Count: 1, }, { Date: "2012-05-09", Count: 3, }}, }, { request: charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: false, By: charmstore.ByDay, Stop: time.Date(2012, 5, 4, 0, 0, 0, 0, time.UTC), }, result: []params.Statistic{{ Date: "2012-05-01", Count: 2, }, { Date: "2012-05-03", Count: 1, }}, }, { request: charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: false, By: charmstore.ByDay, Start: time.Date(2012, 5, 3, 0, 0, 0, 0, time.UTC), Stop: time.Date(2012, 5, 3, 0, 0, 0, 0, time.UTC), }, result: []params.Statistic{{ Date: "2012-05-03", Count: 1, }}, }, { request: charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: true, By: charmstore.ByDay, }, result: []params.Statistic{{ Key: "a:b", 
Date: "2012-05-01", Count: 1, }, { Key: "a:c", Date: "2012-05-01", Count: 1, }, { Key: "a:b", Date: "2012-05-03", Count: 1, }, { Key: "a:c:*", Date: "2012-05-09", Count: 3, }}, }, { request: charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: false, By: charmstore.ByWeek, }, result: []params.Statistic{{ Date: "2012-05-06", Count: 3, }, { Date: "2012-05-13", Count: 3, }}, }, { request: charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: true, By: charmstore.ByWeek, }, result: []params.Statistic{{ Key: "a:b", Date: "2012-05-06", Count: 2, }, { Key: "a:c", Date: "2012-05-06", Count: 1, }, { Key: "a:c:*", Date: "2012-05-13", Count: 3, }}, }} for i, test := range tests { flags := make(url.Values) url := storeURL("stats/counter/" + strings.Join(test.request.Key, ":")) if test.request.Prefix { url += ":*" } if test.request.List { flags.Set("list", "1") } if !test.request.Start.IsZero() { flags.Set("start", test.request.Start.Format("2006-01-02")) } if !test.request.Stop.IsZero() { flags.Set("stop", test.request.Stop.Format("2006-01-02")) } switch test.request.By { case charmstore.ByDay: flags.Set("by", "day") case charmstore.ByWeek: flags.Set("by", "week") } if len(flags) > 0 { url += "?" 
+ flags.Encode() } c.Logf("test %d: %s", i, url) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: url, ExpectBody: test.result, }) } } func (s *StatsSuite) TestStatsEnabled(c *gc.C) { statsEnabled := func(url string) bool { req, _ := http.NewRequest("GET", url, nil) return v4.StatsEnabled(req) } c.Assert(statsEnabled("http://foo.com"), gc.Equals, true) c.Assert(statsEnabled("http://foo.com?stats=1"), gc.Equals, true) c.Assert(statsEnabled("http://foo.com?stats=0"), gc.Equals, false) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/router/0000775000175000017500000000000012672604603024530 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/router/router_test.go0000664000175000017500000021724212672604603027446 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package router // import "gopkg.in/juju/charmstore.v5-unstable/internal/router" import ( "bytes" "encoding/json" "fmt" "net/http" "net/http/httptest" "net/url" "sort" "strings" "sync" "sync/atomic" "github.com/juju/httprequest" jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/juju/charmstore.v5-unstable/audit" ) type RouterSuite struct { jujutesting.IsolationSuite } var _ = gc.Suite(&RouterSuite{}) var newResolvedURL = MustNewResolvedURL var routerGetTests = []struct { about string handlers Handlers urlStr string expectStatus int expectBody interface{} expectQueryCount int32 expectWillIncludeMetadata []string resolveURL func(*charm.URL) (*ResolvedURL, error) authorize func(*ResolvedURL, *http.Request) error exists func(*ResolvedURL, *http.Request) (bool, error) }{{ about: "global handler", handlers: Handlers{ Global: map[string]http.Handler{ 
"foo": HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { return ReqInfo{ Method: req.Method, Path: req.URL.Path, Form: req.Form, }, nil }), }, }, urlStr: "/foo", expectStatus: http.StatusOK, expectBody: ReqInfo{ Method: "GET", Path: "", }, }, { about: "global handler with sub-path and flags", handlers: Handlers{ Global: map[string]http.Handler{ "foo/bar/": HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { return ReqInfo{ Method: req.Method, Path: req.URL.Path, Form: req.Form, }, nil }), }, }, urlStr: "/foo/bar/a/b?a=1&b=two", expectStatus: http.StatusOK, expectBody: ReqInfo{ Path: "/a/b", Method: "GET", Form: url.Values{ "a": {"1"}, "b": {"two"}, }, }, }, { about: "invalid form", urlStr: "/foo?a=%", expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Message: `cannot parse form: invalid URL escape "%"`, }, }, { about: "id handler", handlers: Handlers{ Id: map[string]IdHandler{ "foo": testIdHandler, }, }, urlStr: "/precise/wordpress-34/foo", expectStatus: http.StatusOK, expectBody: idHandlerTestResp{ Method: "GET", CharmURL: "cs:precise/wordpress-34", }, }, { about: "development id handler", handlers: Handlers{ Id: map[string]IdHandler{ "foo": testIdHandler, }, }, urlStr: "/development/trusty/wordpress-34/foo", expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: "not found", }, }, { about: "id handler with invalid channel", handlers: Handlers{ Id: map[string]IdHandler{ "foo": testIdHandler, }, }, urlStr: "/bad-wolf/trusty/wordpress-34/foo", expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: "not found", }, }, { about: "windows id handler", handlers: Handlers{ Id: map[string]IdHandler{ "foo": testIdHandler, }, }, urlStr: "/win81/visualstudio-2012/foo", expectStatus: http.StatusOK, expectBody: idHandlerTestResp{ Method: "GET", CharmURL: "cs:win81/visualstudio-2012", }, }, { about: "wily id handler", handlers: 
Handlers{ Id: map[string]IdHandler{ "foo": testIdHandler, }, }, urlStr: "/wily/wordpress-34/foo", expectStatus: http.StatusOK, expectBody: idHandlerTestResp{ Method: "GET", CharmURL: "cs:wily/wordpress-34", }, }, { about: "id handler with no series in id", handlers: Handlers{ Id: map[string]IdHandler{ "foo": testIdHandler, }, }, urlStr: "/wordpress-34/foo", expectStatus: http.StatusOK, expectBody: idHandlerTestResp{ Method: "GET", CharmURL: "cs:wordpress-34", }, }, { about: "id handler with no revision in id", handlers: Handlers{ Id: map[string]IdHandler{ "foo": testIdHandler, }, }, urlStr: "/precise/wordpress/foo", expectStatus: http.StatusOK, expectBody: idHandlerTestResp{ Method: "GET", CharmURL: "cs:precise/wordpress", }, }, { about: "id handler with channel and name only", handlers: Handlers{ Id: map[string]IdHandler{ "foo": testIdHandler, }, }, urlStr: "/development/wordpress/foo", expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: "not found", }, }, { about: "id handler with extra path", handlers: Handlers{ Id: map[string]IdHandler{ "foo/": testIdHandler, }, }, urlStr: "/precise/wordpress-34/foo/blah/arble", expectStatus: http.StatusOK, expectBody: idHandlerTestResp{ Method: "GET", CharmURL: "cs:precise/wordpress-34", Path: "/blah/arble", }, }, { about: "id handler with allowed extra path but none given", handlers: Handlers{ Id: map[string]IdHandler{ "foo/": testIdHandler, }, }, urlStr: "/precise/wordpress-34/foo", expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: "not found", }, }, { about: "id handler with unwanted extra path", handlers: Handlers{ Id: map[string]IdHandler{ "foo": testIdHandler, }, }, urlStr: "/precise/wordpress-34/foo/blah", expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: "not found", }, }, { about: "id handler with user", handlers: Handlers{ Id: map[string]IdHandler{ "foo": testIdHandler, }, }, urlStr: 
"/~joe/precise/wordpress-34/foo", expectStatus: http.StatusOK, expectBody: idHandlerTestResp{ Method: "GET", CharmURL: "cs:~joe/precise/wordpress-34", }, }, { about: "wily handler with user", handlers: Handlers{ Id: map[string]IdHandler{ "foo": testIdHandler, }, }, urlStr: "/~joe/wily/wordpress-34/foo", expectStatus: http.StatusOK, expectBody: idHandlerTestResp{ Method: "GET", CharmURL: "cs:~joe/wily/wordpress-34", }, }, { about: "id handler with user and extra path", handlers: Handlers{ Id: map[string]IdHandler{ "foo/": testIdHandler, }, }, urlStr: "/~joe/precise/wordpress-34/foo/blah/arble", expectStatus: http.StatusOK, expectBody: idHandlerTestResp{ Method: "GET", CharmURL: "cs:~joe/precise/wordpress-34", Path: "/blah/arble", }, }, { about: "development id handler with user and extra path", handlers: Handlers{ Id: map[string]IdHandler{ "foo/": testIdHandler, }, }, urlStr: "/~joe/development/precise/wordpress-34/foo/blah/arble", expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: "not found", }, }, { about: "id handler with user, invalid channel and extra path", handlers: Handlers{ Id: map[string]IdHandler{ "foo/": testIdHandler, }, }, urlStr: "/~joe/bad-wolf/precise/wordpress-34/foo/blah/arble", expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: "not found", }, }, { about: "id handler that returns an error", handlers: Handlers{ Id: map[string]IdHandler{ "foo/": errorIdHandler, }, }, urlStr: "/~joe/precise/wordpress-34/foo/blah/arble", expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Message: "errorIdHandler error", }, }, { about: "id handler that returns a not-found error", handlers: Handlers{ Id: map[string]IdHandler{ "foo": func(charmId *charm.URL, w http.ResponseWriter, req *http.Request) error { return params.ErrNotFound }, }, }, urlStr: "/~joe/precise/wordpress-34/foo", expectStatus: http.StatusNotFound, expectBody: params.Error{ Message: "not 
found", Code: params.ErrNotFound, }, }, { about: "id handler that returns some other kind of coded error", handlers: Handlers{ Id: map[string]IdHandler{ "foo": func(charmId *charm.URL, w http.ResponseWriter, req *http.Request) error { return errgo.WithCausef(nil, params.ErrorCode("foo"), "a message") }, }, }, urlStr: "/~joe/precise/wordpress-34/foo", expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Message: "a message", Code: "foo", }, }, { about: "id with unspecified series and revision, not resolved", handlers: Handlers{ Id: map[string]IdHandler{ "foo": testIdHandler, }, }, urlStr: "/~joe/wordpress/foo", resolveURL: resolveTo("precise", 34), expectStatus: http.StatusOK, expectBody: idHandlerTestResp{ Method: "GET", CharmURL: "cs:~joe/wordpress", }, }, { about: "id with error on resolving", handlers: Handlers{ Id: map[string]IdHandler{ "foo": testIdHandler, }, }, urlStr: "/wordpress/meta", resolveURL: resolveURLError(errgo.New("resolve URL error")), expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Message: "resolve URL error", }, }, { about: "id with error on resolving that has a Cause", handlers: Handlers{ Id: map[string]IdHandler{ "foo": testIdHandler, }, }, urlStr: "/wordpress/meta", resolveURL: resolveURLError(params.ErrNotFound), expectStatus: http.StatusNotFound, expectBody: params.Error{ Message: "not found", Code: params.ErrNotFound, }, }, { about: "meta list", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), "bar": testMetaHandler(1), "bar/": testMetaHandler(2), "foo/": testMetaHandler(3), "baz": testMetaHandler(4), }, }, urlStr: "/precise/wordpress-42/meta", expectStatus: http.StatusOK, expectBody: []string{"bar", "baz", "foo"}, }, { about: "meta list at root", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), "bar": testMetaHandler(1), "bar/": testMetaHandler(2), "foo/": testMetaHandler(3), "baz": testMetaHandler(4), }, }, urlStr: "/meta", 
expectStatus: http.StatusOK, expectBody: []string{"bar", "baz", "foo"}, }, { about: "meta list at root with trailing /", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), "bar": testMetaHandler(1), "bar/": testMetaHandler(2), "foo/": testMetaHandler(3), "baz": testMetaHandler(4), }, }, urlStr: "/meta/", expectStatus: http.StatusOK, expectBody: []string{"bar", "baz", "foo"}, }, { about: "meta handler", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, urlStr: "/precise/wordpress-42/meta/foo", expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusOK, expectBody: &metaHandlerTestResp{ CharmURL: "cs:precise/wordpress-42", }, }, { about: "meta handler with additional elements", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo/": testMetaHandler(0), }, }, urlStr: "/precise/wordpress-42/meta/foo/bar/baz", expectWillIncludeMetadata: []string{"foo/bar/baz"}, expectStatus: http.StatusOK, expectBody: metaHandlerTestResp{ CharmURL: "cs:precise/wordpress-42", Path: "/bar/baz", }, }, { about: "meta handler with params", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, urlStr: "/precise/wordpress-42/meta/foo?one=a&two=b&one=c", expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusOK, expectBody: metaHandlerTestResp{ CharmURL: "cs:precise/wordpress-42", Flags: url.Values{ "one": {"a", "c"}, "two": {"b"}, }, }, }, { about: "meta handler that's not found", urlStr: "/precise/wordpress-42/meta/foo", expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: `unknown metadata "foo"`, }, }, { about: "meta sub-handler that's not found", urlStr: "/precise/wordpress-42/meta/foo/bar", expectWillIncludeMetadata: []string{"foo/bar"}, expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: `unknown metadata "foo/bar"`, 
}, }, { about: "meta handler with nil data", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": constMetaHandler(nil), }, }, urlStr: "/precise/wordpress-42/meta/foo", expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrMetadataNotFound, Message: "metadata not found", }, }, { about: "meta handler with typed nil data", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": constMetaHandler((*struct{})(nil)), }, }, urlStr: "/precise/wordpress-42/meta/foo", expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrMetadataNotFound, Message: "metadata not found", }, }, { about: "meta handler with field selector", urlStr: "/precise/wordpress-42/meta/foo", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": fieldSelectHandler("handler1", 0, "field1", "field2"), }, }, expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusOK, expectQueryCount: 1, expectBody: fieldSelectHandleGetInfo{ HandlerId: "handler1", Doc: fieldSelectQueryInfo{ Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), Selector: map[string]int{"field1": 1, "field2": 1}, }, Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), }, }, { about: "meta handler returning error with code", urlStr: "/precise/wordpress-42/meta/foo", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": errorMetaHandler(errgo.WithCausef(nil, params.ErrorCode("arble"), "a message")), }, }, expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Code: "arble", Message: "a message", }, }, { about: "unauthorized meta handler", urlStr: "/precise/wordpress-42/meta/foo", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, authorize: neverAuthorize, expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusUnauthorized, expectBody: 
params.Error{ Code: params.ErrUnauthorized, Message: "bad wolf", }, }, { about: "meta/any, no includes, id exists", urlStr: "/precise/wordpress-42/meta/any", expectStatus: http.StatusOK, expectBody: params.MetaAnyResponse{ Id: charm.MustParseURL("cs:precise/wordpress-42"), }, }, { about: "meta/any, no includes, id does not exist", urlStr: "/precise/wordpress/meta/any", resolveURL: resolveURLError(params.ErrNotFound), expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: "not found", }, }, { about: "meta/any, some includes all using same key", urlStr: "/precise/wordpress-42/meta/any?include=field1-1&include=field2&include=field1-2", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "field1-1": fieldSelectHandler("handler1", 0, "field1"), "field2": fieldSelectHandler("handler2", 0, "field2"), "field1-2": fieldSelectHandler("handler3", 0, "field1"), }, }, expectWillIncludeMetadata: []string{"field1-1", "field2", "field1-2"}, expectQueryCount: 1, expectStatus: http.StatusOK, expectBody: params.MetaAnyResponse{ Id: charm.MustParseURL("cs:precise/wordpress-42"), Meta: map[string]interface{}{ "field1-1": fieldSelectHandleGetInfo{ HandlerId: "handler1", Doc: fieldSelectQueryInfo{ Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), Selector: map[string]int{"field1": 1, "field2": 1}, }, Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), }, "field2": fieldSelectHandleGetInfo{ HandlerId: "handler2", Doc: fieldSelectQueryInfo{ Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), Selector: map[string]int{"field1": 1, "field2": 1}, }, Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), }, "field1-2": fieldSelectHandleGetInfo{ HandlerId: "handler3", Doc: fieldSelectQueryInfo{ Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), Selector: map[string]int{"field1": 1, "field2": 1}, }, Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), }, }, }, }, { about: "meta/any, includes with 
additional path elements", urlStr: "/precise/wordpress-42/meta/any?include=item1/foo&include=item2/bar&include=item1", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "item1/": fieldSelectHandler("handler1", 0, "field1"), "item2/": fieldSelectHandler("handler2", 0, "field2"), "item1": fieldSelectHandler("handler3", 0, "field3"), }, }, expectWillIncludeMetadata: []string{"item1/foo", "item2/bar", "item1"}, expectQueryCount: 1, expectStatus: http.StatusOK, expectBody: params.MetaAnyResponse{ Id: charm.MustParseURL("cs:precise/wordpress-42"), Meta: map[string]interface{}{ "item1/foo": fieldSelectHandleGetInfo{ HandlerId: "handler1", Doc: fieldSelectQueryInfo{ Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), Selector: map[string]int{"field1": 1, "field2": 1, "field3": 1}, }, Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), Path: "/foo", }, "item2/bar": fieldSelectHandleGetInfo{ HandlerId: "handler2", Doc: fieldSelectQueryInfo{ Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), Selector: map[string]int{"field1": 1, "field2": 1, "field3": 1}, }, Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), Path: "/bar", }, "item1": fieldSelectHandleGetInfo{ HandlerId: "handler3", Doc: fieldSelectQueryInfo{ Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), Selector: map[string]int{"field1": 1, "field2": 1, "field3": 1}, }, Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), }, }, }, }, { about: "meta/any, nil metadata omitted", urlStr: "/precise/wordpress-42/meta/any?include=ok&include=nil", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "ok": testMetaHandler(0), "nil": constMetaHandler(nil), "typednil": constMetaHandler((*struct{})(nil)), }, }, expectWillIncludeMetadata: []string{"ok", "nil"}, expectStatus: http.StatusOK, expectBody: params.MetaAnyResponse{ Id: charm.MustParseURL("cs:precise/wordpress-42"), Meta: map[string]interface{}{ "ok": metaHandlerTestResp{ CharmURL: "cs:precise/wordpress-42", 
}, }, }, }, { about: "meta/any, handler returns error with cause", urlStr: "/precise/wordpress-42/meta/any?include=error", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "error": errorMetaHandler(errgo.WithCausef(nil, params.ErrorCode("foo"), "a message")), }, }, expectWillIncludeMetadata: []string{"error"}, expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Code: "foo", Message: "a message", }, }, { about: "bulk meta handler, single id", urlStr: "/meta/foo?id=precise/wordpress-42", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusOK, expectBody: map[string]metaHandlerTestResp{ "precise/wordpress-42": { CharmURL: "cs:precise/wordpress-42", }, }, }, { about: "bulk meta handler, single id with invalid channel", urlStr: "/meta/foo?id=~user/bad-wolf/wily/wordpress-42", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: `bad request: charm or bundle URL has invalid form: "~user/bad-wolf/wily/wordpress-42"`, }, }, { about: "bulk meta handler, several ids", urlStr: "/meta/foo?id=precise/wordpress-42&id=utopic/foo-32&id=django", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusOK, expectBody: map[string]metaHandlerTestResp{ "precise/wordpress-42": { CharmURL: "cs:precise/wordpress-42", }, "utopic/foo-32": { CharmURL: "cs:utopic/foo-32", }, "django": { CharmURL: "cs:precise/django-0", }, }, }, { about: "bulk meta/any handler, several ids", urlStr: "/meta/any?id=precise/wordpress-42&id=utopic/foo-32&id=django-47&include=foo&include=bar/something", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), "bar/": 
testMetaHandler(1), }, }, expectWillIncludeMetadata: []string{"foo", "bar/something"}, expectStatus: http.StatusOK, expectBody: map[string]params.MetaAnyResponse{ "precise/wordpress-42": { Id: charm.MustParseURL("cs:precise/wordpress-42"), Meta: map[string]interface{}{ "foo": metaHandlerTestResp{ CharmURL: "cs:precise/wordpress-42", }, "bar/something": metaHandlerTestResp{ CharmURL: "cs:precise/wordpress-42", Path: "/something", }, }, }, "utopic/foo-32": { Id: charm.MustParseURL("cs:utopic/foo-32"), Meta: map[string]interface{}{ "foo": metaHandlerTestResp{ CharmURL: "cs:utopic/foo-32", }, "bar/something": metaHandlerTestResp{ CharmURL: "cs:utopic/foo-32", Path: "/something", }, }, }, "django-47": { Id: charm.MustParseURL("cs:precise/django-47"), Meta: map[string]interface{}{ "foo": metaHandlerTestResp{ CharmURL: "cs:precise/django-47", }, "bar/something": metaHandlerTestResp{ CharmURL: "cs:precise/django-47", Path: "/something", }, }, }, }, }, { about: "bulk meta/any handler, several ids, invalid channel", urlStr: "/meta/any?id=precise/wordpress-42&id=staging/trusty/django&include=foo&include=bar/something", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), "bar/": testMetaHandler(1), }, }, expectWillIncludeMetadata: []string{"foo", "bar/something"}, expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: `bad request: charm or bundle URL has invalid form: "staging/trusty/django"`, }, }, { about: "bulk meta/any handler, discharge required", urlStr: "/meta/any?id=precise/wordpress-42&include=foo", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, authorize: dischargeRequiredAuthorize, expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Message: "discharge required", }, }, { about: "bulk meta/any handler, discharge required, ignore authorization", urlStr: 
"/meta/any?id=precise/wordpress-42&include=foo&ignore-auth=1", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, authorize: dischargeRequiredAuthorize, expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusOK, expectBody: map[string]params.MetaAnyResponse{}, }, { about: "bulk meta/any handler, some unauthorized, ignore authorization", urlStr: "/meta/any?id=precise/wordpress-42&id=utopic/foo-32&include=foo&ignore-auth=1", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, authorize: dischargeRequiredAuthorize, expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusOK, expectBody: map[string]params.MetaAnyResponse{ "utopic/foo-32": { Id: charm.MustParseURL("cs:utopic/foo-32"), Meta: map[string]interface{}{ "foo": metaHandlerTestResp{ CharmURL: "cs:utopic/foo-32", }, }, }, }, }, { about: "bulk meta/any handler, unauthorized", urlStr: "/meta/any?id=precise/wordpress-42&include=foo", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, authorize: neverAuthorize, expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Message: "bad wolf", }, }, { about: "bulk meta/any handler, unauthorized, ignore authorization", urlStr: "/meta/any?id=precise/wordpress-42&include=foo&ignore-auth=1", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, authorize: neverAuthorize, expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusOK, expectBody: map[string]params.MetaAnyResponse{}, }, { about: "bulk meta/any handler, invalid ignore-auth flag", urlStr: "/meta/any?id=precise/wordpress-42&include=foo&ignore-auth=meh", expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: `bad request: unexpected bool value "meh" (must be "0" or "1")`, }, }, { about: "bulk meta handler with unresolved id", 
urlStr: "/meta/foo/bar?id=wordpress", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo/": testMetaHandler(0), }, }, resolveURL: resolveTo("precise", 100), expectWillIncludeMetadata: []string{"foo/bar"}, expectStatus: http.StatusOK, expectBody: map[string]metaHandlerTestResp{ "wordpress": { CharmURL: "cs:precise/wordpress-100", Path: "/bar", }, }, }, { about: "bulk meta handler with extra flags", urlStr: "/meta/foo/bar?id=wordpress&arble=bletch&z=w&z=p", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo/": testMetaHandler(0), }, }, resolveURL: resolveTo("precise", 100), expectWillIncludeMetadata: []string{"foo/bar"}, expectStatus: http.StatusOK, expectBody: map[string]metaHandlerTestResp{ "wordpress": { CharmURL: "cs:precise/wordpress-100", Path: "/bar", Flags: url.Values{ "arble": {"bletch"}, "z": {"w", "p"}, }, }, }, }, { about: "bulk meta handler with no ids", urlStr: "/meta/foo/bar", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo/": testMetaHandler(0), }, }, expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: "no ids specified in meta request", }, }, { about: "bulk meta handler with unresolvable id", urlStr: "/meta/foo?id=unresolved&id=~foo/precise/wordpress-23", resolveURL: func(url *charm.URL) (*ResolvedURL, error) { if url.Name == "unresolved" { return nil, params.ErrNotFound } return &ResolvedURL{URL: *url, PromulgatedRevision: 99}, nil }, handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusOK, expectBody: map[string]metaHandlerTestResp{ "~foo/precise/wordpress-23": { CharmURL: "cs:precise/wordpress-99", }, }, }, { about: "bulk meta handler with id resolution error", urlStr: "/meta/foo?id=resolveerror&id=precise/wordpress-23", resolveURL: func(url *charm.URL) (*ResolvedURL, error) { if url.Name == "resolveerror" { return nil, errgo.Newf("an error") } return 
&ResolvedURL{URL: *url}, nil }, handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Message: "an error", }, }, { about: "bulk meta handler with some nil data", urlStr: "/meta/foo?id=bundle/something-24&id=precise/wordpress-23", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": selectiveIdHandler(map[string]interface{}{ "cs:bundle/something-24": "bundlefoo", }), }, }, expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusOK, expectBody: map[string]string{ "bundle/something-24": "bundlefoo", }, }, { about: "bulk meta handler with entity not found", urlStr: "/meta/foo?id=bundle/something-24&id=precise/wordpress-23", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": SingleIncludeHandler(func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { if id.URL.Revision == 23 { return nil, errgo.WithCausef(nil, params.ErrNotFound, "") } return "something", nil }), }, }, expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusOK, expectBody: map[string]string{ "bundle/something-24": "something", }, }, { about: "meta request with invalid entity reference", urlStr: "/robots.txt/meta/any", handlers: Handlers{}, expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: `not found: URL has invalid charm or bundle name: "robots.txt"`, }, }, { about: "bulk meta handler, invalid id", urlStr: "/meta/foo?id=robots.txt", handlers: Handlers{}, expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: `bad request: URL has invalid charm or bundle name: "robots.txt"`, }, }} // resolveTo returns a URL resolver that resolves // unspecified series and revision to the given series // and revision. 
func resolveTo(series string, revision int) func(*charm.URL) (*ResolvedURL, error) {
	return func(url *charm.URL) (*ResolvedURL, error) {
		var rurl ResolvedURL
		rurl.URL = *url
		if url.Series == "" {
			rurl.URL.Series = series
		}
		if url.Revision == -1 {
			rurl.URL.Revision = revision
		}
		// A URL with no user is treated as promulgated: fill in the
		// canonical "charmers" owner and record the promulgated revision.
		if url.User == "" {
			rurl.URL.User = "charmers"
			rurl.PromulgatedRevision = revision
		}
		return &rurl, nil
	}
}

// resolveURLError returns a URL resolver that always fails with err,
// regardless of the URL being resolved.
func resolveURLError(err error) func(*charm.URL) (*ResolvedURL, error) {
	return func(*charm.URL) (*ResolvedURL, error) {
		return nil, err
	}
}

// alwaysResolveURL resolves any charm URL by filling in defaults for any
// missing parts: series "precise", revision 0 and user "charmers". When the
// user was absent, the filled-in revision is also used as the promulgated
// revision; otherwise the promulgated revision is -1.
func alwaysResolveURL(u *charm.URL) (*ResolvedURL, error) {
	u1 := *u
	if u1.Series == "" {
		u1.Series = "precise"
	}
	if u1.Revision == -1 {
		u1.Revision = 0
	}
	promRev := -1
	if u1.User == "" {
		u1.User = "charmers"
		promRev = u1.Revision
	}
	return newResolvedURL(u1.String(), promRev), nil
}

// TestRouterGet runs every entry in the routerGetTests table through a
// freshly built router, checking status, body, query count and the
// metadata names passed to WillIncludeMetadata. It also verifies that
// WillIncludeMetadata is invoked before the first ResolveURL call.
func (s *RouterSuite) TestRouterGet(c *gc.C) {
	for i, test := range routerGetTests {
		c.Logf("test %d: %s", i, test.about)
		// Start from the always-succeeding context and override only the
		// hooks the test case specifies.
		ctxt := alwaysContext
		if test.resolveURL != nil {
			ctxt.resolveURL = test.resolveURL
		}
		if test.authorize != nil {
			ctxt.authorizeURL = test.authorize
		}
		resolved := false
		var includedMetadata []string
		// Wrap the resolver so we can observe the ordering of
		// WillIncludeMetadata relative to ResolveURL.
		origResolve := ctxt.resolveURL
		ctxt.resolveURL = func(id *charm.URL) (*ResolvedURL, error) {
			resolved = true
			return origResolve(id)
		}
		ctxt.willIncludeMetadata = func(incs []string) {
			if resolved {
				c.Errorf("ResolveURL called before WillIncludeMetadata")
			}
			includedMetadata = incs
		}
		router := New(&test.handlers, ctxt)
		// Note that fieldSelectHandler increments queryCount each time
		// a query is made.
		queryCount = 0
		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
			Handler:      router,
			URL:          test.urlStr,
			ExpectStatus: test.expectStatus,
			ExpectBody:   test.expectBody,
		})
		c.Assert(queryCount, gc.Equals, test.expectQueryCount)
		c.Assert(includedMetadata, jc.DeepEquals, test.expectWillIncludeMetadata)
	}
}

// funcContext is a stub router context whose behavior is supplied entirely
// by its function fields, letting each test plug in its own resolution,
// authorization and metadata-notification logic.
type funcContext struct {
	resolveURL          func(id *charm.URL) (*ResolvedURL, error)
	authorizeURL        func(id *ResolvedURL, req *http.Request) error
	willIncludeMetadata func([]string)
}

// ResolveURL delegates to the resolveURL function field.
func (ctxt funcContext) ResolveURL(id *charm.URL) (*ResolvedURL, error) {
	return ctxt.resolveURL(id)
}

// ResolveURLs resolves each id in turn with the resolveURL function field.
// A not-found error leaves a nil entry in the result slice; any other
// error aborts the whole call.
func (ctxt funcContext) ResolveURLs(ids []*charm.URL) ([]*ResolvedURL, error) {
	rurls := make([]*ResolvedURL, len(ids))
	for i, id := range ids {
		rurl, err := ctxt.resolveURL(id)
		if err != nil && errgo.Cause(err) != params.ErrNotFound {
			return nil, err
		}
		rurls[i] = rurl
	}
	return rurls, nil
}

// WillIncludeMetadata delegates to the willIncludeMetadata function field.
func (ctxt funcContext) WillIncludeMetadata(includes []string) {
	ctxt.willIncludeMetadata(includes)
}

// AuthorizeEntity delegates to the authorizeURL function field.
func (ctxt funcContext) AuthorizeEntity(id *ResolvedURL, req *http.Request) error {
	return ctxt.authorizeURL(id, req)
}

// parseBoolTests holds table-driven cases for ParseBool: the input string,
// the expected boolean result and whether an error is expected.
var parseBoolTests = []struct {
	value  string
	result bool
	err    bool
}{{
	value: "0",
}, {
	value: "",
}, {
	value:  "1",
	result: true,
}, {
	value: "invalid",
	err:   true,
}}

// TestParseBool checks ParseBool against every entry in parseBoolTests.
func (s *RouterSuite) TestParseBool(c *gc.C) {
	for i, test := range parseBoolTests {
		c.Logf("test %d: %s", i, test.value)
		result, err := ParseBool(test.value)
		c.Assert(result, gc.Equals, test.result)
		if test.err {
			c.Assert(err, gc.ErrorMatches, "unexpected bool value .*")
			continue
		}
		c.Assert(err, jc.ErrorIsNil)
	}
}

// alwaysContext is a context that resolves every URL (via alwaysResolveURL),
// authorizes everything and ignores metadata notifications. Tests copy it
// and override individual fields as needed.
var alwaysContext = funcContext{
	resolveURL:          alwaysResolveURL,
	authorizeURL:        alwaysAuthorize,
	willIncludeMetadata: func([]string) {},
}

// TestCORSHeaders checks that a plain GET through the router carries the
// expected CORS response headers.
func (s *RouterSuite) TestCORSHeaders(c *gc.C) {
	h := New(&Handlers{
		Global: map[string]http.Handler{
			"foo": http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {}),
		},
	}, alwaysContext)
	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
		Handler: h,
		URL:     "/foo",
	})
	c.Assert(rec.Code, gc.Equals, http.StatusOK)
	c.Assert(rec.Header().Get("Access-Control-Allow-Origin"), gc.Equals, "*")
	c.Assert(rec.Header().Get("Access-Control-Cache-Max-Age"), gc.Equals, "600")
	c.Assert(rec.Header().Get("Access-Control-Allow-Headers"), gc.Equals, "Bakery-Protocol-Version, Macaroons, X-Requested-With")
	c.Assert(rec.Header().Get("Access-Control-Allow-Methods"), gc.Equals, "DELETE,GET,HEAD,PUT,POST,OPTIONS")
	c.Assert(rec.Header().Get("Access-Control-Expose-Headers"), gc.Equals, "WWW-Authenticate")
}

// TestHTTPRequestPassedThroughToMeta verifies that the exact *http.Request
// given to the router is the one handed to a field-include handler's Query,
// HandleGet and HandlePut callbacks, for both GET and PUT requests.
func (s *RouterSuite) TestHTTPRequestPassedThroughToMeta(c *gc.C) {
	testReq, err := http.NewRequest("GET", "/wordpress/meta/foo", nil)
	c.Assert(err, gc.IsNil)
	doneQuery := false
	query := func(id *ResolvedURL, selector map[string]int, req *http.Request) (interface{}, error) {
		if req != testReq {
			return nil, fmt.Errorf("unexpected request found in Query")
		}
		doneQuery = true
		return 0, nil
	}
	doneGet := false
	handleGet := func(doc interface{}, id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
		if req != testReq {
			return nil, fmt.Errorf("unexpected request found in HandleGet")
		}
		doneGet = true
		return 0, nil
	}
	donePut := false
	handlePut := func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error {
		if req != testReq {
			return fmt.Errorf("unexpected request found in HandlePut")
		}
		donePut = true
		return nil
	}
	update := func(id *ResolvedURL, fields map[string]interface{}, entries []audit.Entry) error {
		return nil
	}
	h := New(&Handlers{
		Meta: map[string]BulkIncludeHandler{
			"foo": NewFieldIncludeHandler(FieldIncludeHandlerParams{
				Key:       0,
				Query:     query,
				Fields:    []string{"foo"},
				HandleGet: handleGet,
				HandlePut: handlePut,
				Update:    update,
			}),
		},
	}, alwaysContext)
	resp := httptest.NewRecorder()
	h.ServeHTTP(resp, testReq)
	c.Assert(resp.Code, gc.Equals, http.StatusOK, gc.Commentf("response body: %s", resp.Body))
	c.Assert(doneGet, jc.IsTrue)
	c.Assert(doneQuery, jc.IsTrue)
	// Now exercise the PUT path with the same checks.
	testReq, err = http.NewRequest("PUT", "/wordpress/meta/foo", strings.NewReader(`"hello"`))
	testReq.Header.Set("Content-Type", "application/json")
	c.Assert(err, gc.IsNil)
	resp = httptest.NewRecorder()
	h.ServeHTTP(resp, testReq)
	c.Assert(resp.Code, gc.Equals, http.StatusOK, gc.Commentf("response body: %s", resp.Body))
	c.Assert(donePut, jc.IsTrue)
}

// TestOptionsHTTPMethod checks the router's response to a CORS preflight
// OPTIONS request, including echoing the Origin header back in
// Access-Control-Allow-Origin and advertising the allowed methods.
func (s *RouterSuite) TestOptionsHTTPMethod(c *gc.C) {
	h := New(&Handlers{}, alwaysContext)
	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
		Handler: h,
		Method:  "OPTIONS",
		URL:     "/foo",
		Header:  http.Header{"Origin": []string{"https://1.2.42.47"}},
	})
	c.Assert(rec.Code, gc.Equals, http.StatusOK)
	header := rec.Header()
	c.Assert(header.Get("Access-Control-Allow-Origin"), gc.Equals, "https://1.2.42.47")
	c.Assert(header.Get("Access-Control-Cache-Max-Age"), gc.Equals, "600")
	c.Assert(header.Get("Access-Control-Allow-Headers"), gc.Equals, "Bakery-Protocol-Version, Macaroons, X-Requested-With")
	c.Assert(header.Get("Access-Control-Allow-Methods"), gc.Equals, "DELETE,GET,HEAD,PUT,POST,OPTIONS")
	c.Assert(header.Get("Allow"), gc.Equals, "DELETE,GET,HEAD,PUT,POST")
}

// routerPutTests holds table-driven cases for PUT requests through the
// router: the handlers to install, the request URL and body, and the
// expected status, response body and recorded handler calls.
var routerPutTests = []struct {
	about               string
	handlers            Handlers
	urlStr              string
	body                interface{}
	expectCode          int
	expectBody          interface{}
	expectRecordedCalls []interface{}
	resolveURL          func(*charm.URL) (*ResolvedURL, error)
}{{
	about: "global handler",
	handlers: Handlers{
		Global: map[string]http.Handler{
			"foo": HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) {
				return ReqInfo{
					Method: req.Method,
					Path:   req.URL.Path,
					Form:   req.Form,
				}, nil
			}),
		},
	},
	urlStr:     "/foo",
	expectCode: http.StatusOK,
	expectBody: ReqInfo{
		Method: "PUT",
		Path:   "",
	},
}, {
	about: "id handler",
	handlers: Handlers{
		Id: map[string]IdHandler{
			"foo": testIdHandler,
		},
	},
	urlStr:     "/precise/wordpress-34/foo",
	expectCode: http.StatusOK,
	expectBody: idHandlerTestResp{
		Method:   "PUT",
		CharmURL: "cs:precise/wordpress-34",
	},
}, {
	about: "meta handler",
	handlers: Handlers{
		Meta:
map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, urlStr: "/precise/wordpress-42/meta/foo", expectCode: http.StatusOK, body: "hello", expectRecordedCalls: []interface{}{ metaHandlerTestPutParams{ NumHandlers: 1, Id: "cs:precise/wordpress-42", Paths: []string{""}, Values: []interface{}{"hello"}, }, }, }, { about: "meta/any", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), "bar": testMetaHandler(1), }, }, urlStr: "/precise/wordpress-42/meta/any", body: params.MetaAnyResponse{ Meta: map[string]interface{}{ "foo": "foo-value", "bar": map[string]interface{}{ "bar-value1": 234.0, "bar-value2": "whee", }, }, }, expectRecordedCalls: []interface{}{ metaHandlerTestPutParams{ NumHandlers: 2, Id: "cs:precise/wordpress-42", Paths: []string{"", ""}, Values: []interface{}{ "foo-value", map[string]interface{}{ "bar-value1": 234.0, "bar-value2": "whee", }, }, }, }, }, { about: "meta/any with extra paths", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo/": testMetaHandler(0), "bar": testMetaHandler(1), }, }, urlStr: "/precise/wordpress-42/meta/any", body: params.MetaAnyResponse{ Meta: map[string]interface{}{ "foo/one": "foo-value-one", "foo/two": "foo-value-two", "bar": 1234.0, }, }, expectRecordedCalls: []interface{}{ metaHandlerTestPutParams{ NumHandlers: 3, Id: "cs:precise/wordpress-42", Paths: []string{"/one", "/two", ""}, Values: []interface{}{ "foo-value-one", "foo-value-two", 1234.0, }, }, }, }, { about: "bulk meta", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, urlStr: "/meta/foo", body: map[string]string{ "precise/wordpress-42": "forty two", "precise/foo-134": "blah", }, expectRecordedCalls: []interface{}{ metaHandlerTestPutParams{ NumHandlers: 1, Id: "cs:precise/foo-134", Paths: []string{""}, Values: []interface{}{"blah"}, }, metaHandlerTestPutParams{ NumHandlers: 1, Id: "cs:precise/wordpress-42", Paths: []string{""}, Values: []interface{}{"forty two"}, }, }, }, { 
about: "bulk meta any", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), "bar": testMetaHandler(1), "baz/": testMetaHandler(2), }, }, urlStr: "/meta/any", body: map[string]params.MetaAnyResponse{ "precise/wordpress-42": { Meta: map[string]interface{}{ "foo": "foo-wordpress-val", "bar": "bar-wordpress-val", }, }, "precise/mysql-134": { Meta: map[string]interface{}{ "foo": "foo-mysql-val", "baz/blah": "baz/blah-mysql-val", "baz/ppp": "baz/ppp-mysql-val", }, }, "trusty/django-47": { Meta: map[string]interface{}{ "foo": "foo-django-val", }, }, }, expectRecordedCalls: []interface{}{ metaHandlerTestPutParams{ NumHandlers: 3, Id: "cs:precise/mysql-134", Paths: []string{"", "/blah", "/ppp"}, Values: []interface{}{"foo-mysql-val", "baz/blah-mysql-val", "baz/ppp-mysql-val"}, }, metaHandlerTestPutParams{ NumHandlers: 2, Id: "cs:precise/wordpress-42", Paths: []string{"", ""}, Values: []interface{}{"foo-wordpress-val", "bar-wordpress-val"}, }, metaHandlerTestPutParams{ NumHandlers: 1, Id: "cs:trusty/django-47", Paths: []string{""}, Values: []interface{}{"foo-django-val"}, }, }, }, { about: "field include handler with bulk meta any", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": fieldSelectHandler("handler1", 0, "field1", "field2"), "bar": fieldSelectHandler("handler2", 0, "field3", "field4"), "baz/": fieldSelectHandler("handler3", 1, "field5"), }, }, urlStr: "/meta/any", body: map[string]params.MetaAnyResponse{ "precise/mysql-123": { Meta: map[string]interface{}{ "foo": "foo-mysql-val", "baz/blah": "baz/blah-mysql-val", "baz/ppp": "baz/ppp-mysql-val", }, }, "precise/wordpress-42": { Meta: map[string]interface{}{ "foo": "foo-wordpress-val", "bar": "bar-wordpress-val", }, }, }, expectRecordedCalls: []interface{}{ fieldSelectHandleUpdateInfo{ Id: "cs:precise/mysql-123", Fields: map[string]fieldSelectHandlePutInfo{ "field1": { Id: "cs:precise/mysql-123", Value: "foo-mysql-val", }, "field2": { Id: "cs:precise/mysql-123", Value: 
"foo-mysql-val", }, }, }, fieldSelectHandleUpdateInfo{ Id: "cs:precise/mysql-123", Fields: map[string]fieldSelectHandlePutInfo{ "field5/blah": { Id: "cs:precise/mysql-123", Value: "baz/blah-mysql-val", }, "field5/ppp": { Id: "cs:precise/mysql-123", Value: "baz/ppp-mysql-val", }, }, }, fieldSelectHandleUpdateInfo{ Id: "cs:precise/wordpress-42", Fields: map[string]fieldSelectHandlePutInfo{ "field1": { Id: "cs:precise/wordpress-42", Value: "foo-wordpress-val", }, "field2": { Id: "cs:precise/wordpress-42", Value: "foo-wordpress-val", }, "field3": { Id: "cs:precise/wordpress-42", Value: "bar-wordpress-val", }, "field4": { Id: "cs:precise/wordpress-42", Value: "bar-wordpress-val", }, }, }, }, }, { about: "field include handler with no HandlePut", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": NewFieldIncludeHandler(FieldIncludeHandlerParams{ Key: 0, }), }, }, urlStr: "/precise/wordpress-23/meta/foo", body: "something", expectCode: http.StatusInternalServerError, expectBody: params.Error{ Message: "PUT not supported", }, }, { about: "field include handler when HandlePut returns an error", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": NewFieldIncludeHandler(FieldIncludeHandlerParams{ Key: 0, HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { return errgo.WithCausef(nil, params.ErrNotFound, "message") }, }), }, }, urlStr: "/precise/wordpress-23/meta/foo", body: "something", expectCode: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: "message", }, }, { about: "meta put to field include handler with several errors", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": NewFieldIncludeHandler(FieldIncludeHandlerParams{ Key: 0, HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { return errgo.WithCausef(nil, params.ErrNotFound, "foo error") }, Update: nopUpdate, }), 
"bar": NewFieldIncludeHandler(FieldIncludeHandlerParams{ Key: 0, HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { return errgo.New("bar error") }, Update: nopUpdate, }), "baz": NewFieldIncludeHandler(FieldIncludeHandlerParams{ Key: 0, HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { return nil }, Update: nopUpdate, }), }, }, urlStr: "/precise/wordpress-23/meta/any", body: params.MetaAnyResponse{ Meta: map[string]interface{}{ "foo": "one", "bar": "two", "baz": "three", }, }, expectCode: http.StatusInternalServerError, expectBody: params.Error{ Code: params.ErrMultipleErrors, Message: "multiple (2) errors", Info: map[string]*params.Error{ "foo": { Code: params.ErrNotFound, Message: "foo error", }, "bar": { Message: "bar error", }, }, }, }, { about: "meta/any put with update error", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo/": NewFieldIncludeHandler(FieldIncludeHandlerParams{ Key: 0, HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { if path == "/bad" { return fmt.Errorf("foo/bad error") } return nil }, Update: func(id *ResolvedURL, fields map[string]interface{}, entries []audit.Entry) error { return params.ErrBadRequest }, }), "bar": NewFieldIncludeHandler(FieldIncludeHandlerParams{ Key: 1, HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { return fmt.Errorf("bar error") }, }), }, }, urlStr: "/precise/wordpress-23/meta/any", body: params.MetaAnyResponse{ Meta: map[string]interface{}{ "foo/one": "one", "foo/two": "two", "foo/bad": "bad", "bar": "bar", }, }, expectCode: http.StatusInternalServerError, expectBody: params.Error{ Code: params.ErrMultipleErrors, Message: "multiple (4) errors", Info: map[string]*params.Error{ // All endpoints that share the same bulk key should // get 
the same error, as the update pertains to all of them, // but endpoints for which the HandlePut failed will // not be included in that. "foo/one": { Code: params.ErrBadRequest, Message: "bad request", }, "foo/two": { Code: params.ErrBadRequest, Message: "bad request", }, "foo/bad": { Message: "foo/bad error", }, "bar": { Message: "bar error", }, }, }, }, { about: "bulk meta/any put with several errors", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": NewFieldIncludeHandler(FieldIncludeHandlerParams{ Key: 0, HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { return nil }, Update: nopUpdate, }), "bar": NewFieldIncludeHandler(FieldIncludeHandlerParams{ Key: 0, HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { return errgo.WithCausef(nil, params.ErrNotFound, "bar error") }, Update: nopUpdate, }), }, }, resolveURL: func(id *charm.URL) (*ResolvedURL, error) { if id.Name == "bad" { return nil, params.ErrBadRequest } return &ResolvedURL{URL: *id}, nil }, urlStr: "/meta/any", body: map[string]params.MetaAnyResponse{ "precise/mysql-123": { Meta: map[string]interface{}{ "foo": "fooval", "bar": "barval", }, }, "bad": { Meta: map[string]interface{}{ "foo": "foo-wordpress-val", "bar": "bar-wordpress-val", }, }, }, expectCode: http.StatusInternalServerError, expectBody: params.Error{ Code: params.ErrMultipleErrors, Message: "multiple (2) errors", Info: map[string]*params.Error{ "precise/mysql-123": { Code: params.ErrMultipleErrors, Message: "multiple (1) errors", Info: map[string]*params.Error{ "bar": { Code: params.ErrNotFound, Message: "bar error", }, }, }, "bad": { Message: "bad request", Code: params.ErrBadRequest, }, }, }, }, { about: "meta put with unresolved URL", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, urlStr: "/wordpress/meta/foo", resolveURL: resolveTo("series", 245), 
expectCode: http.StatusOK, body: "hello", expectRecordedCalls: []interface{}{ metaHandlerTestPutParams{ NumHandlers: 1, Id: "cs:series/wordpress-245", Paths: []string{""}, Values: []interface{}{"hello"}, }, }, }, { about: "bulk put with unresolved URL", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, urlStr: "/meta/foo", resolveURL: resolveTo("series", 245), expectCode: http.StatusOK, body: map[string]string{ "wordpress": "hello", }, expectRecordedCalls: []interface{}{ metaHandlerTestPutParams{ NumHandlers: 1, Id: "cs:series/wordpress-245", Paths: []string{""}, Values: []interface{}{"hello"}, }, }, }, { about: "bulk put with ids specified in URL", handlers: Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, }, urlStr: "/meta/foo?id=wordpress", expectCode: http.StatusInternalServerError, expectBody: params.Error{ Message: "ids may not be specified in meta PUT request", }, }} func nopUpdate(id *ResolvedURL, fields map[string]interface{}, entries []audit.Entry) error { return nil } func (s *RouterSuite) TestRouterPut(c *gc.C) { for i, test := range routerPutTests { c.Logf("test %d: %s", i, test.about) ResetRecordedCalls() resolve := alwaysResolveURL if test.resolveURL != nil { resolve = test.resolveURL } bodyVal, err := json.Marshal(test.body) c.Assert(err, gc.IsNil) ctxt := alwaysContext ctxt.resolveURL = resolve router := New(&test.handlers, ctxt) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: router, URL: test.urlStr, Body: bytes.NewReader(bodyVal), Method: "PUT", Header: map[string][]string{ "Content-Type": {"application/json"}, }, ExpectStatus: test.expectCode, ExpectBody: test.expectBody, }) c.Assert(RecordedCalls(), jc.DeepEquals, test.expectRecordedCalls) } } var routerPutWithInvalidContentTests = []struct { about string urlStr string contentType string body string expectCode int expectBody interface{} }{{ about: "invalid content type with meta", urlStr: 
"/precise/wordpress-23/meta/foo", contentType: "foo/bar", expectCode: http.StatusBadRequest, expectBody: params.Error{ Message: `unexpected Content-Type "foo/bar"; expected "application/json"`, Code: params.ErrBadRequest, }, }, { about: "invalid content type with bulk meta", urlStr: "/meta/foo", contentType: "foo/bar", expectCode: http.StatusBadRequest, expectBody: params.Error{ Message: `unexpected Content-Type "foo/bar"; expected "application/json"`, Code: params.ErrBadRequest, }, }, { about: "bad JSON with meta", urlStr: "/precise/wordpress-23/meta/foo", contentType: "application/json", body: `"foo`, expectCode: http.StatusInternalServerError, expectBody: params.Error{ Message: `cannot unmarshal body: unexpected EOF`, }, }, { about: "bad JSON with bulk meta", urlStr: "/meta/foo", contentType: "application/json", body: `"foo`, expectCode: http.StatusInternalServerError, expectBody: params.Error{ Message: `cannot unmarshal body: unexpected EOF`, }, }} func (s *RouterSuite) TestRouterPutWithInvalidContent(c *gc.C) { for i, test := range routerPutWithInvalidContentTests { c.Logf("test %d: %s", i, test.about) handlers := &Handlers{ Meta: map[string]BulkIncludeHandler{ "foo": testMetaHandler(0), }, } router := New(handlers, alwaysContext) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: router, URL: test.urlStr, Body: strings.NewReader(test.body), Method: "PUT", Header: map[string][]string{ "Content-Type": {test.contentType}, }, ExpectStatus: test.expectCode, ExpectBody: test.expectBody, }) } } func alwaysExists(id *ResolvedURL, req *http.Request) (bool, error) { return true, nil } func alwaysAuthorize(id *ResolvedURL, req *http.Request) error { return nil } func neverAuthorize(id *ResolvedURL, req *http.Request) error { return errgo.WithCausef(nil, params.ErrUnauthorized, "bad wolf") } func dischargeRequiredAuthorize(id *ResolvedURL, req *http.Request) error { if id.String() == "cs:utopic/foo-32" { return nil } return 
httpbakery.NewDischargeRequiredError(nil, "/", errgo.New("discharge required")) } var getMetadataTests = []struct { id *ResolvedURL includes []string expectResult map[string]interface{} expectError string }{{ id: newResolvedURL("~charmers/precise/wordpress-34", 34), includes: []string{}, expectResult: map[string]interface{}{}, }, { id: newResolvedURL("~rog/precise/wordpress-2", -1), includes: []string{"item1", "item2", "test"}, expectResult: map[string]interface{}{ "item1": fieldSelectHandleGetInfo{ HandlerId: "handler1", Doc: fieldSelectQueryInfo{ Id: newResolvedURL("cs:~rog/precise/wordpress-2", -1), Selector: map[string]int{"item1": 1, "item2": 1}, }, Id: newResolvedURL("cs:~rog/precise/wordpress-2", -1), }, "item2": fieldSelectHandleGetInfo{ HandlerId: "handler2", Doc: fieldSelectQueryInfo{ Id: newResolvedURL("cs:~rog/precise/wordpress-2", -1), Selector: map[string]int{"item1": 1, "item2": 1}, }, Id: newResolvedURL("cs:~rog/precise/wordpress-2", -1), }, "test": &metaHandlerTestResp{ CharmURL: "cs:~rog/precise/wordpress-2", }, }, }, { id: newResolvedURL("~rog/precise/wordpress-2", -1), includes: []string{"mistaek"}, expectError: `unrecognized metadata name "mistaek"`, }} func (s *RouterSuite) TestGetMetadata(c *gc.C) { for i, test := range getMetadataTests { c.Logf("test %d: %q", i, test.includes) router := New(&Handlers{ Meta: map[string]BulkIncludeHandler{ "item1": fieldSelectHandler("handler1", 0, "item1"), "item2": fieldSelectHandler("handler2", 0, "item2"), "test": testMetaHandler(0), }, }, alwaysContext) result, err := router.GetMetadata(test.id, test.includes, nil) if test.expectError != "" { c.Assert(err, gc.ErrorMatches, test.expectError) c.Assert(result, gc.IsNil) continue } c.Assert(err, gc.IsNil) c.Assert(result, jc.DeepEquals, test.expectResult) } } var splitIdTests = []struct { path string expectURL string expectError string }{{ path: "precise/wordpress-23", expectURL: "cs:precise/wordpress-23", }, { path: "~user/precise/wordpress-23", expectURL: 
"cs:~user/precise/wordpress-23", }, { path: "wordpress", expectURL: "cs:wordpress", }, { path: "~user/wordpress", expectURL: "cs:~user/wordpress", }, { path: "", expectError: `URL has invalid charm or bundle name: ""`, }, { path: "~foo-bar-/wordpress", expectError: `charm or bundle URL has invalid user name: "~foo-bar-/wordpress"`, }} func (s *RouterSuite) TestSplitId(c *gc.C) { for i, test := range splitIdTests { c.Logf("test %d: %s", i, test.path) url, rest, err := splitId(test.path) if test.expectError != "" { c.Assert(err, gc.ErrorMatches, test.expectError) c.Assert(url, gc.IsNil) c.Assert(rest, gc.Equals, "") continue } c.Assert(err, gc.Equals, nil) c.Assert(url.String(), gc.Equals, test.expectURL) c.Assert(rest, gc.Equals, "") url, rest, err = splitId(test.path + "/some/more") c.Assert(err, gc.Equals, nil) c.Assert(url.String(), gc.Equals, test.expectURL) c.Assert(rest, gc.Equals, "/some/more") } } var handlerKeyTests = []struct { path string expectKey string expectRest string }{{ path: "/foo/bar", expectKey: "foo/", expectRest: "/bar", }, { path: "/foo", expectKey: "foo", expectRest: "", }, { path: "/foo/bar/baz", expectKey: "foo/", expectRest: "/bar/baz", }, { path: "/foo/", expectKey: "foo", expectRest: "", }, { path: "foo/", expectKey: "foo", expectRest: "", }} func (s *RouterSuite) TestHandlerKey(c *gc.C) { for i, test := range handlerKeyTests { c.Logf("test %d: %s", i, test.path) key, rest := handlerKey(test.path) c.Assert(key, gc.Equals, test.expectKey) c.Assert(rest, gc.Equals, test.expectRest) } } var splitPathTests = []struct { path string index int expectElem string expectRest string }{{ path: "/foo/bar", expectElem: "foo", expectRest: "/bar", }, { path: "foo/bar", expectElem: "foo", expectRest: "/bar", }, { path: "foo/", expectElem: "foo", expectRest: "/", }, { path: "/foo/bar/baz", expectElem: "foo", expectRest: "/bar/baz", }, { path: "/foo", expectElem: "foo", expectRest: "", }, { path: "/foo/bar/baz", index: 4, expectElem: "bar", expectRest: 
"/baz", }} func (s *RouterSuite) TestSplitPath(c *gc.C) { for i, test := range splitPathTests { c.Logf("test %d: %s", i, test.path) elem, index := splitPath(test.path, test.index) c.Assert(elem, gc.Equals, test.expectElem) c.Assert(index, jc.LessThan, len(test.path)+1) c.Assert(test.path[index:], gc.Equals, test.expectRest) } } func (s *RouterSuite) TestWriteJSON(c *gc.C) { rec := httptest.NewRecorder() type Number struct { N int } err := httprequest.WriteJSON(rec, http.StatusTeapot, Number{1234}) c.Assert(err, gc.IsNil) c.Assert(rec.Code, gc.Equals, http.StatusTeapot) c.Assert(rec.Body.String(), gc.Equals, `{"N":1234}`) c.Assert(rec.Header().Get("content-type"), gc.Equals, "application/json") } func (s *RouterSuite) TestWriteError(c *gc.C) { rec := httptest.NewRecorder() WriteError(rec, errgo.Newf("an error")) var errResp params.Error err := json.Unmarshal(rec.Body.Bytes(), &errResp) c.Assert(err, gc.IsNil) c.Assert(errResp, gc.DeepEquals, params.Error{Message: "an error"}) c.Assert(rec.Code, gc.Equals, http.StatusInternalServerError) rec = httptest.NewRecorder() errResp0 := params.Error{ Message: "a message", Code: "some code", } WriteError(rec, &errResp0) var errResp1 params.Error err = json.Unmarshal(rec.Body.Bytes(), &errResp1) c.Assert(err, gc.IsNil) c.Assert(errResp1, gc.DeepEquals, errResp0) c.Assert(rec.Code, gc.Equals, http.StatusInternalServerError) } func (s *RouterSuite) TestServeMux(c *gc.C) { mux := NewServeMux() mux.Handle("/data", HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { return Foo{"hello"}, nil })) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: mux, URL: "/data", ExpectBody: Foo{"hello"}, }) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: mux, URL: "/foo", ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Message: `no handler for "/foo"`, Code: params.ErrNotFound, }, }) } var handlerTests = []struct { about string handler http.Handler urlStr string expectStatus int 
expectBody interface{} }{{ about: "handleErrors, normal error", handler: HandleErrors(func(http.ResponseWriter, *http.Request) error { return errgo.Newf("an error") }), urlStr: "", expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Message: "an error", }, }, { about: "handleErrors, error with code", handler: HandleErrors(func(http.ResponseWriter, *http.Request) error { return ¶ms.Error{ Message: "something went wrong", Code: "snafu", } }), urlStr: "", expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Message: "something went wrong", Code: "snafu", }, }, { about: "handleErrors, no error", handler: HandleErrors(func(w http.ResponseWriter, req *http.Request) error { w.WriteHeader(http.StatusTeapot) return nil }), expectStatus: http.StatusTeapot, }, { about: "handleErrors, params error", handler: HandleErrors(func(w http.ResponseWriter, req *http.Request) error { return params.ErrMetadataNotFound }), expectStatus: http.StatusNotFound, expectBody: params.Error{ Message: "metadata not found", Code: params.ErrMetadataNotFound, }, }, { about: "handleErrors, wrapped params error", handler: HandleErrors(func(w http.ResponseWriter, req *http.Request) error { err := params.ErrMetadataNotFound return errgo.NoteMask(err, "annotation", errgo.Is(params.ErrMetadataNotFound)) }), expectStatus: http.StatusNotFound, expectBody: params.Error{ Message: "annotation: metadata not found", Code: params.ErrMetadataNotFound, }, }, { about: "handleErrors: error - bad request", handler: HandleErrors(func(w http.ResponseWriter, req *http.Request) error { return params.ErrBadRequest }), expectStatus: http.StatusBadRequest, expectBody: params.Error{ Message: "bad request", Code: params.ErrBadRequest, }, }, { about: "handleErrors: error - forbidden", handler: HandleErrors(func(w http.ResponseWriter, req *http.Request) error { return params.ErrForbidden }), expectStatus: http.StatusForbidden, expectBody: params.Error{ Message: "forbidden", Code: 
params.ErrForbidden, }, }, { about: "handleJSON, normal case", handler: HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { return Foo{"hello"}, nil }), expectStatus: http.StatusOK, expectBody: Foo{"hello"}, }, { about: "handleJSON, error case", handler: HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { return nil, errgo.Newf("an error") }), expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Message: "an error", }, }, { about: "NotFoundHandler", handler: NotFoundHandler(), expectStatus: http.StatusNotFound, expectBody: params.Error{ Message: "not found", Code: params.ErrNotFound, }, }} type Foo struct { S string } type ReqInfo struct { Path string Method string Form url.Values `json:",omitempty"` } func (s *RouterSuite) TestHandlers(c *gc.C) { for i, test := range handlerTests { c.Logf("test %d: %s", i, test.about) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: test.handler, URL: "", ExpectStatus: test.expectStatus, ExpectBody: test.expectBody, }) } } var resolvedURLTests = []struct { rurl *ResolvedURL expectPreferredURL *charm.URL expectPromulgatedURL *charm.URL }{{ rurl: MustNewResolvedURL("~charmers/precise/wordpress-23", 4), expectPreferredURL: charm.MustParseURL("precise/wordpress-4"), expectPromulgatedURL: charm.MustParseURL("precise/wordpress-4"), }, { rurl: MustNewResolvedURL("~charmers/precise/wordpress-23", -1), expectPreferredURL: charm.MustParseURL("~charmers/precise/wordpress-23"), }, { rurl: withPreferredSeries(MustNewResolvedURL("~charmers/wordpress-42", 0), "trusty"), expectPreferredURL: charm.MustParseURL("trusty/wordpress-0"), expectPromulgatedURL: charm.MustParseURL("wordpress-0"), }, { rurl: withPreferredSeries(MustNewResolvedURL("~charmers/wordpress-42", -1), "trusty"), expectPreferredURL: charm.MustParseURL("~charmers/trusty/wordpress-42"), }} func withPreferredSeries(r *ResolvedURL, series string) *ResolvedURL { r.PreferredSeries = series return r } func 
(*RouterSuite) TestResolvedURL(c *gc.C) { testMethod := func(name string, rurl *ResolvedURL, m func() *charm.URL, expect *charm.URL) { c.Logf("- method %s", name) u := m() c.Assert(u, jc.DeepEquals, expect) // Ensure it's not aliased. c.Assert(u, gc.Not(gc.Equals), &rurl.URL) } for i, test := range resolvedURLTests { c.Logf("test %d: %#v", i, test.rurl) testMethod("PromulgatedURL", test.rurl, test.rurl.PromulgatedURL, test.expectPromulgatedURL) testMethod("PreferredURL", test.rurl, test.rurl.PreferredURL, test.expectPreferredURL) } } func errorIdHandler(charmId *charm.URL, w http.ResponseWriter, req *http.Request) error { return errgo.Newf("errorIdHandler error") } type idHandlerTestResp struct { Method string CharmURL string Path string } func testIdHandler(charmId *charm.URL, w http.ResponseWriter, req *http.Request) error { httprequest.WriteJSON(w, http.StatusOK, idHandlerTestResp{ CharmURL: charmId.String(), Path: req.URL.Path, Method: req.Method, }) return nil } type metaHandlerTestResp struct { CharmURL string Path string Flags url.Values } var testMetaGetHandler = SingleIncludeHandler( func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { if len(flags) == 0 { flags = nil } return &metaHandlerTestResp{ CharmURL: id.String(), Path: path, Flags: flags, }, nil }, ) type testMetaHandler int func (testMetaHandler) Key() interface{} { type testMetaHandlerKey struct{} return testMetaHandlerKey{} } func (testMetaHandler) HandleGet(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, flags url.Values, req *http.Request) ([]interface{}, error) { results := make([]interface{}, len(hs)) for i, h := range hs { _ = h.(testMetaHandler) if len(flags) == 0 { flags = nil } results[i] = &metaHandlerTestResp{ CharmURL: id.String(), Path: paths[i], Flags: flags, } } return results, nil } type metaHandlerTestPutParams struct { Id string NumHandlers int Paths []string Values []interface{} } func (testMetaHandler) HandlePut(hs 
[]BulkIncludeHandler, id *ResolvedURL, paths []string, rawValues []*json.RawMessage, req *http.Request) []error { // Handlers are provided in arbitrary order, // so we order them (and their associated paths // and values) to enable easier testing. keys := make(sort.StringSlice, len(hs)) for i, h := range hs { // Sort by handler primary, path secondary. keys[i] = fmt.Sprintf("%d.%s", int(h.(testMetaHandler)), paths[i]) } sort.Sort(groupSort{ key: keys, other: []swapper{ sort.StringSlice(paths), swapFunc(func(i, j int) { rawValues[i], rawValues[j] = rawValues[j], rawValues[i] }), }, }) values := make([]interface{}, len(rawValues)) for i, val := range rawValues { err := json.Unmarshal(*val, &values[i]) if err != nil { panic(err) } } RecordCall(metaHandlerTestPutParams{ NumHandlers: len(hs), Id: id.String(), Paths: paths, Values: values, }) return nil } // constMetaHandler returns a handler that always returns the given // value. func constMetaHandler(val interface{}) BulkIncludeHandler { return SingleIncludeHandler( func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return val, nil }, ) } func errorMetaHandler(err error) BulkIncludeHandler { return SingleIncludeHandler( func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return nil, err }, ) } type fieldSelectQueryInfo struct { Id *ResolvedURL Selector map[string]int } type fieldSelectHandleGetInfo struct { HandlerId string Doc fieldSelectQueryInfo Id *ResolvedURL Path string Flags url.Values } type fieldSelectHandleUpdateInfo struct { Id string Fields map[string]fieldSelectHandlePutInfo } type fieldSelectHandlePutInfo struct { Id string Path string Value interface{} } var queryCount int32 var ( callRecordsMutex sync.Mutex callRecords byJSON ) // RecordCall adds a value that can be retrieved later with // RecordedCalls. // // This is used to check the parameters passed to // handlers that do not return results. 
func RecordCall(x interface{}) { callRecordsMutex.Lock() defer callRecordsMutex.Unlock() callRecords = append(callRecords, x) } // ResetRecordedCalls clears the call records. func ResetRecordedCalls() { callRecordsMutex.Lock() defer callRecordsMutex.Unlock() callRecords = nil } // RecordedCalls returns the values passed to RecordCall, // ordered by their JSON serialization. func RecordedCalls() []interface{} { callRecordsMutex.Lock() defer callRecordsMutex.Unlock() sort.Sort(callRecords) return callRecords } // byJSON implements sort.Interface, ordering its // elements lexicographically by marshaled JSON // representation. type byJSON []interface{} func (b byJSON) Less(i, j int) bool { idata, err := json.Marshal(b[i]) if err != nil { panic(err) } jdata, err := json.Marshal(b[j]) if err != nil { panic(err) } return bytes.Compare(idata, jdata) < 0 } func (b byJSON) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byJSON) Len() int { return len(b) } // fieldSelectHandler returns a BulkIncludeHandler that returns // information about the call for testing purposes. // When the GET handler is invoked, it returns a fieldSelectHandleGetInfo value // with the given handlerId. Key holds the grouping key, // and fields holds the fields to select. // // When the PUT handler is invoked SetCallRecord is called with // a fieldSelectHandlePutInfo value holding the parameters that were // provided. 
func fieldSelectHandler(handlerId string, key interface{}, fields ...string) BulkIncludeHandler { query := func(id *ResolvedURL, selector map[string]int, req *http.Request) (interface{}, error) { atomic.AddInt32(&queryCount, 1) return fieldSelectQueryInfo{ Id: id, Selector: selector, }, nil } handleGet := func(doc interface{}, id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { if len(flags) == 0 { flags = nil } return fieldSelectHandleGetInfo{ HandlerId: handlerId, Doc: doc.(fieldSelectQueryInfo), Id: id, Path: path, Flags: flags, }, nil } handlePut := func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { var vali interface{} err := json.Unmarshal(*val, &vali) if err != nil { panic(err) } for _, field := range fields { updater.UpdateField(field+path, fieldSelectHandlePutInfo{ Id: id.String(), Value: vali, }, nil) } return nil } update := func(id *ResolvedURL, fields map[string]interface{}, entries []audit.Entry) error { // We make information on how update and handlePut have // been called by calling SetCallRecord with the above // parameters. The fields will have been created by // handlePut, and therefore are known to contain // fieldSelectHandlePutInfo values. We convert the // values to static types so that it is more obvious // what the values in fieldSelectHandleUpdateInfo.Fields // contain. infoFields := make(map[string]fieldSelectHandlePutInfo) for name, val := range fields { infoFields[name] = val.(fieldSelectHandlePutInfo) } RecordCall(fieldSelectHandleUpdateInfo{ Id: id.String(), Fields: infoFields, }) return nil } return NewFieldIncludeHandler(FieldIncludeHandlerParams{ Key: key, Query: query, Fields: fields, HandleGet: handleGet, HandlePut: handlePut, Update: update, }) } // selectiveIdHandler handles metadata by returning the // data found in the map for the requested id. 
func selectiveIdHandler(m map[string]interface{}) BulkIncludeHandler {
	return SingleIncludeHandler(func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
		return m[id.String()], nil
	})
}

// swapper is the subset of sort.Interface needed to exchange
// two elements of a collection.
type swapper interface {
	Swap(i, j int)
}

// swapFunc adapts a plain function to the swapper interface.
type swapFunc func(i, j int)

func (f swapFunc) Swap(i, j int) {
	f(i, j)
}

// groupSort is an implementation of sort.Interface
// that keeps a set of secondary values sorted according
// to the same criteria as key.
type groupSort struct {
	key   sort.Interface
	other []swapper
}

func (g groupSort) Less(i, j int) bool {
	return g.key.Less(i, j)
}

// Swap exchanges elements i and j in the key and, in lockstep,
// in every secondary collection.
func (g groupSort) Swap(i, j int) {
	g.key.Swap(i, j)
	for _, o := range g.other {
		o.Swap(i, j)
	}
}

func (g groupSort) Len() int {
	return g.key.Len()
}
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/router/util.go0000664000175000017500000001700412672604603026036 0ustar  marcomarco// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package router // import "gopkg.in/juju/charmstore.v5-unstable/internal/router"

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"mime"
	"net/http"
	"strings"

	"github.com/juju/httprequest"
	"github.com/juju/loggo"
	"gopkg.in/errgo.v1"
	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
	"gopkg.in/macaroon-bakery.v1/httpbakery"
)

var logger = loggo.GetLogger("charmstore.internal.router")

// WriteError can be used to write an error response.
var WriteError = errorToResp.WriteError

// JSONHandler represents a handler that returns a JSON value.
// The provided header can be used to set response headers.
type JSONHandler func(http.Header, *http.Request) (interface{}, error)

// ErrorHandler represents a handler that can return an error.
type ErrorHandler func(http.ResponseWriter, *http.Request) error

// HandleJSON converts from a JSONHandler function to an http.Handler.
func HandleJSON(h JSONHandler) http.Handler { // We can't use errorToResp.HandleJSON directly because // we still use old-style handlers in charmstore, so we // insert shim functions to do the conversion. handleJSON := errorToResp.HandleJSON( func(p httprequest.Params) (interface{}, error) { return h(p.Response.Header(), p.Request) }, ) return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { handleJSON(w, req, nil) }) } // HandleJSON converts from a ErrorHandler function to an http.Handler. func HandleErrors(h ErrorHandler) http.Handler { // We can't use errorToResp.HandleErrors directly because // we still use old-style handlers in charmstore, so we // insert shim functions to do the conversion. handleErrors := errorToResp.HandleErrors( func(p httprequest.Params) error { return h(p.Response, p.Request) }, ) return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { handleErrors(w, req, nil) }) } var errorToResp httprequest.ErrorMapper = func(err error) (int, interface{}) { status, body := errorToResp1(err) logger.Infof("error response %d; %s", status, errgo.Details(err)) return status, body } func errorToResp1(err error) (int, interface{}) { // Allow bakery errors to be returned as the bakery would // like them, so that httpbakery.Client.Do will work. if err, ok := errgo.Cause(err).(*httpbakery.Error); ok { return httpbakery.ErrorToResponse(err) } errorBody := errorResponseBody(err) status := http.StatusInternalServerError switch errorBody.Code { case params.ErrNotFound, params.ErrMetadataNotFound: status = http.StatusNotFound case params.ErrBadRequest, params.ErrInvalidEntity: status = http.StatusBadRequest case params.ErrForbidden, params.ErrEntityIdNotAllowed: status = http.StatusForbidden case params.ErrUnauthorized: status = http.StatusUnauthorized case params.ErrMethodNotAllowed: // TODO(rog) from RFC 2616, section 4.7: An Allow header // field MUST be present in a 405 (Method Not Allowed) // response. 
// Perhaps we should not ever return StatusMethodNotAllowed. status = http.StatusMethodNotAllowed case params.ErrServiceUnavailable: status = http.StatusServiceUnavailable } return status, errorBody } // errorResponse returns an appropriate error // response for the provided error. func errorResponseBody(err error) *params.Error { errResp := ¶ms.Error{ Message: err.Error(), } cause := errgo.Cause(err) if coder, ok := cause.(errorCoder); ok { errResp.Code = coder.ErrorCode() } if infoer, ok := cause.(errorInfoer); ok { errResp.Info = infoer.ErrorInfo() } return errResp } type errorInfoer interface { ErrorInfo() map[string]*params.Error } type errorCoder interface { ErrorCode() params.ErrorCode } // multiError holds multiple errors. type multiError map[string]error func (err multiError) Error() string { return fmt.Sprintf("multiple (%d) errors", len(err)) } func (err multiError) ErrorCode() params.ErrorCode { return params.ErrMultipleErrors } func (err multiError) ErrorInfo() map[string]*params.Error { m := make(map[string]*params.Error) for key, err := range err { m[key] = errorResponseBody(err) } return m } // NotFoundHandler is like http.NotFoundHandler except it // returns a JSON error response. func NotFoundHandler() http.Handler { return HandleErrors(func(w http.ResponseWriter, req *http.Request) error { return errgo.WithCausef(nil, params.ErrNotFound, params.ErrNotFound.Error()) }) } func NewServeMux() *ServeMux { return &ServeMux{http.NewServeMux()} } // ServeMux is like http.ServeMux but returns // JSON errors when pages are not found. 
type ServeMux struct {
	*http.ServeMux
}

// ServeHTTP implements http.Handler.ServeHTTP. It dispatches to the
// wrapped http.ServeMux, but writes a JSON params.ErrNotFound error
// when no handler pattern matches the request path.
func (mux *ServeMux) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	if req.RequestURI == "*" {
		mux.ServeMux.ServeHTTP(w, req)
		return
	}
	h, pattern := mux.Handler(req)
	if pattern == "" {
		WriteError(w, errgo.WithCausef(nil, params.ErrNotFound, "no handler for %q", req.URL.Path))
		return
	}
	h.ServeHTTP(w, req)
}

// RelativeURLPath returns a relative URL path that is lexically
// equivalent to targPath when interpreted by url.URL.ResolveReference.
// On success, the returned path will always be non-empty and relative
// to basePath, even if basePath and targPath share no elements.
//
// An error is returned if basePath or targPath are not absolute paths.
func RelativeURLPath(basePath, targPath string) (string, error) {
	if !strings.HasPrefix(basePath, "/") {
		return "", errgo.Newf("non-absolute base URL")
	}
	if !strings.HasPrefix(targPath, "/") {
		return "", errgo.Newf("non-absolute target URL")
	}
	baseParts := strings.Split(basePath, "/")
	targParts := strings.Split(targPath, "/")

	// For the purposes of dotdot, the last element of
	// the paths are irrelevant. We save the last part
	// of the target path for later.
	lastElem := targParts[len(targParts)-1]
	baseParts = baseParts[0 : len(baseParts)-1]
	targParts = targParts[0 : len(targParts)-1]

	// Find the common prefix between the two paths:
	var i int
	for ; i < len(baseParts); i++ {
		if i >= len(targParts) || baseParts[i] != targParts[i] {
			break
		}
	}
	// Each remaining base element becomes a "..", then the
	// remaining target elements (and the saved last element)
	// are appended to reach the target.
	dotdotCount := len(baseParts) - i
	targOnly := targParts[i:]
	result := make([]string, 0, dotdotCount+len(targOnly)+1)
	for i := 0; i < dotdotCount; i++ {
		result = append(result, "..")
	}
	result = append(result, targOnly...)
	result = append(result, lastElem)
	final := strings.Join(result, "/")
	if final == "" {
		// If the final result is empty, the last element must
		// have been empty, so the target was slash terminated
		// and there were no previous elements, so "."
		// is appropriate.
		final = "."
	}
	return final, nil
}

// TODO(mhilton) This is not an ideal place for UnmarshalJSONResponse,
// maybe it should be in httprequest somewhere?

// UnmarshalJSONResponse unmarshals resp.Body into v. If errorF is not
// nil and resp.StatusCode indicates an error has occurred (>= 400) then
// the result of calling errorF with resp is returned.
func UnmarshalJSONResponse(resp *http.Response, v interface{}, errorF func(*http.Response) error) error {
	if errorF != nil && resp.StatusCode >= http.StatusBadRequest {
		return errgo.Mask(errorF(resp), errgo.Any)
	}
	mt, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
	if err != nil {
		return errgo.Notef(err, "cannot parse content type")
	}
	if mt != "application/json" {
		return errgo.Newf("unexpected content type %q", mt)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return errgo.Notef(err, "cannot read response body")
	}
	if err := json.Unmarshal(body, v); err != nil {
		return errgo.Notef(err, "cannot unmarshal response")
	}
	return nil
}
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/router/util_test.go0000664000175000017500000001531112672604603027074 0ustar marcomarco// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package router_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/router" import ( "encoding/json" "errors" "io/ioutil" "net/http" "net/url" "strings" jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/juju/charmstore.v5-unstable/internal/router" ) type utilSuite struct { jujutesting.LoggingSuite } var _ = gc.Suite(&utilSuite{}) var relativeURLTests = []struct { base string target string expect string expectError string }{{ expectError: "non-absolute base URL", }, { base: "/foo", expectError: "non-absolute target URL", }, { base: "foo", expectError: "non-absolute base URL", }, { base: "/foo", target: "foo", expectError: "non-absolute target URL", }, { base: "/foo", target: "/bar", expect: "bar", }, { base: "/foo/", target: "/bar", expect: "../bar", }, { base: "/bar", target: "/foo/", expect: "foo/", }, { base: "/foo/", target: "/bar/", expect: "../bar/", }, { base: "/foo/bar", target: "/bar/", expect: "../bar/", }, { base: "/foo/bar/", target: "/bar/", expect: "../../bar/", }, { base: "/foo/bar/baz", target: "/foo/targ", expect: "../targ", }, { base: "/foo/bar/baz/frob", target: "/foo/bar/one/two/", expect: "../one/two/", }, { base: "/foo/bar/baz/", target: "/foo/targ", expect: "../../targ", }, { base: "/foo/bar/baz/frob/", target: "/foo/bar/one/two/", expect: "../../one/two/", }, { base: "/foo/bar", target: "/foot/bar", expect: "../foot/bar", }, { base: "/foo/bar/baz/frob", target: "/foo/bar", expect: "../../bar", }, { base: "/foo/bar/baz/frob/", target: "/foo/bar", expect: "../../../bar", }, { base: "/foo/bar/baz/frob/", target: "/foo/bar/", expect: "../../", }, { base: "/foo/bar/baz", target: "/foo/bar/other", expect: "other", }, { base: "/foo/bar/", target: "/foo/bar/", expect: ".", }, { base: "/foo/bar", target: "/foo/bar", expect: "bar", }, { base: "/foo/bar/", target: "/foo/bar/", expect: ".", }, { base: "/foo/bar", target: "/foo/", expect: ".", }, { base: "/foo", 
target: "/", expect: ".", }, { base: "/foo/", target: "/", expect: "../", }, { base: "/foo/bar", target: "/", expect: "../", }, { base: "/foo/bar/", target: "/", expect: "../../", }} func (*utilSuite) TestRelativeURL(c *gc.C) { for i, test := range relativeURLTests { c.Logf("test %d: %q %q", i, test.base, test.target) // Sanity check the test itself. if test.expectError == "" { baseURL := &url.URL{Path: test.base} expectURL := &url.URL{Path: test.expect} targetURL := baseURL.ResolveReference(expectURL) c.Check(targetURL.Path, gc.Equals, test.target, gc.Commentf("resolve reference failure (%q + %q != %q)", test.base, test.expect, test.target)) } result, err := router.RelativeURLPath(test.base, test.target) if test.expectError != "" { c.Assert(err, gc.ErrorMatches, test.expectError) c.Assert(result, gc.Equals, "") } else { c.Assert(err, gc.IsNil) c.Check(result, gc.Equals, test.expect) } } } type errorReader struct { err error } func (e errorReader) Read([]byte) (int, error) { return 0, e.err } var unmarshalJSONResponseTests = []struct { about string resp *http.Response errorF func(*http.Response) error expectValue interface{} expectError string expectErrorCause error }{{ about: "unmarshal object", resp: &http.Response{ StatusCode: http.StatusOK, Header: http.Header{ "Content-Type": {"application/json"}, }, Body: ioutil.NopCloser(strings.NewReader(`"OK"`)), }, errorF: func(*http.Response) error { return errors.New("unexpected error") }, expectValue: "OK", }, { about: "error response with function", resp: &http.Response{ StatusCode: http.StatusBadRequest, Header: http.Header{ "Content-Type": {"application/json"}, }, Body: ioutil.NopCloser(strings.NewReader(`"OK"`)), }, errorF: func(*http.Response) error { return errors.New("expected error") }, expectError: "expected error", }, { about: "error response without function", resp: &http.Response{ StatusCode: http.StatusInternalServerError, Header: http.Header{ "Content-Type": {"application/json"}, }, Body: 
ioutil.NopCloser(strings.NewReader(`"OK"`)), }, expectValue: "OK", }, { about: "unparsable content type", resp: &http.Response{ StatusCode: http.StatusOK, Header: http.Header{ "Content-Type": {"application/"}, }, Body: ioutil.NopCloser(strings.NewReader(`"OK"`)), }, errorF: func(*http.Response) error { return errors.New("expected error") }, expectError: "cannot parse content type: mime: expected token after slash", }, { about: "wrong content type", resp: &http.Response{ StatusCode: http.StatusOK, Header: http.Header{ "Content-Type": {"text/plain"}, }, Body: ioutil.NopCloser(strings.NewReader(`"OK"`)), }, errorF: func(*http.Response) error { return errors.New("expected error") }, expectError: `unexpected content type "text/plain"`, }, { about: "read error", resp: &http.Response{ StatusCode: http.StatusOK, Header: http.Header{ "Content-Type": {"application/json"}, }, Body: ioutil.NopCloser(errorReader{errors.New("read error")}), }, errorF: func(*http.Response) error { return errors.New("unexpected error") }, expectError: `cannot read response body: read error`, }, { about: "read error", resp: &http.Response{ StatusCode: http.StatusOK, Header: http.Header{ "Content-Type": {"application/json"}, }, Body: ioutil.NopCloser(strings.NewReader(`"OK`)), }, errorF: func(*http.Response) error { return errors.New("unexpected error") }, expectError: `cannot unmarshal response: unexpected end of JSON input`, }, { about: "error with cause", resp: &http.Response{ StatusCode: http.StatusBadRequest, Header: http.Header{ "Content-Type": {"application/json"}, }, Body: ioutil.NopCloser(strings.NewReader(`"OK"`)), }, errorF: func(*http.Response) error { return errgo.WithCausef(nil, errors.New("expected error"), "an error message") }, expectError: "an error message", expectErrorCause: errors.New("expected error"), }} func (*utilSuite) TestUnmarshalJSONObject(c *gc.C) { for i, test := range unmarshalJSONResponseTests { c.Logf("%d. 
%s", i, test.about) var v json.RawMessage err := router.UnmarshalJSONResponse(test.resp, &v, test.errorF) if test.expectError != "" { c.Assert(err, gc.ErrorMatches, test.expectError) if test.expectErrorCause != nil { c.Assert(errgo.Cause(err), jc.DeepEquals, test.expectErrorCause) } continue } c.Assert(err, gc.IsNil) c.Assert(string(v), jc.JSONEquals, test.expectValue) } } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/router/singleinclude.go0000664000175000017500000000332212672604603027704 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package router // import "gopkg.in/juju/charmstore.v5-unstable/internal/router" import ( "encoding/json" "net/http" "net/url" "gopkg.in/errgo.v1" ) var _ BulkIncludeHandler = SingleIncludeHandler(nil) // SingleIncludeHandler implements BulkMetaHander for a non-batching // metadata retrieval function that can perform a GET only. type SingleIncludeHandler func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) // Key implements BulkMetadataHander.Key. func (h SingleIncludeHandler) Key() interface{} { // Use a local type so that we are guaranteed that nothing // other than SingleIncludeHandler can generate that key. type singleMetaHandlerKey struct{} return singleMetaHandlerKey(singleMetaHandlerKey{}) } // HandleGet implements BulkMetadataHander.HandleGet. func (h SingleIncludeHandler) HandleGet(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, flags url.Values, req *http.Request) ([]interface{}, error) { results := make([]interface{}, len(hs)) for i, h := range hs { h := h.(SingleIncludeHandler) result, err := h(id, paths[i], flags, req) if err != nil { // TODO(rog) include index of failed handler. return nil, errgo.Mask(err, errgo.Any) } results[i] = result } return results, nil } var errPutNotImplemented = errgo.New("PUT not implemented") // HandlePut implements BulkMetadataHander.HandlePut. 
func (h SingleIncludeHandler) HandlePut(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, values []*json.RawMessage, req *http.Request) []error { errs := make([]error, len(hs)) for i := range hs { errs[i] = errPutNotImplemented } return errs } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/router/package_test.go0000664000175000017500000000042712672604603027514 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package router_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/router" import ( "testing" gc "gopkg.in/check.v1" ) func TestPackage(t *testing.T) { gc.TestingT(t) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/router/router.go0000664000175000017500000007270012672604603026405 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. // The router package implements an HTTP request router for charm store // HTTP requests. package router // import "gopkg.in/juju/charmstore.v5-unstable/internal/router" import ( "encoding/json" "fmt" "net/http" "net/url" "reflect" "sort" "strings" "sync" "github.com/juju/httprequest" "github.com/juju/utils/parallel" "gopkg.in/errgo.v1" charm "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/juju/charmstore.v5-unstable/internal/series" ) // Implementation note on error handling: // // We use errgo.Any only when necessary, so that we can see at a glance // which are the possible places that could be returning an error with a // Cause (the only kind of error that can end up setting an HTTP status // code) // BulkIncludeHandler represents a metadata handler that can // handle multiple metadata "include" requests in a single batch. // // For simple metadata handlers that cannot be // efficiently combined, see SingleIncludeHandler. 
//
// All handlers may assume that http.Request.ParseForm
// has been called to parse the URL form values.
type BulkIncludeHandler interface {
	// Key returns a value that will be used to group handlers
	// together in preparation for a call to HandleGet or HandlePut.
	// The key should be comparable for equality.
	// Please do not return NaN. That would be silly, OK?
	Key() interface{}

	// HandleGet returns the results of invoking all the given handlers
	// on the given charm or bundle id. Each result is held in
	// the respective element of the returned slice.
	//
	// All of the handlers' Keys will be equal to the receiving handler's
	// Key.
	//
	// Each item in paths holds the remaining metadata path
	// for the handler in the corresponding position
	// in hs after the prefix in Handlers.Meta has been stripped,
	// and flags holds all the URL query values.
	//
	// TODO(rog) document indexed errors.
	HandleGet(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, flags url.Values, req *http.Request) ([]interface{}, error)

	// HandlePut invokes a PUT request on all the given handlers on
	// the given charm or bundle id. If there is an error, the
	// returned errors slice should contain one element for each element
	// in paths. The error for handler hs[i] should be returned in errors[i].
	// If there is no error, an empty slice should be returned.
	//
	// Each item in paths holds the remaining metadata path
	// for the handler in the corresponding position
	// in hs after the prefix in Handlers.Meta has been stripped,
	// and flags holds all the URL query values.
	HandlePut(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, values []*json.RawMessage, req *http.Request) []error
}

// IdHandler handles a charm store request rooted at the given id.
// The request path (req.URL.Path) holds the URL path after
// the id has been stripped off.
type IdHandler func(charmId *charm.URL, w http.ResponseWriter, req *http.Request) error

// Handlers specifies how HTTP requests will be routed
// by the router. All errors returned by the handlers will
// be processed by WriteError with their Cause left intact.
// This means that, for example, if they return an error
// with a Cause that is params.ErrNotFound, the HTTP
// status code will reflect that (assuming the error has
// not been absorbed by the bulk metadata logic).
type Handlers struct {
	// Global holds handlers for paths not matched by Meta or Id.
	// The map key is the path; the value is the handler that will
	// be used to handle that path.
	//
	// Path matching is by longest-prefix - the same as
	// http.ServeMux.
	//
	// Note that, unlike http.ServeMux, the prefix is stripped
	// from the URL path before the handler is invoked,
	// matching the behaviour of the other handlers.
	Global map[string]http.Handler

	// Id holds handlers for paths which correspond to a single
	// charm or bundle id other than the meta path. The map key
	// holds the first element of the path, which may end in a
	// trailing slash (/) to indicate that longer paths are allowed
	// too.
	Id map[string]IdHandler

	// Meta holds metadata handlers for paths under the meta
	// endpoint. The map key holds the first element of the path,
	// which may end in a trailing slash (/) to indicate that longer
	// paths are allowed too.
	Meta map[string]BulkIncludeHandler
}

// Router represents a charm store HTTP request router.
type Router struct {
	// Context holds context that the router was created with.
	Context Context

	handlers *Handlers
	handler  http.Handler
}

// ResolvedURL represents a URL that has been resolved by resolveURL.
type ResolvedURL struct {
	// URL holds the canonical URL for the entity, as used as a key into
	// the Entities collection. URL.User should always be non-empty
	// and URL.Revision should never be -1. URL.Series will only be non-empty
	// if the URL refers to a multi-series charm.
	URL charm.URL

	// PreferredSeries holds the series to return in PreferredURL
	// if URL itself contains no series.
	PreferredSeries string

	// PromulgatedRevision holds the revision of the promulgated version of the
	// charm or -1 if the corresponding entity is not promulgated.
	PromulgatedRevision int
}

// MustNewResolvedURL returns a new ResolvedURL by parsing
// the entity URL in urlStr. The promulgatedRev parameter
// specifies the value of PromulgatedRevision in the returned
// value.
//
// This function panics if urlStr cannot be parsed as a charm.URL
// or if it is not fully specified, including user and revision.
func MustNewResolvedURL(urlStr string, promulgatedRev int) *ResolvedURL {
	url := mustParseURL(urlStr)
	if url.User == "" || url.Revision == -1 {
		panic(fmt.Errorf("incomplete url %v", urlStr))
	}
	return &ResolvedURL{
		URL:                 *url,
		PromulgatedRevision: promulgatedRev,
	}
}

// PreferredURL returns the promulgated URL for the given id if there is
// one, otherwise it returns the non-promulgated URL. The returned
// *charm.URL may be modified freely.
//
// If id.PreferredSeries is non-empty, the returned charm URL
// will always have a non-empty series.
func (id *ResolvedURL) PreferredURL() *charm.URL {
	u := id.URL
	if u.Series == "" && id.PreferredSeries != "" {
		u.Series = id.PreferredSeries
	}
	if id.PromulgatedRevision == -1 {
		return &u
	}
	u.User = ""
	u.Revision = id.PromulgatedRevision
	return &u
}

// PromulgatedURL returns the promulgated URL for id if there
// is one, or nil otherwise.
func (id *ResolvedURL) PromulgatedURL() *charm.URL {
	if id.PromulgatedRevision == -1 {
		return nil
	}
	u := id.URL
	u.User = ""
	u.Revision = id.PromulgatedRevision
	return &u
}

// GoString implements fmt.GoStringer by formatting the URL member
// as a string rather than as its individual fields, which makes
// test failure output more readable.
func (id *ResolvedURL) GoString() string {
	// Make the URL member visible as a string
	// rather than as a set of members.
	var gid = struct {
		URL                 string
		PreferredSeries     string
		PromulgatedRevision int
	}{
		URL:                 id.URL.String(),
		PreferredSeries:     id.PreferredSeries,
		PromulgatedRevision: id.PromulgatedRevision,
	}
	return fmt.Sprintf("%#v", gid)
}

// String returns the preferred string representation of u.
// It prefers to use the promulgated URL when there is one.
func (u *ResolvedURL) String() string {
	return u.PreferredURL().String()
}

// Context provides contextual information for a router.
type Context interface {
	// ResolveURL will be called to resolve ids in
	// router paths - it should return the fully
	// resolved URL corresponding to the given id.
	// If the entity referred to by the URL does not
	// exist, it should return an error with a params.ErrNotFound
	// cause.
	ResolveURL(id *charm.URL) (*ResolvedURL, error)

	// ResolveURLs is like ResolveURL but resolves multiple URLs
	// at the same time. The length of the returned slice should
	// be len(ids); any entities that are not found should be represented
	// by nil elements.
	ResolveURLs(ids []*charm.URL) ([]*ResolvedURL, error)

	// The AuthorizeEntity function will be called to authorize requests
	// to any BulkIncludeHandlers. All other handlers are expected
	// to handle their own authorization.
	AuthorizeEntity(id *ResolvedURL, req *http.Request) error

	// WillIncludeMetadata informs the context that the given metadata
	// includes will be required in the request. This allows the context
	// to prime any cache fetches to fetch this data when early
	// fetches which may not require the metadata are made.
	// This method should ignore any unrecognized names.
	WillIncludeMetadata(includes []string)
}

// New returns a charm store router that will route requests to
// the given handlers and retrieve metadata from the given database.
//
// The Context argument provides additional context to the
// router. Any errors returned by the context methods will
// have their cause preserved when creating the error return
// as for the handlers.
func New( handlers *Handlers, ctxt Context, ) *Router { r := &Router{ handlers: handlers, Context: ctxt, } mux := NewServeMux() mux.Handle("/meta/", http.StripPrefix("/meta", HandleErrors(r.serveBulkMeta))) for path, handler := range r.handlers.Global { path = "/" + path prefix := strings.TrimSuffix(path, "/") mux.Handle(path, http.StripPrefix(prefix, handler)) } mux.Handle("/", HandleErrors(r.serveIds)) r.handler = mux return r } // ServeHTTP implements http.Handler.ServeHTTP. func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { // Allow cross-domain access from anywhere, including AJAX // requests. An AJAX request will add an X-Requested-With: // XMLHttpRequest header, which is a non-standard header, and // hence will require a pre-flight request, so we need to // specify that that header is allowed, and we also need to // implement the OPTIONS method so that the pre-flight request // can work. // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS header := w.Header() header.Set("Access-Control-Allow-Origin", "*") header.Set("Access-Control-Allow-Headers", "Bakery-Protocol-Version, Macaroons, X-Requested-With") header.Set("Access-Control-Allow-Credentials", "true") header.Set("Access-Control-Cache-Max-Age", "600") header.Set("Access-Control-Allow-Methods", "DELETE,GET,HEAD,PUT,POST,OPTIONS") header.Set("Access-Control-Expose-Headers", "WWW-Authenticate") if req.Method == "OPTIONS" { // We cheat here and say that all methods are allowed, // even though any individual endpoint will allow // only a subset of these. This means we can avoid // putting OPTIONS handling in every endpoint, // and it shouldn't actually matter in practice. 
header.Set("Allow", "DELETE,GET,HEAD,PUT,POST") header.Set("Access-Control-Allow-Origin", req.Header.Get("Origin")) return } if err := req.ParseForm(); err != nil { WriteError(w, errgo.Notef(err, "cannot parse form")) return } r.handler.ServeHTTP(w, req) } // Handlers returns the set of handlers that the router was created with. // This should not be changed. func (r *Router) Handlers() *Handlers { return r.handlers } // serveIds serves requests that may be rooted at a charm or bundle id. func (r *Router) serveIds(w http.ResponseWriter, req *http.Request) error { // We can ignore a trailing / because we do not return any // relative URLs. If we start to return relative URL redirects, // we will need to redirect non-slash-terminated URLs // to slash-terminated URLs. // http://cdivilly.wordpress.com/2014/03/11/why-trailing-slashes-on-uris-are-important/ path := strings.TrimSuffix(req.URL.Path, "/") url, path, err := splitId(path) if err != nil { return errgo.WithCausef(err, params.ErrNotFound, "") } key, path := handlerKey(path) if key == "" { return errgo.WithCausef(nil, params.ErrNotFound, "") } handler := r.handlers.Id[key] if handler != nil { req.URL.Path = path err := handler(url, w, req) // Note: preserve error cause from handlers. return errgo.Mask(err, errgo.Any) } if key != "meta/" && key != "meta" { return errgo.WithCausef(nil, params.ErrNotFound, params.ErrNotFound.Error()) } req.URL.Path = path return r.serveMeta(url, w, req) } func idHandlerNeedsResolveURL(req *http.Request) bool { return req.Method != "POST" && req.Method != "PUT" } // handlerKey returns a key that can be used to look up a handler at the // given path, and the remaining path elements. If there is no possible // key, the returned key is empty. func handlerKey(path string) (key, rest string) { path = strings.TrimPrefix(path, "/") key, i := splitPath(path, 0) if key == "" { // TODO what *should* we get if we GET just an id? 
return "", rest } if i < len(path)-1 { // There are more elements, so include the / character // that terminates the element. return path[0 : i+1], path[i:] } return key, "" } func (r *Router) serveMeta(id *charm.URL, w http.ResponseWriter, req *http.Request) error { switch req.Method { case "GET", "HEAD": r.willIncludeMetadata(req) rurl, err := r.Context.ResolveURL(id) if err != nil { // Note: preserve error cause from ResolveURL. return errgo.Mask(err, errgo.Any) } resp, err := r.serveMetaGet(rurl, req) if err != nil { // Note: preserve error causes from meta handlers. return errgo.Mask(err, errgo.Any) } httprequest.WriteJSON(w, http.StatusOK, resp) return nil case "PUT": rurl, err := r.Context.ResolveURL(id) if err != nil { // Note: preserve error cause from ResolveURL. return errgo.Mask(err, errgo.Any) } // Put requests don't return any data unless there's // an error. return r.serveMetaPut(rurl, req) } return params.ErrMethodNotAllowed } // willIncludeMetadata notifies the context about any metadata // that will probably be required by the request, so that initial // fetches (for example by ResolveURL) can fetch additional // data too. The request is assumed to be for a /meta request, // with the actual meta path in req.Path (e.g. /any, /metaname). func (r *Router) willIncludeMetadata(req *http.Request) { // We assume that any "include" attribute is an included metadata // specifier. This is perhaps arguable, but it's currently true, // including more fields can't do any harm and it's a simple rule. // Note that we must call this method before resolving the URL. includes := req.Form["include"] if path := strings.TrimPrefix(req.URL.Path, "/"); path != "" && path != "any" { includes = append(includes, path) } r.Context.WillIncludeMetadata(includes) } func (r *Router) serveMetaGet(rurl *ResolvedURL, req *http.Request) (interface{}, error) { // TODO: consider whether we might want the capability to // have different permissions for different meta endpoints. 
if err := r.Context.AuthorizeEntity(rurl, req); err != nil { return nil, errgo.Mask(err, errgo.Any) } key, path := handlerKey(req.URL.Path) if key == "" { // GET id/meta // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmeta return r.metaNames(), nil } if key == "any" { return r.serveMetaGetAny(rurl, req) } if handler := r.handlers.Meta[key]; handler != nil { results, err := handler.HandleGet([]BulkIncludeHandler{handler}, rurl, []string{path}, req.Form, req) if err != nil { // Note: preserve error cause from handlers. return nil, errgo.Mask(err, errgo.Any) } result := results[0] if isNull(result) { return nil, params.ErrMetadataNotFound } return results[0], nil } return nil, errgo.WithCausef(nil, params.ErrNotFound, "unknown metadata %q", strings.TrimPrefix(req.URL.Path, "/")) } // GET id/meta/any?[include=meta[&include=meta...]] // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaany func (r *Router) serveMetaGetAny(id *ResolvedURL, req *http.Request) (interface{}, error) { includes := req.Form["include"] if len(includes) == 0 { return params.MetaAnyResponse{Id: id.PreferredURL()}, nil } meta, err := r.GetMetadata(id, includes, req) if err != nil { // Note: preserve error cause from handlers. return nil, errgo.Mask(err, errgo.Any) } return params.MetaAnyResponse{ Id: id.PreferredURL(), Meta: meta, }, nil } const jsonContentType = "application/json" func unmarshalJSONBody(req *http.Request, val interface{}) error { if ct := req.Header.Get("Content-Type"); ct != jsonContentType { return errgo.WithCausef(nil, params.ErrBadRequest, "unexpected Content-Type %q; expected %q", ct, jsonContentType) } dec := json.NewDecoder(req.Body) if err := dec.Decode(val); err != nil { return errgo.Notef(err, "cannot unmarshal body") } return nil } // serveMetaPut serves a PUT request to the metadata for the given id. // The metadata to be put is in the request body. // PUT /$id/meta/... 
func (r *Router) serveMetaPut(id *ResolvedURL, req *http.Request) error { if err := r.Context.AuthorizeEntity(id, req); err != nil { return errgo.Mask(err, errgo.Any) } var body json.RawMessage if err := unmarshalJSONBody(req, &body); err != nil { return errgo.Mask(err, errgo.Is(params.ErrBadRequest)) } return r.serveMetaPutBody(id, req, &body) } // serveMetaPutBody serves a PUT request to the metadata for the given id. // The metadata to be put is in body. // This method is used both for individual metadata PUTs and // also bulk metadata PUTs. func (r *Router) serveMetaPutBody(id *ResolvedURL, req *http.Request, body *json.RawMessage) error { key, path := handlerKey(req.URL.Path) if key == "" { return params.ErrForbidden } if key == "any" { // PUT id/meta/any var bodyMeta struct { Meta map[string]*json.RawMessage } if err := json.Unmarshal(*body, &bodyMeta); err != nil { return errgo.Notef(err, "cannot unmarshal body") } if err := r.PutMetadata(id, bodyMeta.Meta, req); err != nil { return errgo.Mask(err, errgo.Any) } return nil } if handler := r.handlers.Meta[key]; handler != nil { errs := handler.HandlePut( []BulkIncludeHandler{handler}, id, []string{path}, []*json.RawMessage{body}, req, ) if len(errs) > 0 && errs[0] != nil { // Note: preserve error cause from handlers. return errgo.Mask(errs[0], errgo.Any) } return nil } return errgo.WithCausef(nil, params.ErrNotFound, "") } // isNull reports whether the given value will encode to // a null JSON value. func isNull(val interface{}) bool { if val == nil { return true } v := reflect.ValueOf(val) if kind := v.Kind(); kind != reflect.Map && kind != reflect.Ptr && kind != reflect.Slice { return false } return v.IsNil() } // metaNames returns a slice of all the metadata endpoint names. func (r *Router) metaNames() []string { names := make([]string, 0, len(r.handlers.Meta)) for name := range r.handlers.Meta { // Ensure that we don't generate duplicate entries // when there's an entry for both "x" and "x/". 
trimmed := strings.TrimSuffix(name, "/") if trimmed != name && r.handlers.Meta[trimmed] != nil { continue } names = append(names, trimmed) } sort.Strings(names) return names } // serveBulkMeta serves bulk metadata requests (requests to /meta/...). func (r *Router) serveBulkMeta(w http.ResponseWriter, req *http.Request) error { switch req.Method { case "GET", "HEAD": // A bare meta returns all endpoints. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#bulk-requests-and-missing-metadata if req.URL.Path == "/" || req.URL.Path == "" { httprequest.WriteJSON(w, http.StatusOK, r.metaNames()) return nil } resp, err := r.serveBulkMetaGet(req) if err != nil { return errgo.Mask(err, errgo.Any) } httprequest.WriteJSON(w, http.StatusOK, resp) return nil case "PUT": return r.serveBulkMetaPut(req) default: return params.ErrMethodNotAllowed } } // serveBulkMetaGet serves the "bulk" metadata retrieval endpoint // that can return information on several ids at once. // // GET meta/$endpoint?id=$id0[&id=$id1...][$otherflags] // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-metaendpoint func (r *Router) serveBulkMetaGet(req *http.Request) (interface{}, error) { ids := req.Form["id"] if len(ids) == 0 { return nil, errgo.WithCausef(nil, params.ErrBadRequest, "no ids specified in meta request") } delete(req.Form, "id") ignoreAuth, err := ParseBool(req.Form.Get("ignore-auth")) if err != nil { return nil, errgo.WithCausef(err, params.ErrBadRequest, "") } delete(req.Form, "ignore-auth") r.willIncludeMetadata(req) urls := make([]*charm.URL, len(ids)) for i, id := range ids { url, err := parseURL(id) if err != nil { return nil, errgo.WithCausef(err, params.ErrBadRequest, "") } urls[i] = url } rurls, err := r.Context.ResolveURLs(urls) if err != nil { // Note: preserve error cause from resolveURL. 
return nil, errgo.Mask(err, errgo.Any) } result := make(map[string]interface{}) for i, rurl := range rurls { if rurl == nil { // URLs not found will be omitted from the result. // https://github.com/juju/charmstore/blob/v4/docs/API.md#bulk-requests-and-missing-metadata continue } meta, err := r.serveMetaGet(rurl, req) if cause := errgo.Cause(err); cause == params.ErrNotFound || cause == params.ErrMetadataNotFound || (ignoreAuth && isAuthorizationError(cause)) { // The relevant data does not exist, or it is not public and client // asked not to authorize. // https://github.com/juju/charmstore/blob/v4/docs/API.md#bulk-requests-and-missing-metadata continue } if err != nil { return nil, errgo.Mask(err) } result[ids[i]] = meta } return result, nil } // ParseBool returns the boolean value represented by the string. // It accepts "1" or "0". Any other value returns an error. func ParseBool(value string) (bool, error) { switch value { case "0", "": return false, nil case "1": return true, nil } return false, errgo.Newf(`unexpected bool value %q (must be "0" or "1")`, value) } // isAuthorizationError reports whether the given error cause is an // authorization error. func isAuthorizationError(cause error) bool { if cause == params.ErrUnauthorized { return true } _, ok := cause.(*httpbakery.Error) return ok } // serveBulkMetaPut serves a bulk PUT request to several ids. 
// PUT /meta/$endpoint
// See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-metaendpoint
func (r *Router) serveBulkMetaPut(req *http.Request) error {
	if len(req.Form["id"]) > 0 {
		return fmt.Errorf("ids may not be specified in meta PUT request")
	}
	// The request body maps each entity id to the metadata value
	// to be written for it.
	var ids map[string]*json.RawMessage
	if err := unmarshalJSONBody(req, &ids); err != nil {
		return errgo.Mask(err, errgo.Is(params.ErrBadRequest))
	}
	// multiErr is allocated lazily so the common all-success
	// case allocates nothing.
	var multiErr multiError
	for id, val := range ids {
		if err := r.serveBulkMetaPutOne(req, id, val); err != nil {
			if multiErr == nil {
				multiErr = make(multiError)
			}
			// Note: errgo.Any preserves the cause so that each
			// per-id error keeps its original error code.
			multiErr[id] = errgo.Mask(err, errgo.Any)
		}
	}
	if len(multiErr) != 0 {
		return multiErr
	}
	return nil
}

// serveBulkMetaPutOne serves a PUT to a single id as part of a bulk PUT
// request. It's in a separate function to make the error handling easier.
func (r *Router) serveBulkMetaPutOne(req *http.Request, id string, val *json.RawMessage) error {
	url, err := parseURL(id)
	if err != nil {
		return errgo.Mask(err)
	}
	rurl, err := r.Context.ResolveURL(url)
	if err != nil {
		// Note: preserve error cause from resolveURL.
		return errgo.Mask(err, errgo.Any)
	}
	// Each id in a bulk PUT is authorized individually.
	if err := r.Context.AuthorizeEntity(rurl, req); err != nil {
		return errgo.Mask(err, errgo.Any)
	}
	if err := r.serveMetaPutBody(rurl, req, val); err != nil {
		return errgo.Mask(err, errgo.Any)
	}
	return nil
}

// MetaHandler returns the meta handler for the given meta
// path by looking it up in the Meta map.
func (r *Router) MetaHandler(metaPath string) BulkIncludeHandler {
	// handlerKey strips any trailing path elements so that, for
	// example, "foo/bar" resolves to the handler registered for "foo/".
	key, _ := handlerKey(metaPath)
	return r.handlers.Meta[key]
}

// maxMetadataConcurrency specifies the maximum number
// of goroutines started to service a given GetMetadata request.
// 5 is enough to more than cover the number of metadata
// group handlers in the current API.
const maxMetadataConcurrency = 5

// GetMetadata retrieves metadata for the given charm or bundle id,
// including information as specified by the includes slice.
func (r *Router) GetMetadata(id *ResolvedURL, includes []string, req *http.Request) (map[string]interface{}, error) { groups := make(map[interface{}][]BulkIncludeHandler) includesByGroup := make(map[interface{}][]string) for _, include := range includes { handler := r.MetaHandler(include) if handler == nil { return nil, errgo.Newf("unrecognized metadata name %q", include) } // Get the key that lets us group this handler into the // correct bulk group. key := handler.Key() groups[key] = append(groups[key], handler) includesByGroup[key] = append(includesByGroup[key], include) } results := make(map[string]interface{}) // TODO when the number of groups is 1 (a common case, // using parallel.NewRun is actually slowing things down // by creating a goroutine). We could optimise it so that // it doesn't actually create a goroutine in that case. run := parallel.NewRun(maxMetadataConcurrency) var mu sync.Mutex for _, g := range groups { g := g run.Do(func() error { // We know that we must have at least one element in the // slice here. We could use any member of the slice to // actually handle the request, so arbitrarily choose // g[0]. Note that g[0].Key() is equal to g[i].Key() for // every i in the slice. groupIncludes := includesByGroup[g[0].Key()] // Paths contains all the path elements after // the handler key has been stripped off. // TODO(rog) BUG shouldn't this be len(groupIncludes) ? paths := make([]string, len(g)) for i, include := range groupIncludes { _, paths[i] = handlerKey(include) } groupResults, err := g[0].HandleGet(g, id, paths, nil, req) if err != nil { // TODO(rog) if it's a BulkError, attach // the original include path to error (the BulkError // should contain the index of the failed one). return errgo.Mask(err, errgo.Any) } mu.Lock() for i, result := range groupResults { // Omit nil results from map. Note: omit statically typed // nil results too to make it easy for handlers to return // possibly nil data with a static type. 
// https://github.com/juju/charmstore/blob/v4/docs/API.md#bulk-requests-and-missing-metadata if !isNull(result) { results[groupIncludes[i]] = result } } mu.Unlock() return nil }) } if err := run.Wait(); err != nil { // We could have got multiple errors, but we'll only return one of them. return nil, errgo.Mask(err.(parallel.Errors)[0], errgo.Any) } return results, nil } // PutMetadata puts metadata for the given id. Each key in data holds // the name of a metadata endpoint; its associated value // holds the value to be written. func (r *Router) PutMetadata(id *ResolvedURL, data map[string]*json.RawMessage, req *http.Request) error { groups := make(map[interface{}][]BulkIncludeHandler) valuesByGroup := make(map[interface{}][]*json.RawMessage) pathsByGroup := make(map[interface{}][]string) for path, body := range data { handler := r.MetaHandler(path) if handler == nil { return errgo.Newf("unrecognized metadata name %q", path) } // Get the key that lets us group this handler into the // correct bulk group. key := handler.Key() groups[key] = append(groups[key], handler) valuesByGroup[key] = append(valuesByGroup[key], body) // Paths contains all the path elements after // the handler key has been stripped off. pathsByGroup[key] = append(pathsByGroup[key], path) } var multiErr multiError for _, g := range groups { // We know that we must have at least one element in the // slice here. We could use any member of the slice to // actually handle the request, so arbitrarily choose // g[0]. Note that g[0].Key() is equal to g[i].Key() for // every i in the slice. key := g[0].Key() paths := pathsByGroup[key] // The paths passed to the handler contain all the path elements // after the handler key has been stripped off. 
strippedPaths := make([]string, len(paths)) for i, path := range paths { _, strippedPaths[i] = handlerKey(path) } errs := g[0].HandlePut(g, id, strippedPaths, valuesByGroup[key], req) if len(errs) > 0 { if multiErr == nil { multiErr = make(multiError) } if len(errs) != len(paths) { return fmt.Errorf("unexpected error count; expected %d, got %q", len(paths), errs) } for i, err := range errs { if err != nil { multiErr[paths[i]] = err } } } } if len(multiErr) != 0 { return multiErr } return nil } // splitPath returns the first path element // after path[i:] and the start of the next // element. // // For example, splitPath("/foo/bar/bzr", 4) returns ("bar", 8). func splitPath(path string, i int) (elem string, nextIndex int) { if i < len(path) && path[i] == '/' { i++ } j := strings.Index(path[i:], "/") if j == -1 { return path[i:], len(path) } j += i return path[i:j], j } // splitId splits the given URL path into a charm or bundle // URL and the rest of the path. func splitId(path string) (url *charm.URL, rest string, err error) { path = strings.TrimPrefix(path, "/") part, i := splitPath(path, 0) // Skip ~. if strings.HasPrefix(part, "~") { part, i = splitPath(path, i) } // Skip series. if _, ok := series.Series[part]; ok { part, i = splitPath(path, i) } // part should now contain the charm name, // and path[0:i] should contain the entire // charm id. 
urlStr := strings.TrimSuffix(path[0:i], "/") url, err = parseURL(urlStr) if err != nil { return nil, "", errgo.Mask(err) } return url, path[i:], nil } func mustParseURL(s string) *charm.URL { u, err := parseURL(s) if err != nil { panic(err) } return u } func parseURL(s string) (*charm.URL, error) { u, err := charm.ParseURL(s) if err != nil { return nil, err } if u.Channel != "" { return nil, errgo.Newf("charmstore ids must not contain a channel") } return u, nil } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/router/fieldinclude.go0000664000175000017500000001421312672604603027507 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package router // import "gopkg.in/juju/charmstore.v5-unstable/internal/router" import ( "encoding/json" "net/http" "net/url" "gopkg.in/errgo.v1" "gopkg.in/juju/charmstore.v5-unstable/audit" ) // A FieldQueryFunc is used to retrieve a metadata document for the given URL, // selecting only those fields specified in keys of the given selector. type FieldQueryFunc func(id *ResolvedURL, selector map[string]int, req *http.Request) (interface{}, error) // FieldUpdater records field changes made by a FieldUpdateFunc. type FieldUpdater struct { fields map[string]interface{} entries []audit.Entry search bool } // UpdateField requests that the provided field is updated with // the given value. func (u *FieldUpdater) UpdateField(fieldName string, val interface{}, entry *audit.Entry) { u.fields[fieldName] = val if entry != nil { u.entries = append(u.entries, *entry) } } // UpdateSearch requests that search records are updated. func (u *FieldUpdater) UpdateSearch() { u.search = true } // A FieldUpdateFunc is used to update a metadata document for the // given id. For each field in fields, it should set that field to // its corresponding value in the metadata document. 
type FieldUpdateFunc func(id *ResolvedURL, fields map[string]interface{}, entries []audit.Entry) error

// A FieldUpdateSearchFunc is used to update a search document for the
// given id. For each field in fields, it should set that field to
// its corresponding value in the search document.
type FieldUpdateSearchFunc func(id *ResolvedURL, fields map[string]interface{}) error

// A FieldGetFunc returns some data from the given document. The
// document will have been returned from an earlier call to the
// associated QueryFunc.
type FieldGetFunc func(doc interface{}, id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error)

// FieldPutFunc sets using the given FieldUpdater corresponding to fields to be set
// in the metadata document for the given id. The path holds the metadata path
// after the initial prefix has been removed.
type FieldPutFunc func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error

// FieldIncludeHandlerParams specifies the parameters for NewFieldIncludeHandler.
type FieldIncludeHandlerParams struct {
	// Key is used to group together similar FieldIncludeHandlers
	// (the same query should be generated for any given key).
	Key interface{}

	// Query is used to retrieve the document from the database for
	// GET requests. The fields passed to the query will be the
	// union of all fields found in all the handlers in the bulk
	// request.
	Query FieldQueryFunc

	// Fields specifies which fields are required by the given handler.
	Fields []string

	// Handle actually returns the data from the document retrieved
	// by Query, for GET requests.
	HandleGet FieldGetFunc

	// HandlePut generates update operations for a PUT
	// operation.
	HandlePut FieldPutFunc

	// Update is used to update the document in the database for
	// PUT requests.
	Update FieldUpdateFunc

	// UpdateSearch is used to update the document in the search
	// database for PUT requests.
	UpdateSearch FieldUpdateSearchFunc
}

// FieldIncludeHandler implements BulkIncludeHandler by
// making a single request with a number of aggregated fields.
type FieldIncludeHandler struct {
	P FieldIncludeHandlerParams
}

// NewFieldIncludeHandler returns a BulkIncludeHandler that will perform
// only a single database query for several requests. See FieldIncludeHandlerParams
// for more detail.
//
// See in ../v4/api.go for an example of its use.
func NewFieldIncludeHandler(p FieldIncludeHandlerParams) *FieldIncludeHandler {
	return &FieldIncludeHandler{p}
}

// Key implements BulkIncludeHandler.Key.
func (h *FieldIncludeHandler) Key() interface{} {
	return h.P.Key
}

// HandlePut implements BulkIncludeHandler.HandlePut.
func (h *FieldIncludeHandler) HandlePut(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, values []*json.RawMessage, req *http.Request) []error {
	// A single updater accumulates the field changes requested by
	// every handler so that Update is called at most once below.
	updater := &FieldUpdater{
		fields:  make(map[string]interface{}),
		entries: make([]audit.Entry, 0),
	}
	// errs is allocated lazily: it stays nil when every put succeeds.
	var errs []error
	errCount := 0
	// setError records err for handler i. Only the first error per
	// handler is kept, so errCount counts handlers with any error.
	setError := func(i int, err error) {
		if errs == nil {
			errs = make([]error, len(hs))
		}
		if errs[i] == nil {
			errs[i] = err
			errCount++
		}
	}
	// Let each handler contribute its field changes to the shared updater.
	for i, h := range hs {
		h := h.(*FieldIncludeHandler)
		if h.P.HandlePut == nil {
			setError(i, errgo.New("PUT not supported"))
			continue
		}
		if err := h.P.HandlePut(id, paths[i], values[i], updater, req); err != nil {
			setError(i, errgo.Mask(err, errgo.Any))
		}
	}
	if errCount == len(hs) {
		// Every HandlePut request has drawn an error,
		// no need to call Update.
		return errs
	}
	// Apply the accumulated changes; an Update failure is reported
	// against every handler that has not already failed.
	if err := h.P.Update(id, updater.fields, updater.entries); err != nil {
		for i := range hs {
			setError(i, err)
		}
	}
	if updater.search {
		if err := h.P.UpdateSearch(id, updater.fields); err != nil {
			for i := range hs {
				setError(i, err)
			}
		}
	}
	return errs
}

// HandleGet implements BulkIncludeHandler.HandleGet.
func (h *FieldIncludeHandler) HandleGet(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, flags url.Values, req *http.Request) ([]interface{}, error) {
	funcs := make([]FieldGetFunc, len(hs))
	selector := make(map[string]int)
	// Extract the handler functions and union all the fields.
	for i, h := range hs {
		h := h.(*FieldIncludeHandler)
		funcs[i] = h.P.HandleGet
		for _, field := range h.P.Fields {
			selector[field] = 1
		}
	}
	// Make the single query.
	doc, err := h.P.Query(id, selector, req)
	if err != nil {
		// Note: preserve error cause from handlers.
		return nil, errgo.Mask(err, errgo.Any)
	}
	// Call all the handlers with the resulting query document.
	// Each handler extracts its own slice of the shared document.
	results := make([]interface{}, len(hs))
	for i, f := range funcs {
		var err error
		results[i], err = f(doc, id, paths[i], flags, req)
		if err != nil {
			// TODO correlate error with handler (perhaps return
			// an error that identifies the slice position of the handler that
			// failed).
			// Note: preserve error cause from handlers.
			return nil, errgo.Mask(err, errgo.Any)
		}
	}
	return results, nil
}
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/0000775000175000017500000000000012672604603023542 5ustar  marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/content_test.go0000664000175000017500000003574012672604603026613 0ustar  marcomarco// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "bytes" "fmt" "io" "io/ioutil" "net/http" "path/filepath" "sort" "strings" jc "github.com/juju/testing/checkers" "github.com/juju/testing/httptesting" "github.com/juju/xml" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" "gopkg.in/juju/charmstore.v5-unstable/internal/v5" ) var serveDiagramErrorsTests = []struct { about string url string expectStatus int expectBody interface{} }{{ about: "entity not found", url: "~charmers/bundle/foo-23/diagram.svg", expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: `no matching charm or bundle for cs:~charmers/bundle/foo-23`, }, }, { about: "diagram for a charm", url: "~charmers/wordpress/diagram.svg", expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: "diagrams not supported for charms", }, }} func (s *APISuite) TestServeDiagramErrors(c *gc.C) { id := newResolvedURL("cs:~charmers/trusty/wordpress-42", 42) s.addPublicCharmFromRepo(c, "wordpress", id) id = newResolvedURL("cs:~charmers/bundle/nopositionbundle-42", 42) s.addPublicBundleFromRepo(c, "wordpress-simple", id, true) for i, test := range serveDiagramErrorsTests { c.Logf("test %d: %s", i, test.about) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(test.url), ExpectStatus: test.expectStatus, ExpectBody: test.expectBody, }) } } func (s *APISuite) TestServeDiagram(c *gc.C) { bundle := storetesting.NewBundle(&charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "wordpress": { Charm: "wordpress", Annotations: map[string]string{ "gui-x": "100", "gui-y": "200", }, }, "mysql": { Charm: "utopic/mysql-23", Annotations: map[string]string{ "gui-x": "200", "gui-y": "200", }, }, }, }, ) url := newResolvedURL("cs:~charmers/bundle/wordpressbundle-42", 
42) s.addRequiredCharms(c, bundle) err := s.store.AddBundleWithArchive(url, bundle) c.Assert(err, gc.IsNil) s.setPublic(c, url) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("bundle/wordpressbundle/diagram.svg"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %q", rec.Body.Bytes())) c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") assertCacheControl(c, rec.Header(), true) // Check that the output contains valid XML with an SVG tag, // but don't check the details of the output so that this test doesn't // break every time the jujusvg presentation changes. // Also check that we get an image for each service containing the charm // icon link. assertXMLContains(c, rec.Body.Bytes(), map[string]func(xml.Token) bool{ "svg element": isStartElementWithName("svg"), "wordpress icon": isStartElementWithAttr("image", "href", "../../wordpress/icon.svg"), "mysql icon": isStartElementWithAttr("image", "href", "../../utopic/mysql-23/icon.svg"), }) // Do the same check again, but with the short form of the id; // the relative links should change accordingly. rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("wordpressbundle/diagram.svg"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %q", rec.Body.Bytes())) // Check that the output contains valid XML with an SVG tag, // but don't check the details of the output so that this test doesn't // break every time the jujusvg presentation changes. // Also check that we get an image for each service containing the charm // icon link. 
assertXMLContains(c, rec.Body.Bytes(), map[string]func(xml.Token) bool{ "svg element": isStartElementWithName("svg"), "wordpress icon": isStartElementWithAttr("image", "href", "../wordpress/icon.svg"), "mysql icon": isStartElementWithAttr("image", "href", "../utopic/mysql-23/icon.svg"), }) } func (s *APISuite) TestServeDiagramNoPosition(c *gc.C) { bundle := storetesting.NewBundle( &charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "wordpress": { Charm: "wordpress", }, "mysql": { Charm: "utopic/mysql-23", Annotations: map[string]string{ "gui-x": "200", "gui-y": "200", }, }, }, }) url := newResolvedURL("cs:~charmers/bundle/wordpressbundle-42", 42) s.addRequiredCharms(c, bundle) err := s.store.AddBundleWithArchive(url, bundle) c.Assert(err, gc.IsNil) s.setPublic(c, url) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("bundle/wordpressbundle/diagram.svg"), }) // Check that the request succeeds and has the expected content type. c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %q", rec.Body.Bytes())) c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") } var serveReadMeTests = []struct { name string expectNotFound bool }{{ name: "README.md", }, { name: "README.rst", }, { name: "readme", }, { name: "README", }, { name: "ReadMe.Txt", }, { name: "README.ex", }, { name: "", expectNotFound: true, }, { name: "readme-youtube-subscribe.html", expectNotFound: true, }, { name: "readme Dutch.txt", expectNotFound: true, }, { name: "readme Dutch.txt", expectNotFound: true, }, { name: "README.debugging", expectNotFound: true, }} func (s *APISuite) TestServeReadMe(c *gc.C) { url := newResolvedURL("cs:~charmers/precise/wordpress-0", -1) for i, test := range serveReadMeTests { c.Logf("test %d: %s", i, test.name) wordpress := storetesting.Charms.ClonedDir(c.MkDir(), "wordpress") content := fmt.Sprintf("some content %d", i) if test.name != "" { err := ioutil.WriteFile(filepath.Join(wordpress.Path, 
test.name), []byte(content), 0666) c.Assert(err, gc.IsNil) } url.URL.Revision = i s.addPublicCharm(c, wordpress, url) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(url.URL.Path() + "/readme"), }) if test.expectNotFound { c.Assert(rec.Code, gc.Equals, http.StatusNotFound) c.Assert(rec.Body.String(), jc.JSONEquals, params.Error{ Code: params.ErrNotFound, Message: "not found", }) } else { c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.String(), gc.DeepEquals, content) assertCacheControl(c, rec.Header(), true) } } } func charmWithExtraFile(c *gc.C, name, file, content string) *charm.CharmDir { ch := storetesting.Charms.ClonedDir(c.MkDir(), name) err := ioutil.WriteFile(filepath.Join(ch.Path, file), []byte(content), 0666) c.Assert(err, gc.IsNil) return ch } func (s *APISuite) TestServeIcon(c *gc.C) { content := `an icon, really` expected := `an icon, really` wordpress := charmWithExtraFile(c, "wordpress", "icon.svg", content) url := newResolvedURL("cs:~charmers/precise/wordpress-0", -1) err := s.store.AddCharmWithArchive(url, wordpress) c.Assert(err, gc.IsNil) s.setPublic(c, url) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(url.URL.Path() + "/icon.svg"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.String(), gc.Equals, expected) c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") assertCacheControl(c, rec.Header(), true) // Test with revision -1 noRevURL := url.URL noRevURL.Revision = -1 rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(noRevURL.Path() + "/icon.svg"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.String(), gc.Equals, expected) c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") assertCacheControl(c, rec.Header(), true) // Reload the charm with an icon that already has viewBox. 
wordpress = storetesting.Charms.ClonedDir(c.MkDir(), "wordpress") err = ioutil.WriteFile(filepath.Join(wordpress.Path, "icon.svg"), []byte(expected), 0666) c.Assert(err, gc.IsNil) url.URL.Revision++ err = s.store.AddCharmWithArchive(url, wordpress) c.Assert(err, gc.IsNil) s.setPublic(c, url) // Check that we still get expected svg. rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(url.URL.Path() + "/icon.svg"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.String(), gc.Equals, expected) c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") } func (s *APISuite) TestServeBundleIcon(c *gc.C) { s.addPublicBundleFromRepo(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/something-32", 32), true) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("~charmers/bundle/something-32/icon.svg"), ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Code: params.ErrNotFound, Message: "icons not supported for bundles", }, }) } func (s *APISuite) TestServeDefaultIcon(c *gc.C) { wordpress := storetesting.Charms.ClonedDir(c.MkDir(), "wordpress") url := newResolvedURL("cs:~charmers/precise/wordpress-0", 0) s.addPublicCharm(c, wordpress, url) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(url.URL.Path() + "/icon.svg"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.String(), gc.Equals, v5.DefaultIcon) c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") assertCacheControl(c, rec.Header(), true) } func (s *APISuite) TestServeDefaultIconForBadXML(c *gc.C) { for i, content := range []string{ "\x89\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44", // Technically this XML is not bad - we just can't parse it because // it's got internally defined character entities. Nonetheless, we treat // it as "bad" for the time being. 
cloudfoundrySVG, } { wordpress := charmWithExtraFile(c, "wordpress", "icon.svg", content) url := newResolvedURL("cs:~charmers/precise/wordpress-0", -1) url.URL.Revision = i s.addPublicCharm(c, wordpress, url) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(url.URL.Path() + "/icon.svg"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.String(), gc.Equals, v5.DefaultIcon) c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") assertCacheControl(c, rec.Header(), true) } } func (s *APISuite) TestProcessIconWorksOnDefaultIcon(c *gc.C) { var buf bytes.Buffer err := v5.ProcessIcon(&buf, strings.NewReader(v5.DefaultIcon)) c.Assert(err, gc.IsNil) assertXMLEqual(c, buf.Bytes(), []byte(v5.DefaultIcon)) } func (s *APISuite) TestProcessIconDoesNotQuoteNewlines(c *gc.C) { // Note: this is important because Chrome does not like // to see before the opening tag. icon := ` ` var buf bytes.Buffer err := v5.ProcessIcon(&buf, strings.NewReader(icon)) c.Assert(err, gc.IsNil) if strings.Contains(buf.String(), "&#x") { c.Errorf("newlines were quoted in processed icon output") } } // assertXMLEqual assers that the xml contained in the // two slices is equal, without caring about namespace // declarations or attribute ordering. 
func assertXMLEqual(c *gc.C, body []byte, expect []byte) { decBody := xml.NewDecoder(bytes.NewReader(body)) decExpect := xml.NewDecoder(bytes.NewReader(expect)) for i := 0; ; i++ { tok0, err0 := decBody.Token() tok1, err1 := decExpect.Token() if err1 != nil { c.Assert(err0, gc.NotNil) c.Assert(err0.Error(), gc.Equals, err1.Error()) break } ok, err := tokenEqual(tok0, tok1) if !ok { c.Logf("got %#v", tok0) c.Logf("want %#v", tok1) c.Fatalf("mismatch at token %d: %v", i, err) } } } func tokenEqual(tok0, tok1 xml.Token) (bool, error) { tok0 = canonicalXMLToken(tok0) tok1 = canonicalXMLToken(tok1) return jc.DeepEqual(tok0, tok1) } func canonicalXMLToken(tok xml.Token) xml.Token { start, ok := tok.(xml.StartElement) if !ok { return tok } // Remove all namespace-defining attributes. j := 0 for _, attr := range start.Attr { if attr.Name.Local == "xmlns" && attr.Name.Space == "" || attr.Name.Space == "xmlns" { continue } start.Attr[j] = attr j++ } start.Attr = start.Attr[0:j] sort.Sort(attrByName(start.Attr)) return start } type attrByName []xml.Attr func (a attrByName) Len() int { return len(a) } func (a attrByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a attrByName) Less(i, j int) bool { if a[i].Name.Space != a[j].Name.Space { return a[i].Name.Space < a[j].Name.Space } return a[i].Name.Local < a[j].Name.Local } // assertXMLContains asserts that the XML in body is well formed, and // contains at least one token that satisfies each of the functions in need. 
func assertXMLContains(c *gc.C, body []byte, need map[string]func(xml.Token) bool) { dec := xml.NewDecoder(bytes.NewReader(body)) for { tok, err := dec.Token() if err == io.EOF { break } c.Assert(err, gc.IsNil) for what, f := range need { if f(tok) { delete(need, what) } } } c.Assert(need, gc.HasLen, 0, gc.Commentf("body:\n%s", body)) } func isStartElementWithName(name string) func(xml.Token) bool { return func(tok xml.Token) bool { startElem, ok := tok.(xml.StartElement) return ok && startElem.Name.Local == name } } func isStartElementWithAttr(name, attr, val string) func(xml.Token) bool { return func(tok xml.Token) bool { startElem, ok := tok.(xml.StartElement) if !ok { return false } for _, a := range startElem.Attr { if a.Name.Local == attr && a.Value == val { return true } } return false } } const cloudfoundrySVG = ` ]> content omitted ` ��������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/archive_test.go��������������������0000664�0001750�0001750�00000155004�12672604603�026556� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "archive/zip" "bytes" "crypto/sha256" "encoding/base64" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/http/httptest" "net/url" "os" "sort" "strconv" "strings" "sync" "time" jc "github.com/juju/testing/checkers" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" charmtesting "gopkg.in/juju/charmrepo.v2-unstable/testing" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" "gopkg.in/mgo.v2/bson" "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/stats" "gopkg.in/juju/charmstore.v5-unstable/internal/v5" ) type commonArchiveSuite struct { commonSuite } type ArchiveSuite struct { commonArchiveSuite } var _ = gc.Suite(&ArchiveSuite{}) func (s *ArchiveSuite) SetUpSuite(c *gc.C) { s.enableIdentity = true s.commonSuite.SetUpSuite(c) } func (s *ArchiveSuite) TestGetCharmWithTerms(c *gc.C) { client := httpbakery.NewHTTPClient() id := newResolvedURL("cs:~charmers/precise/terms-0", -1) s.addPublicCharm(c, storetesting.NewCharm(&charm.Meta{ Terms: []string{"terms-1/1", "terms-2/5"}, }), id) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("~charmers/precise/terms-0/archive"), Do: bakeryDo(client), }) c.Assert(rec.Code, gc.Equals, http.StatusUnauthorized) } func (s *ArchiveSuite) TestGet(c *gc.C) { id := newResolvedURL("cs:~charmers/precise/wordpress-0", -1) ch := storetesting.NewCharm(nil) s.addPublicCharm(c, ch, id) rec := s.assertArchiveDownload( c, 
"~charmers/precise/wordpress-0", nil, ch.Bytes(), ) c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/precise/wordpress-0") assertCacheControl(c, rec.Header(), true) // Check that the HTTP range logic is plugged in OK. If this // is working, we assume that the whole thing is working OK, // as net/http is well-tested. rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("~charmers/precise/wordpress-0/archive"), Header: http.Header{"Range": {"bytes=10-100"}}, }) c.Assert(rec.Code, gc.Equals, http.StatusPartialContent, gc.Commentf("body: %q", rec.Body.Bytes())) c.Assert(rec.Body.Bytes(), gc.HasLen, 100-10+1) c.Assert(rec.Body.Bytes(), gc.DeepEquals, ch.Bytes()[10:101]) c.Assert(rec.Header().Get(params.ContentHashHeader), gc.Equals, hashOfBytes(ch.Bytes())) c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/precise/wordpress-0") assertCacheControl(c, rec.Header(), true) } func (s *ArchiveSuite) TestGetWithPartialId(c *gc.C) { id := newResolvedURL("cs:~charmers/precise/wordpress-0", -1) ch := storetesting.NewCharm(nil) s.addPublicCharm(c, ch, id) rec := s.assertArchiveDownload( c, "~charmers/wordpress", nil, ch.Bytes(), ) // The complete entity id can be retrieved from the response header. 
c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, id.URL.String()) } func (s *ArchiveSuite) TestGetPromulgatedWithPartialId(c *gc.C) { id := newResolvedURL("cs:~charmers/utopic/wordpress-42", 42) ch := storetesting.NewCharm(nil) s.addPublicCharm(c, ch, id) rec := s.assertArchiveDownload( c, "wordpress", nil, ch.Bytes(), ) c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, id.PromulgatedURL().String()) } func (s *ArchiveSuite) TestGetCounters(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } for i, id := range []*router.ResolvedURL{ newResolvedURL("~who/utopic/mysql-42", 42), } { c.Logf("test %d: %s", i, id) ch := storetesting.NewCharm(nil) s.addPublicCharm(c, ch, id) // Download the charm archive using the API, which should increment // the download counts. s.assertArchiveDownload( c, id.URL.Path(), nil, ch.Bytes(), ) // Check that the downloads count for the entity has been updated. key := []string{params.StatsArchiveDownload, "utopic", "mysql", id.URL.User, "42"} stats.CheckCounterSum(c, s.store, key, false, 1) // Check that the promulgated download count for the entity has also been updated key = []string{params.StatsArchiveDownloadPromulgated, "utopic", "mysql", "", "42"} stats.CheckCounterSum(c, s.store, key, false, 1) } } func (s *ArchiveSuite) TestGetCountersDisabled(c *gc.C) { id := newResolvedURL("~charmers/utopic/mysql-42", 42) ch := storetesting.NewCharm(nil) s.addPublicCharm(c, ch, id) // Download the charm archive using the API, passing stats=0. s.assertArchiveDownload( c, "", &httptesting.DoRequestParams{URL: storeURL("~charmers/utopic/mysql-42/archive?stats=0")}, ch.Bytes(), ) // Check that the downloads count for the entity has not been updated. 
	key := []string{params.StatsArchiveDownload, "utopic", "mysql", "", "42"}
	stats.CheckCounterSum(c, s.store, key, false, 0)
}

// archivePostErrorsTests describes requests to the archive POST endpoint
// that are expected to fail, and the error each should produce.
var archivePostErrorsTests = []struct {
	about           string
	url             string
	noContentLength bool
	noHash          bool
	entity          charmstore.ArchiverTo
	expectStatus    int
	expectMessage   string
	expectCode      params.ErrorCode
}{{
	about:         "revision specified",
	url:           "~charmers/precise/wordpress-23",
	expectStatus:  http.StatusBadRequest,
	expectMessage: "revision specified, but should not be specified",
	expectCode:    params.ErrBadRequest,
}, {
	about:         "no hash given",
	url:           "~charmers/precise/wordpress",
	noHash:        true,
	expectStatus:  http.StatusBadRequest,
	expectMessage: "hash parameter not specified",
	expectCode:    params.ErrBadRequest,
}, {
	about:           "no content length",
	url:             "~charmers/precise/wordpress",
	noContentLength: true,
	expectStatus:    http.StatusBadRequest,
	expectMessage:   "Content-Length not specified",
	expectCode:      params.ErrBadRequest,
}, {
	about:         "invalid channel",
	url:           "~charmers/bad-wolf/trusty/wordpress",
	expectStatus:  http.StatusNotFound,
	expectMessage: "not found",
	expectCode:    params.ErrNotFound,
}, {
	about:         "no series",
	url:           "~charmers/juju-gui",
	expectStatus:  http.StatusForbidden,
	expectMessage: "series not specified in url or charm metadata",
	expectCode:    params.ErrEntityIdNotAllowed,
}, {
	about: "url series not in metadata",
	url:   "~charmers/precise/juju-gui",
	entity: storetesting.NewCharm(&charm.Meta{
		Series: []string{"trusty"},
	}),
	expectStatus:  http.StatusForbidden,
	expectMessage: `"precise" series not listed in charm metadata`,
	expectCode:    params.ErrEntityIdNotAllowed,
}, {
	about: "bad combination of series",
	url:   "~charmers/juju-gui",
	entity: storetesting.NewCharm(&charm.Meta{
		Series: []string{"precise", "win10"},
	}),
	expectStatus:  http.StatusBadRequest,
	expectMessage: `cannot mix series from ubuntu and windows in single charm`,
	expectCode:    params.ErrInvalidEntity,
}, {
	about: "unknown series",
	url:   "~charmers/juju-gui",
	entity: storetesting.NewCharm(&charm.Meta{
		Series: []string{"precise", "nosuchseries"},
	}),
	expectStatus:  http.StatusBadRequest,
	expectMessage: `unrecognized series "nosuchseries" in metadata`,
	expectCode:    params.ErrInvalidEntity,
}}

// TestPostErrors runs each archivePostErrorsTests case against the
// archive POST endpoint and checks the error response.
func (s *ArchiveSuite) TestPostErrors(c *gc.C) {
	// exoticReader hides the concrete reader type from net/http (see below).
	type exoticReader struct {
		io.Reader
	}
	for i, test := range archivePostErrorsTests {
		c.Logf("test %d: %s", i, test.about)
		if test.entity == nil {
			test.entity = storetesting.NewCharm(nil)
		}
		blob, hashSum := getBlob(test.entity)
		body := io.Reader(blob)
		if test.noContentLength {
			// net/http will automatically add a Content-Length header
			// if it sees *strings.Reader, but not if it's a type it doesn't
			// know about.
			body = exoticReader{body}
		}
		path := storeURL(test.url) + "/archive"
		if !test.noHash {
			path += "?hash=" + hashSum
		}
		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
			Handler: s.srv,
			URL:     path,
			Method:  "POST",
			Header: http.Header{
				"Content-Type": {"application/zip"},
			},
			Body:         body,
			Username:     testUsername,
			Password:     testPassword,
			ExpectStatus: test.expectStatus,
			ExpectBody: params.Error{
				Message: test.expectMessage,
				Code:    test.expectCode,
			},
		})
	}
}

func (s *ArchiveSuite) TestConcurrentUploads(c *gc.C) {
	wordpress := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress")
	f, err := os.Open(wordpress.Path)
	c.Assert(err, gc.IsNil)
	var buf bytes.Buffer
	_, err = io.Copy(&buf, f)
	c.Assert(err, gc.IsNil)
	hash, _ := hashOf(bytes.NewReader(buf.Bytes()))

	srv := httptest.NewServer(s.srv)
	defer srv.Close()

	// Our strategy for testing concurrent uploads is as follows: We
	// repeat uploading a bunch of simultaneous uploads to the same
	// charm. Each upload should either succeed, or fail with an
	// ErrDuplicateUpload error. We make sure that all replies are
	// like this, and that at least one duplicate upload error is
	// found, so that we know we've tested that error path.

	errorBodies := make(chan io.ReadCloser)

	// upload performs one upload of the testing charm.
	// It sends the response body on the errorBodies channel when
	// it finds an error response.
	upload := func() {
		c.Logf("uploading")
		body := bytes.NewReader(buf.Bytes())
		url := srv.URL + storeURL("~charmers/precise/wordpress/archive?hash="+hash)
		req, err := http.NewRequest("POST", url, body)
		c.Assert(err, gc.IsNil)
		req.Header.Set("Content-Type", "application/zip")
		req.SetBasicAuth(testUsername, testPassword)
		resp, err := http.DefaultClient.Do(req)
		if !c.Check(err, gc.IsNil) {
			return
		}
		if resp.StatusCode == http.StatusOK {
			// Successful upload: nothing to report.
			resp.Body.Close()
			return
		}
		errorBodies <- resp.Body
	}

	// The try loop continues concurrently uploading
	// charms until it is told to stop (by closing the try
	// channel). It then signals that it has terminated
	// by closing errorBodies.
	try := make(chan struct{})
	go func(try chan struct{}) {
		for _ = range try {
			// Launch 5 simultaneous uploads and wait for them all.
			var wg sync.WaitGroup
			for p := 0; p < 5; p++ {
				wg.Add(1)
				go func() {
					upload()
					wg.Done()
				}()
			}
			wg.Wait()
		}
		close(errorBodies)
	}(try)

	// We continue the loop until we have found an
	// error (or the maximum iteration count has
	// been exceeded).
	foundError := false
	count := 0
loop:
	for {
		select {
		case body, ok := <-errorBodies:
			if !ok {
				// The try loop has terminated,
				// so we need to stop too.
				break loop
			}
			dec := json.NewDecoder(body)
			var errResp params.Error
			err := dec.Decode(&errResp)
			body.Close()
			c.Assert(err, gc.IsNil)
			c.Assert(errResp, jc.DeepEquals, params.Error{
				Message: "duplicate upload",
				Code:    params.ErrDuplicateUpload,
			})
			// We've found the error we're looking for,
			// so we signal to the try loop that it can stop.
			// We will process any outstanding error bodies,
			// before seeing errorBodies closed and exiting
			// the loop.
			foundError = true
			if try != nil {
				close(try)
				// Setting try to nil disables this select case from now on.
				try = nil
			}
		case try <- struct{}{}:
			// In cases we've seen, the actual maximum value of
			// count is 1, but let's allow for serious scheduler vagaries.
			if count++; count > 200 {
				c.Fatalf("200 tries with no duplicate error")
			}
		}
	}
	if !foundError {
		c.Errorf("no duplicate-upload errors found")
	}
}

// TestPostCharm checks that POSTing successive charm archives allocates
// incrementing revision numbers.
func (s *ArchiveSuite) TestPostCharm(c *gc.C) {
	s.discharge = dischargeForUser("charmers")

	// A charm that did not exist before should get revision 0.
	s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress", nil)

	// Subsequent charm uploads should increment the revision by 1.
	s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-1", -1), "mysql", nil)

	// Subsequent charm uploads should increment the revision by 1.
	s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-2", -1), "wordpress", nil)

	// Retrieving the unpublished version returns the latest charm.
	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
		Handler: s.srv,
		URL:     storeURL("~charmers/wordpress/archive?channel=unpublished"),
		Do:      bakeryDo(nil),
	})
	c.Assert(rec.Code, gc.Equals, http.StatusOK)
	c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/precise/wordpress-2")
}

// TestPostCurrentVersion checks that re-uploading identical content
// keeps the existing revision.
func (s *ArchiveSuite) TestPostCurrentVersion(c *gc.C) {
	s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress", nil)

	// Subsequent charm uploads should not increment the revision by 1.
	s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress", nil)
}

func (s *ArchiveSuite) TestPostMultiSeriesCharm(c *gc.C) {
	// A charm that did not exist before should get revision 0.
	s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/juju-gui-0", -1), "multi-series", nil)
}

// TestPostMultiSeriesCharmRevisionAfterAllSingleSeriesOnes checks that a
// new multi-series revision is numbered above all existing single-series
// revisions of the same charm.
func (s *ArchiveSuite) TestPostMultiSeriesCharmRevisionAfterAllSingleSeriesOnes(c *gc.C) {
	// Create some single series versions of the charm.
	s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/vivid/juju-gui-1", -1), "mysql", nil)
	s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/trusty/juju-gui-12", -1), "mysql", nil)
	s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/precise/juju-gui-44", -1), "mysql", nil)

	// Check that the new multi-series revision takes the a revision
	// number larger than the largest of all the single series
	// revisions.
	s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/juju-gui-45", -1), "multi-series", nil)
}

// TestPostMultiSeriesPromulgatedRevisionAfterAllSingleSeriesOnes is the
// promulgated-revision counterpart of the test above.
func (s *ArchiveSuite) TestPostMultiSeriesPromulgatedRevisionAfterAllSingleSeriesOnes(c *gc.C) {
	// Create some single series versions of the charm.
	s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/vivid/juju-gui-1", 0), "mysql", nil)
	s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/trusty/juju-gui-12", 9), "mysql", nil)
	s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/precise/juju-gui-44", 33), "mysql", nil)

	// Check that the new multi-series promulgated revision takes the
	// a revision number larger than the largest of all the single
	// series revisions.
	s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/juju-gui-45", 34), "multi-series", nil)
}

// TestPostSingleSeriesCharmWhenMultiSeriesVersionExists checks that a
// single-series upload is rejected once a multi-series charm with the
// same name exists.
func (s *ArchiveSuite) TestPostSingleSeriesCharmWhenMultiSeriesVersionExists(c *gc.C) {
	s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/juju-gui-0", -1), "multi-series", nil)
	s.assertUploadCharmError(
		c,
		"POST",
		charm.MustParseURL("~charmers/saucy/juju-gui-0"),
		nil,
		"wordpress",
		nil,
		http.StatusForbidden,
		params.Error{
			Message: "charm name duplicates multi-series charm name cs:~charmers/juju-gui-0",
			Code:    params.ErrEntityIdNotAllowed,
		},
	)
}

// TestPutCharmWithChannel checks PUT uploads published to one or more
// explicit channels.
func (s *ArchiveSuite) TestPutCharmWithChannel(c *gc.C) {
	s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/precise/juju-gui-0", -1), "wordpress", []params.Channel{params.DevelopmentChannel})
	s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/precise/juju-gui-1", -1), "wordpress", []params.Channel{params.StableChannel})
	s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/precise/juju-gui-2", -1), "wordpress", []params.Channel{params.StableChannel, params.DevelopmentChannel})
}

// TestPutCharmWithInvalidChannel checks that unknown channels and the
// "unpublished" channel are rejected on PUT.
func (s *ArchiveSuite) TestPutCharmWithInvalidChannel(c *gc.C) {
	s.assertUploadCharmError(
		c,
		"PUT",
		charm.MustParseURL("~charmers/saucy/juju-gui-0"),
		nil,
		"wordpress",
		[]params.Channel{params.DevelopmentChannel, "bad"},
		http.StatusBadRequest,
		params.Error{
			Message: `invalid channel "bad" specified in request`,
			Code:    params.ErrBadRequest,
		},
	)
	s.assertUploadCharmError(
		c,
		"PUT",
		charm.MustParseURL("~charmers/saucy/juju-gui-0"),
		nil,
		"wordpress",
		[]params.Channel{params.UnpublishedChannel},
		http.StatusBadRequest,
		params.Error{
			Message: `cannot put entity into channel "unpublished"`,
			Code:    params.ErrBadRequest,
		},
	)
}

func (s *ArchiveSuite) TestPutCharm(c *gc.C) {
	s.assertUploadCharm(
		c,
		"PUT",
		newResolvedURL("~charmers/precise/wordpress-3", 3),
		"wordpress",
		nil,
	)

	s.assertUploadCharm(
		c,
		"PUT",
		newResolvedURL("~charmers/precise/wordpress-1", -1),
		"wordpress",
		nil,
	)

	// Check that we get a duplicate-upload error if we try to upload
	// to the same revision again.
	s.assertUploadCharmError(
		c,
		"PUT",
		charm.MustParseURL("~charmers/precise/wordpress-3"),
		nil,
		"mysql",
		nil,
		http.StatusInternalServerError,
		params.Error{
			Message: "duplicate upload",
			Code:    params.ErrDuplicateUpload,
		},
	)

	// Check we get an error if promulgated url already uploaded.
	s.assertUploadCharmError(
		c,
		"PUT",
		charm.MustParseURL("~charmers/precise/wordpress-4"),
		charm.MustParseURL("precise/wordpress-3"),
		"wordpress",
		nil,
		http.StatusInternalServerError,
		params.Error{
			Message: "duplicate upload",
			Code:    params.ErrDuplicateUpload,
		},
	)

	// Check we get an error if promulgated url has user.
	s.assertUploadCharmError(
		c,
		"PUT",
		charm.MustParseURL("~charmers/precise/wordpress-4"),
		charm.MustParseURL("~charmers/precise/wordpress-4"),
		"mysql",
		nil,
		http.StatusBadRequest,
		params.Error{
			Message: "promulgated URL cannot have a user",
			Code:    params.ErrBadRequest,
		},
	)

	// Check we get an error if promulgated url has different name.
	s.assertUploadCharmError(
		c,
		"PUT",
		charm.MustParseURL("~charmers/precise/wordpress-4"),
		charm.MustParseURL("precise/mysql-4"),
		"mysql",
		nil,
		http.StatusBadRequest,
		params.Error{
			Message: "promulgated URL has incorrect charm name",
			Code:    params.ErrBadRequest,
		},
	)
}

// TestPostBundle checks bundle uploads and their revision allocation.
func (s *ArchiveSuite) TestPostBundle(c *gc.C) {
	// Upload the required charms.
	for _, rurl := range []*router.ResolvedURL{
		newResolvedURL("cs:~charmers/utopic/mysql-42", 42),
		newResolvedURL("cs:~charmers/utopic/wordpress-47", 47),
		newResolvedURL("cs:~charmers/utopic/logging-1", 1),
	} {
		err := s.store.AddCharmWithArchive(rurl, storetesting.Charms.CharmArchive(c.MkDir(), rurl.URL.Name))
		c.Assert(err, gc.IsNil)
		err = s.store.Publish(rurl, params.StableChannel)
		c.Assert(err, gc.IsNil)
	}

	// A bundle that did not exist before should get revision 0.
	s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-0", -1), "wordpress-simple")

	// Subsequent bundle uploads should increment the
	// revision by 1.
	s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-1", -1), "wordpress-with-logging")

	// Uploading the same archive twice should not increment the revision...
	s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-1", -1), "wordpress-with-logging")

	// ... but uploading an archive used by a previous revision should.
	s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-2", -1), "wordpress-simple")
}

// TestPostHashMismatch checks that an upload whose body does not match
// the declared hash is rejected.
func (s *ArchiveSuite) TestPostHashMismatch(c *gc.C) {
	content := []byte("some content")
	hash, _ := hashOf(bytes.NewReader(content))

	// Corrupt the content.
	copy(content, "bogus")
	path := fmt.Sprintf("~charmers/precise/wordpress/archive?hash=%s", hash)
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler: s.srv,
		URL:     storeURL(path),
		Method:  "POST",
		Header: http.Header{
			"Content-Type": {"application/zip"},
		},
		Body:         bytes.NewReader(content),
		Username:     testUsername,
		Password:     testPassword,
		ExpectStatus: http.StatusInternalServerError,
		ExpectBody: params.Error{
			Message: "cannot put archive blob: hash mismatch",
		},
	})
}

// invalidZip returns a reader holding content that is not a valid zip file.
func invalidZip() io.ReadSeeker {
	return strings.NewReader("invalid zip content")
}

func (s *ArchiveSuite) TestPostInvalidCharmZip(c *gc.C) {
	s.assertCannotUpload(c, "~charmers/precise/wordpress", invalidZip(), http.StatusBadRequest, params.ErrInvalidEntity, "cannot read charm archive: zip: not a valid zip file")
}

func (s *ArchiveSuite) TestPostInvalidBundleZip(c *gc.C) {
	s.assertCannotUpload(c, "~charmers/bundle/wordpress", invalidZip(), http.StatusBadRequest, params.ErrInvalidEntity, "cannot read bundle archive: zip: not a valid zip file")
}

// postInvalidCharmMetadataTests lists charm metadata that still contains
// unedited template placeholder names, which the store must reject.
var postInvalidCharmMetadataTests = []struct {
	about       string
	spec        charmtesting.CharmSpec
	expectError string
}{{
	about: "bad provider relation name",
	spec: charmtesting.CharmSpec{
		Meta: `
name: foo
summary: bar
description: d
provides:
  relation-name:
    interface: baz
`,
	},
	expectError: "relation relation-name has almost certainly not been changed from the template",
}, {
	about: "bad provider interface name",
	spec: charmtesting.CharmSpec{
		Meta: `
name: foo
summary: bar
description: d
provides:
  baz:
    interface: interface-name
`,
	},
	expectError: "interface interface-name in relation baz has almost certainly not been changed from the template",
}, {
	about: "bad requirer relation name",
	spec: charmtesting.CharmSpec{
		Meta: `
name: foo
summary: bar
description: d
requires:
  relation-name:
    interface: baz
`,
	},
	expectError: "relation relation-name has almost certainly not been changed from the template",
}, {
	about: "bad requirer interface name",
	spec: charmtesting.CharmSpec{
		Meta: `
name: foo
summary: bar
description: d
requires:
  baz:
    interface: interface-name
`,
	},
	expectError: "interface interface-name in relation baz has almost certainly not been changed from the template",
}, {
	about: "bad peer relation name",
	spec: charmtesting.CharmSpec{
		Meta: `
name: foo
summary: bar
description: d
peers:
  relation-name:
    interface: baz
`,
	},
	expectError: "relation relation-name has almost certainly not been changed from the template",
}, {
	about: "bad peer interface name",
	spec: charmtesting.CharmSpec{
		Meta: `
name: foo
summary: bar
description: d
peers:
  baz:
    interface: interface-name
`,
	},
	expectError: "interface interface-name in relation baz has almost certainly not been changed from the template",
}}

func (s *ArchiveSuite) TestPostInvalidCharmMetadata(c *gc.C) {
	for i, test := range postInvalidCharmMetadataTests {
		c.Logf("test %d: %s", i, test.about)
		ch := charmtesting.NewCharm(c, test.spec)
		r := bytes.NewReader(ch.ArchiveBytes())
		s.assertCannotUpload(c, "~charmers/trusty/wordpress", r, http.StatusBadRequest, params.ErrInvalidEntity, test.expectError)
	}
}

func (s *ArchiveSuite) TestPostInvalidBundleData(c *gc.C) {
	path := storetesting.Charms.BundleArchivePath(c.MkDir(), "bad")
	f, err := os.Open(path)
	c.Assert(err, gc.IsNil)
	defer f.Close()
	// Here we exercise both bundle internal verification (bad relation) and
	// validation with respect to charms (wordpress and mysql are missing).
	expectErr := `bundle verification failed: [` +
		`"relation [\"foo:db\" \"mysql:server\"] refers to service \"foo\" not defined in this bundle",` +
		`"service \"mysql\" refers to non-existent charm \"mysql\"",` +
		`"service \"wordpress\" refers to non-existent charm \"wordpress\""]`
	s.assertCannotUpload(c, "~charmers/bundle/wordpress", f, http.StatusBadRequest, params.ErrInvalidEntity, expectErr)
}

// TestPostCounters checks that a successful upload increments the
// archive-upload counter.
func (s *ArchiveSuite) TestPostCounters(c *gc.C) {
	if !storetesting.MongoJSEnabled() {
		c.Skip("MongoDB JavaScript not available")
	}
	s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress", nil)

	// Check that the upload count for the entity has been updated.
	key := []string{params.StatsArchiveUpload, "precise", "wordpress", "charmers"}
	stats.CheckCounterSum(c, s.store, key, false, 1)
}

// TestPostFailureCounters checks that failed uploads increment the
// failed-upload counter.
func (s *ArchiveSuite) TestPostFailureCounters(c *gc.C) {
	if !storetesting.MongoJSEnabled() {
		c.Skip("MongoDB JavaScript not available")
	}
	hash, _ := hashOf(invalidZip())
	doPost := func(url string, expectCode int) {
		rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
			Handler: s.srv,
			URL:     storeURL(url),
			Method:  "POST",
			Header: http.Header{
				"Content-Type": {"application/zip"},
			},
			Body:     invalidZip(),
			Username: testUsername,
			Password: testPassword,
		})
		c.Assert(rec.Code, gc.Equals, expectCode, gc.Commentf("body: %s", rec.Body.Bytes()))
	}

	// Send a first invalid request (revision specified).
	doPost("~charmers/utopic/wordpress-42/archive", http.StatusBadRequest)
	// Send a second invalid request (no hash).
	doPost("~charmers/utopic/wordpress/archive", http.StatusBadRequest)
	// Send a third invalid request (invalid zip).
	doPost("~charmers/utopic/wordpress/archive?hash="+hash, http.StatusBadRequest)

	// Check that the failed upload count for the entity has been updated.
	key := []string{params.StatsArchiveFailedUpload, "utopic", "wordpress", "charmers"}
	stats.CheckCounterSum(c, s.store, key, false, 3)
}

// TestUploadOfCurrentCharmReadsFully checks that when the uploaded
// content is already current, the server still consumes the whole
// request body (b.Len() reaches 0).
func (s *ArchiveSuite) TestUploadOfCurrentCharmReadsFully(c *gc.C) {
	s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress", nil)

	ch := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress")
	f, err := os.Open(ch.Path)
	c.Assert(err, gc.IsNil)
	defer f.Close()

	// Calculate blob hashes.
	hash := blobstore.NewHash()
	_, err = io.Copy(hash, f)
	c.Assert(err, gc.IsNil)
	hashSum := fmt.Sprintf("%x", hash.Sum(nil))

	// Simulate upload of current version.
	h := s.handler(c)
	defer h.Close()
	b := bytes.NewBuffer([]byte("test body"))
	r, err := http.NewRequest("POST", "/~charmers/precise/wordpress/archive?hash="+hashSum, b)
	c.Assert(err, gc.IsNil)
	r.Header.Set("Content-Type", "application/zip")
	r.SetBasicAuth(testUsername, testPassword)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, r)
	httptesting.AssertJSONResponse(
		c,
		rec,
		http.StatusOK,
		params.ArchiveUploadResponse{
			Id: charm.MustParseURL("~charmers/precise/wordpress-0"),
		},
	)
	// The server must have read the request body to completion.
	c.Assert(b.Len(), gc.Equals, 0)
}

// assertCannotUpload POSTs the given content to the given id's archive
// endpoint and checks that the request fails with the given status,
// error code and message.
func (s *ArchiveSuite) assertCannotUpload(c *gc.C, id string, content io.ReadSeeker, httpStatus int, errorCode params.ErrorCode, errorMessage string) {
	hash, size := hashOf(content)
	_, err := content.Seek(0, 0)
	c.Assert(err, gc.IsNil)

	path := fmt.Sprintf("%s/archive?hash=%s", id, hash)
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:       s.srv,
		URL:           storeURL(path),
		Method:        "POST",
		ContentLength: size,
		Header: http.Header{
			"Content-Type": {"application/zip"},
		},
		Body:         content,
		Username:     testUsername,
		Password:     testPassword,
		ExpectStatus: httpStatus,
		ExpectBody: params.Error{
			Message: errorMessage,
			Code:    errorCode,
		},
	})

	// TODO(rog) check that the uploaded blob has been deleted,
	// by checking that no new blobs have been added to the blob store.
}

// assertUploadCharm uploads the testing charm with the given name
// through the API. The URL must hold the expected revision
// that the charm will be given when uploaded.
func (s *commonArchiveSuite) assertUploadCharm(c *gc.C, method string, url *router.ResolvedURL, charmName string, chans []params.Channel) *charm.CharmArchive {
	ch := storetesting.Charms.CharmArchive(c.MkDir(), charmName)
	id, size := s.assertUpload(c, method, url, ch.Path, nil)
	s.assertEntityInfo(c, entityInfo{
		Id: id,
		Meta: entityMetaInfo{
			ArchiveSize:  &params.ArchiveSizeResponse{Size: size},
			CharmMeta:    ch.Meta(),
			CharmConfig:  ch.Config(),
			CharmActions: ch.Actions(),
		},
	})
	return ch
}

// assertUploadBundle uploads the testing bundle with the given name
// through the API. The URL must hold the expected revision
// that the bundle will be given when uploaded.
func (s *commonArchiveSuite) assertUploadBundle(c *gc.C, method string, url *router.ResolvedURL, bundleName string) {
	path := storetesting.Charms.BundleArchivePath(c.MkDir(), bundleName)
	b, err := charm.ReadBundleArchive(path)
	c.Assert(err, gc.IsNil)
	id, size := s.assertUpload(c, method, url, path, nil)
	s.assertEntityInfo(c, entityInfo{
		Id: id,
		Meta: entityMetaInfo{
			ArchiveSize: &params.ArchiveSizeResponse{Size: size},
			BundleMeta:  b.Data(),
		},
	},
	)
}

// assertUpload uploads the archive at fileName to the given URL using
// the given HTTP method, optionally publishing it to chans, and checks
// the stored entity. It returns the uploaded entity id and size.
func (s *commonArchiveSuite) assertUpload(c *gc.C, method string, url *router.ResolvedURL, fileName string, chans []params.Channel) (id *charm.URL, size int64) {
	f, err := os.Open(fileName)
	c.Assert(err, gc.IsNil)
	defer f.Close()

	// Calculate blob hashes.
	hash := blobstore.NewHash()
	hash256 := sha256.New()
	size, err = io.Copy(io.MultiWriter(hash, hash256), f)
	c.Assert(err, gc.IsNil)
	hashSum := fmt.Sprintf("%x", hash.Sum(nil))
	hash256Sum := fmt.Sprintf("%x", hash256.Sum(nil))
	_, err = f.Seek(0, 0)
	c.Assert(err, gc.IsNil)

	// POST uploads never specify a revision; the server allocates one.
	uploadURL := url.URL
	if method == "POST" {
		uploadURL.Revision = -1
	}

	path := fmt.Sprintf("%s/archive?hash=%s", uploadURL.Path(), hashSum)
	// NOTE(review): this loop variable shadows the *gc.C test parameter c;
	// harmless here because the body only formats the channel value.
	for _, c := range chans {
		path += fmt.Sprintf("&channel=%s", c)
	}
	expectId := uploadURL.WithRevision(url.URL.Revision)
	expectedPromulgatedId := url.PromulgatedURL()
	if expectedPromulgatedId != nil {
		path += fmt.Sprintf("&promulgated=%s", expectedPromulgatedId.String())
	}
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:       s.srv,
		URL:           storeURL(path),
		Method:        method,
		ContentLength: size,
		Header: http.Header{
			"Content-Type": {"application/zip"},
		},
		Body:     f,
		Username: testUsername,
		Password: testPassword,
		ExpectBody: params.ArchiveUploadResponse{
			Id:            expectId,
			PromulgatedId: expectedPromulgatedId,
		},
	})

	// Make sure that the entity can be found in
	// all the channels we tried to publish it in
	// and not in any others.
	expectChans := map[params.Channel]bool{
		params.UnpublishedChannel: true,
	}
	for _, ch := range chans {
		expectChans[ch] = true
	}
	for _, ch := range []params.Channel{
		params.UnpublishedChannel,
		params.DevelopmentChannel,
		params.StableChannel,
	} {
		_, err := s.store.FindBestEntity(&url.URL, ch, nil)
		if expectChans[ch] {
			c.Assert(err, gc.IsNil)
		} else {
			c.Assert(err, gc.NotNil)
			c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound)
		}
	}
	entity, err := s.store.FindEntity(url, nil)
	c.Assert(err, gc.IsNil)
	c.Assert(entity.BlobHash, gc.Equals, hashSum)
	if url.URL.Series != "" {
		c.Assert(entity.BlobHash256, gc.Equals, hash256Sum)
	}
	c.Assert(entity.PromulgatedURL, gc.DeepEquals, url.PromulgatedURL())
	c.Assert(entity.Development, gc.Equals, false)

	// Test that the expected entry has been created
	// in the blob store.
	r, _, err := s.store.BlobStore.Open(entity.BlobName)
	c.Assert(err, gc.IsNil)
	r.Close()

	return expectId, size
}

// assertUploadCharmError attempts to upload the testing charm with the
// given name through the API, checking that the attempt fails with the
// specified error. The URL must hold the expected revision that the
// charm will be given when uploaded.
func (s *ArchiveSuite) assertUploadCharmError(c *gc.C, method string, url, purl *charm.URL, charmName string, chans []params.Channel, expectStatus int, expectBody interface{}) {
	ch := storetesting.Charms.CharmDir(charmName)
	s.assertUploadError(c, method, url, purl, ch, chans, expectStatus, expectBody)
}

// assertUploadError asserts that we get an error when uploading
// the contents of the given file to the given url and promulgated URL.
// The reason this method does not take a *router.ResolvedURL
// is so that we can test what happens when an inconsistent promulgated URL
// is passed in.
func (s *ArchiveSuite) assertUploadError(c *gc.C, method string, url, purl *charm.URL, entity charmstore.ArchiverTo, chans []params.Channel, expectStatus int, expectBody interface{}) {
	blob, hashSum := getBlob(entity)
	uploadURL := *url
	if method == "POST" {
		uploadURL.Revision = -1
	}
	path := fmt.Sprintf("%s/archive?hash=%s", uploadURL.Path(), hashSum)
	// NOTE(review): the loop variable shadows the *gc.C parameter c here too.
	for _, c := range chans {
		path += fmt.Sprintf("&channel=%s", c)
	}
	if purl != nil {
		path += fmt.Sprintf("&promulgated=%s", purl.String())
	}
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:       s.srv,
		URL:           storeURL(path),
		Method:        method,
		ContentLength: int64(blob.Len()),
		Header: http.Header{
			"Content-Type": {"application/zip"},
		},
		Body:         blob,
		Username:     testUsername,
		Password:     testPassword,
		ExpectStatus: expectStatus,
		ExpectBody:   expectBody,
	})
}

// getBlob returns the contents and blob checksum of the given entity.
func getBlob(entity charmstore.ArchiverTo) (blob *bytes.Buffer, hash string) {
	blob = new(bytes.Buffer)
	err := entity.ArchiveTo(blob)
	if err != nil {
		panic(err)
	}
	h := blobstore.NewHash()
	h.Write(blob.Bytes())
	hash = fmt.Sprintf("%x", h.Sum(nil))
	return blob, hash
}

// archiveFileErrorsTests describes archive-file GET requests expected
// to fail, and the error each should produce.
var archiveFileErrorsTests = []struct {
	about         string
	path          string
	expectStatus  int
	expectMessage string
	expectCode    params.ErrorCode
}{{
	about:         "entity not found",
	path:          "~charmers/trusty/no-such-42/archive/icon.svg",
	expectStatus:  http.StatusNotFound,
	expectMessage: `no matching charm or bundle for cs:~charmers/trusty/no-such-42`,
	expectCode:    params.ErrNotFound,
}, {
	about:         "directory listing",
	path:          "~charmers/utopic/wordpress-0/archive/hooks",
	expectStatus:  http.StatusForbidden,
	expectMessage: "directory listing not allowed",
	expectCode:    params.ErrForbidden,
}, {
	about:         "file not found",
	path:          "~charmers/utopic/wordpress-0/archive/no-such",
	expectStatus:  http.StatusNotFound,
	expectMessage: `file "no-such" not found in the archive`,
	expectCode:    params.ErrNotFound,
}, {
	about:         "no permissions",
	path:          "~charmers/utopic/mysql-0/archive/metadata.yaml",
	expectStatus:  http.StatusUnauthorized,
	expectMessage: `unauthorized: access denied for user "bob"`,
	expectCode:    params.ErrUnauthorized,
}}

func (s *ArchiveSuite) TestArchiveFileErrors(c *gc.C) {
	s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/utopic/wordpress-0", 0))
	id, _ := s.addPublicCharmFromRepo(c, "mysql", newResolvedURL("cs:~charmers/utopic/mysql-0", 0))
	// Restrict the mysql charm so the "no permissions" case fails for user bob.
	err := s.store.SetPerms(&id.URL, "stable.read", "no-one")
	c.Assert(err, gc.IsNil)
	s.discharge = dischargeForUser("bob")
	for i, test := range archiveFileErrorsTests {
		c.Logf("test %d: %s", i, test.about)
		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
			Handler:      s.srv,
			URL:          storeURL(test.path),
			Do:           bakeryDo(nil),
			Method:       "GET",
			ExpectStatus: test.expectStatus,
			ExpectBody: params.Error{
				Message: test.expectMessage,
				Code:    test.expectCode,
			},
		})
	}
}

// TestArchiveFileGet checks that individual files can be retrieved from
// a stored charm archive.
func (s *ArchiveSuite) TestArchiveFileGet(c *gc.C) {
	ch := storetesting.Charms.CharmArchive(c.MkDir(), "all-hooks")
	id := newResolvedURL("cs:~charmers/utopic/all-hooks-0", 0)
	s.addPublicCharm(c, ch, id)
	zipFile, err := zip.OpenReader(ch.Path)
	c.Assert(err, gc.IsNil)
	defer zipFile.Close()

	// Check a file in the root directory.
	s.assertArchiveFileContents(c, zipFile, "~charmers/utopic/all-hooks-0/archive/metadata.yaml")
	// Check a file in a subdirectory.
	s.assertArchiveFileContents(c, zipFile, "~charmers/utopic/all-hooks-0/archive/hooks/install")
}

// assertArchiveFileContents checks that the response returned by the
// serveArchiveFile endpoint is correct for the given archive and URL path.
func (s *ArchiveSuite) assertArchiveFileContents(c *gc.C, zipFile *zip.ReadCloser, path string) {
	// For example: trusty/django/archive/hooks/install -> hooks/install.
	filePath := strings.SplitN(path, "/archive/", 2)[1]

	// Retrieve the expected bytes.
	var expectBytes []byte
	for _, file := range zipFile.File {
		if file.Name == filePath {
			r, err := file.Open()
			c.Assert(err, gc.IsNil)
			defer r.Close()
			expectBytes, err = ioutil.ReadAll(r)
			c.Assert(err, gc.IsNil)
			break
		}
	}
	c.Assert(expectBytes, gc.Not(gc.HasLen), 0)

	// Make the request.
	url := storeURL(path)
	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
		Handler: s.srv,
		URL:     url,
	})

	// Ensure the response is what we expect.
	c.Assert(rec.Code, gc.Equals, http.StatusOK)
	c.Assert(rec.Body.Bytes(), gc.DeepEquals, expectBytes)
	headers := rec.Header()
	c.Assert(headers.Get("Content-Length"), gc.Equals, strconv.Itoa(len(expectBytes)))
	// We only have text files in the charm repository used for tests.
	c.Assert(headers.Get("Content-Type"), gc.Equals, "text/plain; charset=utf-8")
	assertCacheControl(c, rec.Header(), true)
}

func (s *ArchiveSuite) TestDelete(c *gc.C) {
	// Add a charm to the database (including the archive).
	id := "~charmers/utopic/mysql-42"
	url := newResolvedURL(id, -1)
	err := s.store.AddCharmWithArchive(url, storetesting.Charms.CharmArchive(c.MkDir(), "mysql"))
	c.Assert(err, gc.IsNil)

	// Retrieve the corresponding entity.
	var entity mongodoc.Entity
	err = s.store.DB.Entities().FindId(&url.URL).Select(bson.D{{"blobname", 1}}).One(&entity)
	c.Assert(err, gc.IsNil)

	// Delete the charm using the API. DELETE is currently disabled,
	// so a method-not-allowed error is expected.
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:      s.srv,
		URL:          storeURL(id + "/archive"),
		Method:       "DELETE",
		Username:     testUsername,
		Password:     testPassword,
		ExpectStatus: http.StatusMethodNotAllowed,
		ExpectBody: params.Error{
			Message: `DELETE not allowed`,
			Code:    params.ErrMethodNotAllowed,
		},
	})
	// TODO(mhilton) reinstate this check when DELETE is re-enabled.
	// // The entity has been deleted.
	// count, err := s.store.DB.Entities().FindId(url).Count()
	// c.Assert(err, gc.IsNil)
	// c.Assert(count, gc.Equals, 0)
	//
	// // The blob has been deleted.
	// _, _, err = s.store.BlobStore.Open(entity.BlobName)
	// c.Assert(err, gc.ErrorMatches, "resource.*not found")
}

// TestDeleteSpecificCharm checks that a DELETE on one revision leaves
// the sibling revisions untouched (DELETE is currently disabled, so all
// three remain).
func (s *ArchiveSuite) TestDeleteSpecificCharm(c *gc.C) {
	// Add a couple of charms to the database.
	for _, id := range []string{"~charmers/trusty/mysql-42", "~charmers/utopic/mysql-42", "~charmers/utopic/mysql-47"} {
		err := s.store.AddCharmWithArchive(
			newResolvedURL(id, -1),
			storetesting.Charms.CharmArchive(c.MkDir(), "mysql"))
		c.Assert(err, gc.IsNil)
	}

	// Delete the second charm using the API.
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:      s.srv,
		URL:          storeURL("~charmers/utopic/mysql-42/archive"),
		Method:       "DELETE",
		Username:     testUsername,
		Password:     testPassword,
		ExpectStatus: http.StatusMethodNotAllowed,
		ExpectBody: params.Error{
			Message: `DELETE not allowed`,
			Code:    params.ErrMethodNotAllowed,
		},
	})

	// The other two charms are still present in the database.
	urls := []*charm.URL{
		charm.MustParseURL("~charmers/trusty/mysql-42"),
		charm.MustParseURL("~charmers/utopic/mysql-47"),
	}
	count, err := s.store.DB.Entities().Find(bson.D{{
		"_id", bson.D{{"$in", urls}},
	}}).Count()
	c.Assert(err, gc.IsNil)
	c.Assert(count, gc.Equals, 2)
}

func (s *ArchiveSuite) TestDeleteNotFound(c *gc.C) {
	// Try to delete a non existing charm using the API.
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:      s.srv,
		URL:          storeURL("~charmers/utopic/no-such-0/archive"),
		Method:       "DELETE",
		Username:     testUsername,
		Password:     testPassword,
		ExpectStatus: http.StatusMethodNotAllowed,
		ExpectBody: params.Error{
			Message: `DELETE not allowed`,
			Code:    params.ErrMethodNotAllowed,
		},
	})
}

// TODO(mhilton) reinstate this test when DELETE is re-enabled.
//func (s *ArchiveSuite) TestDeleteError(c *gc.C) {
//	// Add a charm to the database (not including the archive).
//	id := "~charmers/utopic/mysql-42"
//	url := newResolvedURL(id, -1)
//	err := s.store.AddCharmWithArchive(url, storetesting.Charms.CharmArchive(c.MkDir(), "mysql"))
//	c.Assert(err, gc.IsNil)
//
//	err = s.store.DB.Entities().UpdateId(&url.URL, bson.M{
//		"$set": bson.M{
//			"blobname": "no-such-name",
//		},
//	})
//	c.Assert(err, gc.IsNil)
//	// TODO update entity to change BlobName to "no-such-name"
//
//	// Try to delete the charm using the API.
//	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
//		Handler:      s.srv,
//		URL:          storeURL(id + "/archive"),
//		Method:       "DELETE",
//		Username:     testUsername,
//		Password:     testPassword,
//		ExpectStatus: http.StatusInternalServerError,
//		ExpectBody: params.Error{
//			Message: `cannot delete "cs:~charmers/utopic/mysql-42": cannot remove blob no-such-name: resource at path "global/no-such-name" not found`,
//		},
//	})
//}

// TODO(mhilton) reinstate this test when DELETE is re-enabled
//.func (s *ArchiveSuite) TestDeleteCounters(c *gc.C) {
//	if !storetesting.MongoJSEnabled() {
//		c.Skip("MongoDB JavaScript not available")
//	}
//
//	// Add a charm to the database (including the archive).
//	id := "~charmers/utopic/mysql-42"
//	err := s.store.AddCharmWithArchive(
//		newResolvedURL(id, -1),
//		storetesting.Charms.CharmArchive(c.MkDir(), "mysql"))
//	c.Assert(err, gc.IsNil)
//
//	// Delete the charm using the API.
//	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
//		Handler:  s.srv,
//		Method:   "DELETE",
//		URL:      storeURL(id + "/archive"),
//		Username: testUsername,
//		Password: testPassword,
//	})
//	c.Assert(rec.Code, gc.Equals, http.StatusOK)
//
//	// Check that the delete count for the entity has been updated.
//	key := []string{params.StatsArchiveDelete, "utopic", "mysql", "charmers", "42"}
//	stats.CheckCounterSum(c, s.store, key, false, 1)
//}

// basicAuthArchiveSuite tests archive endpoints that are protected by
// HTTP basic authentication.
type basicAuthArchiveSuite struct {
	commonSuite
}

var _ = gc.Suite(&basicAuthArchiveSuite{})

func (s *basicAuthArchiveSuite) TestPostAuthErrors(c *gc.C) {
	s.checkAuthErrors(c, "POST", "~charmers/utopic/django/archive")
}

// TODO(mhilton) reinstate this test when DELETE is re-enabled.
//func (s *basicAuthArchiveSuite) TestDeleteAuthErrors(c *gc.C) {
//	err := s.store.AddCharmWithArchive(
//		newResolvedURL("~charmers/utopic/django-42", 42),
//		storetesting.Charms.CharmArchive(c.MkDir(), "wordpress"),
//	)
//	c.Assert(err, gc.IsNil)
//	s.checkAuthErrors(c, "DELETE", "utopic/django-42/archive")
//}

// TestPostErrorReadsFully checks that the server consumes the whole
// request body even when the POST fails with a bad-request error.
func (s *basicAuthArchiveSuite) TestPostErrorReadsFully(c *gc.C) {
	h := s.handler(c)
	defer h.Close()
	b := strings.NewReader("test body")
	r, err := http.NewRequest("POST", "/~charmers/trusty/wordpress/archive", b)
	c.Assert(err, gc.IsNil)
	r.Header.Set("Content-Type", "application/zip")
	r.SetBasicAuth(testUsername, testPassword)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, r)
	c.Assert(rec.Code, gc.Equals, http.StatusBadRequest)
	c.Assert(b.Len(), gc.Equals, 0)
}

// TestPostAuthErrorReadsFully checks that the server consumes the whole
// request body even when authentication fails.
func (s *basicAuthArchiveSuite) TestPostAuthErrorReadsFully(c *gc.C) {
	h := s.handler(c)
	defer h.Close()
	b := strings.NewReader("test body")
	r, err := http.NewRequest("POST", "/~charmers/trusty/wordpress/archive", b)
	c.Assert(err, gc.IsNil)
	r.Header.Set("Content-Type", "application/zip")
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, r)
	c.Assert(rec.Code, gc.Equals, http.StatusUnauthorized)
	c.Assert(b.Len(), gc.Equals, 0)
}

// archiveAuthErrorsTests describes basic-auth failure modes and the
// error message each should produce.
var archiveAuthErrorsTests = []struct {
	about         string
	header        http.Header
	username      string
	password      string
	expectMessage string
}{{
	about:         "no credentials",
	expectMessage: "authentication failed: missing HTTP auth header",
}, {
	about: "invalid encoding",
	header: http.Header{
		"Authorization": {"Basic not-a-valid-base64"},
	},
	expectMessage: "authentication failed: invalid HTTP auth encoding",
}, {
	about: "invalid header",
	header: http.Header{
		"Authorization": {"Basic " + base64.StdEncoding.EncodeToString([]byte("invalid"))},
	},
	expectMessage: "authentication failed: invalid HTTP auth contents",
}, {
	about:         "invalid credentials",
	username:      "no-such",
	password:      "exterminate!",
	expectMessage: "invalid user name or password",
}}

func (s *basicAuthArchiveSuite) checkAuthErrors(c *gc.C,
method, url string) { for i, test := range archiveAuthErrorsTests { c.Logf("test %d: %s", i, test.about) if test.header == nil { test.header = http.Header{} } if method == "POST" { test.header.Add("Content-Type", "application/zip") } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(url), Method: method, Header: test.header, Username: test.username, Password: test.password, ExpectStatus: http.StatusUnauthorized, ExpectBody: params.Error{ Message: test.expectMessage, Code: params.ErrUnauthorized, }, }) } } // entityInfo holds all the information we want to find // out about a charm or bundle uploaded to the store. type entityInfo struct { Id *charm.URL Meta entityMetaInfo } type entityMetaInfo struct { ArchiveSize *params.ArchiveSizeResponse `json:"archive-size,omitempty"` CharmMeta *charm.Meta `json:"charm-metadata,omitempty"` CharmConfig *charm.Config `json:"charm-config,omitempty"` CharmActions *charm.Actions `json:"charm-actions,omitempty"` BundleMeta *charm.BundleData `json:"bundle-metadata,omitempty"` } func (s *commonArchiveSuite) assertEntityInfo(c *gc.C, expect entityInfo) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL( expect.Id.Path() + "/meta/any" + "?include=archive-size" + "&include=charm-metadata" + "&include=charm-config" + "&include=charm-actions" + "&include=bundle-metadata", ), Username: testUsername, Password: testPassword, ExpectBody: expect, }) } func (s *ArchiveSuite) TestArchiveFileGetHasCORSHeaders(c *gc.C) { id := "~charmers/precise/wordpress-0" s.assertUploadCharm(c, "POST", newResolvedURL(id, -1), "wordpress", nil) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(fmt.Sprintf("%s/archive/metadata.yaml", id)), }) headers := rec.Header() c.Assert(len(headers["Access-Control-Allow-Origin"]), gc.Equals, 1) c.Assert(len(headers["Access-Control-Allow-Headers"]), gc.Equals, 1) 
c.Assert(headers["Access-Control-Allow-Origin"][0], gc.Equals, "*") c.Assert(headers["Access-Control-Cache-Max-Age"][0], gc.Equals, "600") c.Assert(headers["Access-Control-Allow-Headers"][0], gc.Equals, "Bakery-Protocol-Version, Macaroons, X-Requested-With") } var getNewPromulgatedRevisionTests = []struct { about string id *charm.URL expectRev int }{{ about: "no base entity", id: charm.MustParseURL("cs:~mmouse/trusty/mysql-14"), expectRev: -1, }, { about: "not promulgated", id: charm.MustParseURL("cs:~dduck/trusty/mysql-14"), expectRev: -1, }, { about: "not yet promulgated", id: charm.MustParseURL("cs:~goofy/trusty/mysql-14"), expectRev: 0, }, { about: "existing promulgated", id: charm.MustParseURL("cs:~pluto/trusty/mariadb-14"), expectRev: 4, }, { about: "previous promulgated by different user", id: charm.MustParseURL("cs:~tom/trusty/sed-1"), expectRev: 5, }, { about: "many previous promulgated revisions", id: charm.MustParseURL("cs:~tom/trusty/awk-5"), expectRev: 5, }} func (s *ArchiveSuite) TestGetNewPromulgatedRevision(c *gc.C) { charms := []string{ "cs:~dduck/trusty/mysql-14", "14 cs:~goofy/precise/mysql-14", "3 cs:~pluto/trusty/mariadb-5", "0 cs:~tom/trusty/sed-0", "cs:~jerry/trusty/sed-2", "4 cs:~jerry/trusty/sed-3", "0 cs:~tom/trusty/awk-0", "1 cs:~tom/trusty/awk-1", "2 cs:~tom/trusty/awk-2", "3 cs:~tom/trusty/awk-3", "4 cs:~tom/trusty/awk-4", } for _, url := range charms { ch := storetesting.NewCharm(new(charm.Meta)) err := s.store.AddCharmWithArchive(mustParseResolvedURL(url), ch) c.Assert(err, gc.IsNil) } handler := s.handler(c) defer handler.Close() for i, test := range getNewPromulgatedRevisionTests { c.Logf("%d. 
%s", i, test.about) rev, err := v5.GetNewPromulgatedRevision(handler, test.id) c.Assert(err, gc.IsNil) c.Assert(rev, gc.Equals, test.expectRev) } } func hashOfBytes(data []byte) string { hash := blobstore.NewHash() hash.Write(data) return fmt.Sprintf("%x", hash.Sum(nil)) } func hashOf(r io.Reader) (hashSum string, size int64) { hash := blobstore.NewHash() n, err := io.Copy(hash, r) if err != nil { panic(err) } return fmt.Sprintf("%x", hash.Sum(nil)), n } // assertCacheControl asserts that the cache control headers are // appropriately set. The isPublic parameter specifies // whether the id in the request represents a public charm or bundle. func assertCacheControl(c *gc.C, h http.Header, isPublic bool) { if isPublic { seconds := v5.ArchiveCachePublicMaxAge / time.Second c.Assert(h.Get("Cache-Control"), gc.Equals, fmt.Sprintf("public, max-age=%d", seconds)) } else { c.Assert(h.Get("Cache-Control"), gc.Equals, "no-cache, must-revalidate") } } type ArchiveSearchSuite struct { commonSuite } var _ = gc.Suite(&ArchiveSearchSuite{}) func (s *ArchiveSearchSuite) SetUpSuite(c *gc.C) { s.enableES = true s.commonSuite.SetUpSuite(c) } func (s *ArchiveSearchSuite) SetUpTest(c *gc.C) { s.commonSuite.SetUpTest(c) // TODO (frankban): remove this call when removing the legacy counts logic. patchLegacyDownloadCountsEnabled(s.AddCleanup, false) } func (s *ArchiveSearchSuite) TestGetSearchUpdate(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } for i, id := range []string{"~charmers/wily/mysql-42", "~who/wily/mysql-42"} { c.Logf("test %d: %s", i, id) url := newResolvedURL(id, -1) // Add a charm to the database. s.addPublicCharm(c, storetesting.NewCharm(nil), url) // Download the charm archive using the API. rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(id + "/archive"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) // Check that the search record for the entity has been updated. 
stats.CheckSearchTotalDownloads(c, s.store, &url.URL, 1) } } type ArchiveSuiteWithTerms struct { commonArchiveSuite } var _ = gc.Suite(&ArchiveSuiteWithTerms{}) func (s *ArchiveSuiteWithTerms) SetUpSuite(c *gc.C) { s.commonSuite.SetUpSuite(c) s.enableTerms = true s.enableIdentity = true } func (s *ArchiveSuiteWithTerms) SetUpTest(c *gc.C) { s.commonSuite.SetUpTest(c) s.discharge = dischargeForUser("bob") } func (s *ArchiveSuiteWithTerms) TestGetUserHasAgreedToTermsAndConditions(c *gc.C) { termsDischargeAccessed := false s.dischargeTerms = func(cond, args string) ([]checkers.Caveat, error) { termsDischargeAccessed = true if cond != "has-agreed" { return nil, errgo.New("unexpected condition") } terms := strings.Fields(args) sort.Strings(terms) if strings.Join(terms, " ") != "terms-1/1 terms-2/5" { return nil, errgo.New("unexpected terms in condition") } return nil, nil } client := httpbakery.NewHTTPClient() ch := storetesting.NewCharm(&charm.Meta{ Terms: []string{"terms-1/1", "terms-2/5"}, }) s.addPublicCharm(c, ch, newResolvedURL("cs:~charmers/precise/terms-0", -1)) ch1 := storetesting.NewCharm(&charm.Meta{ Terms: []string{"terms-3/1", "terms-4/5"}, }) s.addPublicCharm(c, ch1, newResolvedURL("cs:~charmers/precise/terms1-0", -1)) s.assertArchiveDownload( c, "~charmers/precise/terms-0", &httptesting.DoRequestParams{ Do: bakeryDo(client), }, ch.Bytes(), ) c.Assert(termsDischargeAccessed, gc.Equals, true) termsDischargeAccessed = false s.dischargeTerms = func(cond, args string) ([]checkers.Caveat, error) { termsDischargeAccessed = true return nil, errgo.New("user has not agreed to specified terms and conditions") } archiveUrl := storeURL("~charmers/precise/terms1-0/archive") httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: archiveUrl, Do: bakeryDo(client), ExpectError: ".*third party refused discharge: cannot discharge: user has not agreed to specified terms and conditions", }) c.Assert(termsDischargeAccessed, gc.Equals, true) } func (s 
*ArchiveSuiteWithTerms) TestGetArchiveWithBlankMacaroon(c *gc.C) { termsDischargeAccessed := false s.dischargeTerms = func(cond, args string) ([]checkers.Caveat, error) { termsDischargeAccessed = true return nil, errgo.New("user has not agreed to specified terms and conditions") } s.addPublicCharm(c, storetesting.NewCharm(&charm.Meta{ Terms: []string{"terms-1/1", "terms-2/5"}, }), newResolvedURL("cs:~charmers/precise/terms-0", -1)) archiveUrl := storeURL("~charmers/precise/terms-0/archive") client := httpbakery.NewHTTPClient() var gotBody json.RawMessage httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("macaroon"), Do: bakeryDo(client), ExpectStatus: http.StatusOK, ExpectBody: httptesting.BodyAsserter(func(c *gc.C, m json.RawMessage) { gotBody = m }), }) c.Assert(gotBody, gc.NotNil) var m macaroon.Macaroon err := json.Unmarshal(gotBody, &m) c.Assert(err, jc.ErrorIsNil) bClient := httpbakery.NewClient() ms, err := bClient.DischargeAll(&m) c.Assert(err, jc.ErrorIsNil) u, err := url.Parse("http://127.0.0.1") c.Assert(err, jc.ErrorIsNil) err = httpbakery.SetCookie(client.Jar, u, ms) c.Assert(err, jc.ErrorIsNil) httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: archiveUrl, Do: bakeryDo(client), ExpectError: ".*third party refused discharge: cannot discharge: user has not agreed to specified terms and conditions", }) c.Assert(termsDischargeAccessed, gc.Equals, true) } func (s *ArchiveSuiteWithTerms) TestGetUserHasNotAgreedToTerms(c *gc.C) { s.dischargeTerms = func(_, _ string) ([]checkers.Caveat, error) { return nil, errgo.New("user has not agreed to specified terms and conditions") } client := httpbakery.NewHTTPClient() s.addPublicCharm(c, storetesting.NewCharm(&charm.Meta{ Terms: []string{"terms-1/1", "terms-2/5"}, }), newResolvedURL("cs:~charmers/precise/terms-0", -1)) archiveUrl := storeURL("~charmers/precise/terms-0/archive") httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: 
archiveUrl, Do: bakeryDo(client), ExpectError: ".*third party refused discharge: cannot discharge: user has not agreed to specified terms and conditions", }) } func (s *ArchiveSuiteWithTerms) TestGetIgnoringTermsWithBasicAuth(c *gc.C) { s.dischargeTerms = func(_, _ string) ([]checkers.Caveat, error) { return nil, errgo.New("user has not agreed to specified terms and conditions") } ch := storetesting.NewCharm(&charm.Meta{ Terms: []string{"terms-1/1", "terms-2/5"}, }) s.addPublicCharm(c, ch, newResolvedURL("cs:~charmers/precise/terms-0", -1)) s.assertArchiveDownload( c, "~charmers/precise/terms-0", &httptesting.DoRequestParams{ Header: basicAuthHeader(testUsername, testPassword), }, ch.Bytes(), ) } func (s *commonSuite) assertArchiveDownload(c *gc.C, id string, extraParams *httptesting.DoRequestParams, archiveBytes []byte) *httptest.ResponseRecorder { doParams := httptesting.DoRequestParams{} if extraParams != nil { doParams = *extraParams } doParams.Handler = s.srv if doParams.URL == "" { doParams.URL = storeURL(id + "/archive") } rec := httptesting.DoRequest(c, doParams) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes) c.Assert(rec.Header().Get(params.ContentHashHeader), gc.Equals, hashOfBytes(archiveBytes)) return rec } ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/export_test.go���������������������0000664�0001750�0001750�00000000613�12672604603�026451� 
0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" var ( ProcessIcon = processIcon ErrProbablyNotXML = errProbablyNotXML TestAddAuditCallback = &testAddAuditCallback GetNewPromulgatedRevision = (*ReqHandler).getNewPromulgatedRevision ResolveURL = resolveURL ) ���������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/list.go����������������������������0000664�0001750�0001750�00000002711�12672604603�025045� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "net/http" "gopkg.in/errgo.v1" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/entitycache" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" ) // GET list[?filter=value…][&include=meta][&sort=field[+dir]] // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-list func (h *ReqHandler) serveList(_ http.Header, req *http.Request) (interface{}, error) { sp, err := ParseSearchParams(req) if err != nil { return "", err } h.WillIncludeMetadata(sp.Include) lq, err := h.Store.ListQuery(sp) if err != nil { return nil, badRequestf(err, "") } var results []*mongodoc.Entity iter := h.Cache.CustomIter(entityCacheListQuery{lq}, nil) for iter.Next() { results = append(results, iter.Entity()) } if iter.Err() != nil { return nil, errgo.Notef(err, "error listing charms and bundles") } r, err := h.getMetadataForEntities(results, sp.Include, req, nil) if err != nil { return nil, errgo.Notef(err, "cannot get metadata") } return params.ListResponse{ Results: r, }, nil } type entityCacheListQuery struct { q *charmstore.ListQuery } func (q entityCacheListQuery) Iter(fields map[string]int) entitycache.StoreIter { return q.q.Iter(fields) } �������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/content.go�������������������������0000664�0001750�0001750�00000016134�12672604603�025550� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "archive/zip" "bytes" "fmt" "io" "net/http" "path" "strings" "github.com/juju/xml" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/jujusvg.v1" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" "gopkg.in/juju/charmstore.v5-unstable/internal/router" ) // GET id/diagram.svg // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-iddiagramsvg func (h *ReqHandler) serveDiagram(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { if id.URL.Series != "bundle" { return errgo.WithCausef(nil, params.ErrNotFound, "diagrams not supported for charms") } entity, err := h.Cache.Entity(&id.URL, charmstore.FieldSelector("bundledata")) if err != nil { return errgo.Mask(err, errgo.Is(params.ErrNotFound)) } var urlErr error // TODO consider what happens when a charm's SVG does not exist. canvas, err := jujusvg.NewFromBundle(entity.BundleData, func(id *charm.URL) string { // TODO change jujusvg so that the iconURL function can // return an error. absPath := h.Handler.rootPath + "/" + id.Path() + "/icon.svg" p, err := router.RelativeURLPath(req.RequestURI, absPath) if err != nil { urlErr = errgo.Notef(err, "cannot make relative URL from %q and %q", req.RequestURI, absPath) } return p }, nil) if err != nil { return errgo.Notef(err, "cannot create canvas") } if urlErr != nil { return urlErr } setArchiveCacheControl(w.Header(), h.isPublic(id)) w.Header().Set("Content-Type", "image/svg+xml") canvas.Marshal(w) return nil } // These are all forms of README files // actually observed in charms in the wild. 
var allowedReadMe = map[string]bool{ "readme": true, "readme.md": true, "readme.rst": true, "readme.ex": true, "readme.markdown": true, "readme.txt": true, } // GET id/readme // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idreadme func (h *ReqHandler) serveReadMe(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { entity, err := h.Cache.Entity(&id.URL, charmstore.FieldSelector("contents", "blobname")) if err != nil { return errgo.NoteMask(err, "cannot get README", errgo.Is(params.ErrNotFound)) } isReadMeFile := func(f *zip.File) bool { name := strings.ToLower(path.Clean(f.Name)) // This is the same condition currently used by the GUI. // TODO propagate likely content type from file extension. return allowedReadMe[name] } r, err := h.Store.OpenCachedBlobFile(entity, mongodoc.FileReadMe, isReadMeFile) if err != nil { return errgo.Mask(err, errgo.Is(params.ErrNotFound)) } defer r.Close() setArchiveCacheControl(w.Header(), h.isPublic(id)) io.Copy(w, r) return nil } // GET id/icon.svg // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idiconsvg func (h *ReqHandler) serveIcon(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { if id.URL.Series == "bundle" { return errgo.WithCausef(nil, params.ErrNotFound, "icons not supported for bundles") } entity, err := h.Cache.Entity(&id.URL, charmstore.FieldSelector("contents", "blobname")) if err != nil { return errgo.NoteMask(err, "cannot get icon", errgo.Is(params.ErrNotFound)) } isIconFile := func(f *zip.File) bool { return path.Clean(f.Name) == "icon.svg" } r, err := h.Store.OpenCachedBlobFile(entity, mongodoc.FileIcon, isIconFile) if err != nil { logger.Errorf("cannot open icon.svg file for %v: %v", id, err) if errgo.Cause(err) != params.ErrNotFound { return errgo.Mask(err) } setArchiveCacheControl(w.Header(), h.isPublic(id)) w.Header().Set("Content-Type", "image/svg+xml") io.Copy(w, strings.NewReader(DefaultIcon)) return nil } defer r.Close() 
w.Header().Set("Content-Type", "image/svg+xml") setArchiveCacheControl(w.Header(), h.isPublic(id)) if err := processIcon(w, r); err != nil { if errgo.Cause(err) == errProbablyNotXML { logger.Errorf("cannot process icon.svg from %s: %v", id, err) io.Copy(w, strings.NewReader(DefaultIcon)) return nil } return errgo.Mask(err) } return nil } var errProbablyNotXML = errgo.New("probably not XML") const svgNamespace = "http://www.w3.org/2000/svg" // processIcon reads an icon SVG from r and writes // it to w, making any changes that need to be made. // Currently it adds a viewBox attribute to the // element if necessary. // If there is an error processing the XML before // the first token has been written, it returns an error // with errProbablyNotXML as the cause. func processIcon(w io.Writer, r io.Reader) error { // Arrange to save all the content that we find up // until the first element. Then we'll stitch it // back together again for the actual processing. var saved bytes.Buffer dec := xml.NewDecoder(io.TeeReader(r, &saved)) dec.DefaultSpace = svgNamespace found, changed := false, false for !found { tok, err := dec.Token() if err == io.EOF { break } if err != nil { return errgo.WithCausef(err, errProbablyNotXML, "") } _, found, changed = ensureViewbox(tok) } if !found { return errgo.WithCausef(nil, errProbablyNotXML, "no element found") } // Stitch the input back together again so we can // write the output without buffering it in memory. r = io.MultiReader(&saved, r) if !found || !changed { _, err := io.Copy(w, r) return err } return processNaive(w, r) } // processNaive is like processIcon but processes all of the // XML elements. It does not return errProbablyNotXML // on error because it may have written arbitrary XML // to w, at which point writing an alternative response would // be unwise. 
func processNaive(w io.Writer, r io.Reader) error { dec := xml.NewDecoder(r) dec.DefaultSpace = svgNamespace enc := xml.NewEncoder(w) found := false for { tok, err := dec.Token() if err == io.EOF { break } if err != nil { return fmt.Errorf("failed to read token: %v", err) } if !found { tok, found, _ = ensureViewbox(tok) } if err := enc.EncodeToken(tok); err != nil { return fmt.Errorf("cannot encode token %#v: %v", tok, err) } } if err := enc.Flush(); err != nil { return fmt.Errorf("cannot flush output: %v", err) } return nil } func ensureViewbox(tok0 xml.Token) (_ xml.Token, found, changed bool) { tok, ok := tok0.(xml.StartElement) if !ok || tok.Name.Space != svgNamespace || tok.Name.Local != "svg" { return tok0, false, false } var width, height string for _, attr := range tok.Attr { if attr.Name.Space != "" { continue } switch attr.Name.Local { case "width": width = attr.Value case "height": height = attr.Value case "viewBox": return tok, true, false } } if width == "" || height == "" { // Width and/or height have not been specified, // so leave viewbox unspecified too. 
return tok, true, false } tok.Attr = append(tok.Attr, xml.Attr{ Name: xml.Name{ Local: "viewBox", }, Value: fmt.Sprintf("0 0 %s %s", width, height), }) return tok, true, true } ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/status_test.go���������������������0000664�0001750�0001750�00000020423�12672604603�026454� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
// Tests for the /debug/status endpoint: each test seeds the store
// (entities and/or ingestion and legacy-statistics log entries) and
// asserts the resulting per-check DebugStatus values.
package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5"

import (
	"encoding/json"
	"net/http"
	"time"

	jc "github.com/juju/testing/checkers"
	"github.com/juju/testing/httptesting"
	"github.com/juju/utils/debugstatus"
	gc "gopkg.in/check.v1"
	"gopkg.in/juju/charm.v6-unstable"
	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"

	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
)

// zeroTimeStr is how an unset time renders in status values
// (used when no matching log entry exists).
var zeroTimeStr = time.Time{}.Format(time.RFC3339)

// TestStatus seeds charms, bundles and complete ingestion and
// legacy-statistics log pairs, then expects every status check to pass.
func (s *APISuite) TestStatus(c *gc.C) {
	for _, id := range []*router.ResolvedURL{
		newResolvedURL("cs:~charmers/precise/wordpress-2", 2),
		newResolvedURL("cs:~charmers/precise/wordpress-3", 3),
		newResolvedURL("cs:~foo/precise/mysql-9", 1),
		newResolvedURL("cs:~bar/utopic/mysql-10", -1),
		newResolvedURL("cs:~charmers/bundle/wordpress-simple-3", 3),
		newResolvedURL("cs:~bar/bundle/wordpress-simple-4", -1),
	} {
		if id.URL.Series == "bundle" {
			s.addPublicBundleFromRepo(c, id.URL.Name, id, false)
		} else {
			s.addPublicCharmFromRepo(c, id.URL.Name, id)
		}
	}
	now := time.Now()
	// Pin the reported server start time so "server_started" is deterministic.
	s.PatchValue(&debugstatus.StartTime, now)
	start := now.Add(-2 * time.Hour)
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"ingestion started"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.IngestionType,
		Time:  start,
	})
	end := now.Add(-1 * time.Hour)
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"ingestion completed"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.IngestionType,
		Time:  end,
	})
	statisticsStart := now.Add(-1*time.Hour - 30*time.Minute)
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"legacy statistics import started"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.LegacyStatisticsType,
		Time:  statisticsStart,
	})
	statisticsEnd := now.Add(-30 * time.Minute)
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"legacy statistics import completed"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.LegacyStatisticsType,
		Time:  statisticsEnd,
	})
	// complete=true: the response must contain exactly these checks.
	s.AssertDebugStatus(c, true, map[string]params.DebugStatus{
		"mongo_connected": {
			Name:   "MongoDB is connected",
			Value:  "Connected",
			Passed: true,
		},
		"mongo_collections": {
			Name:   "MongoDB collections",
			Value:  "All required collections exist",
			Passed: true,
		},
		"elasticsearch": {
			Name:   "Elastic search is running",
			Value:  "Elastic search is not configured",
			Passed: true,
		},
		"entities": {
			Name:   "Entities in charm store",
			Value:  "4 charms; 2 bundles; 4 promulgated",
			Passed: true,
		},
		"base_entities": {
			Name:   "Base entities in charm store",
			Value:  "count: 5",
			Passed: true,
		},
		"server_started": {
			Name:   "Server started",
			Value:  now.String(),
			Passed: true,
		},
		"ingestion": {
			Name:   "Ingestion",
			Value:  "started: " + start.Format(time.RFC3339) + ", completed: " + end.Format(time.RFC3339),
			Passed: true,
		},
		"legacy_statistics": {
			Name:   "Legacy Statistics Load",
			Value:  "started: " + statisticsStart.Format(time.RFC3339) + ", completed: " + statisticsEnd.Format(time.RFC3339),
			Passed: true,
		},
	})
}

// TestStatusWithoutCorrectCollections drops the entities collection and
// expects the collections check to fail, naming the missing collection.
func (s *APISuite) TestStatusWithoutCorrectCollections(c *gc.C) {
	s.store.DB.Entities().DropCollection()
	s.AssertDebugStatus(c, false, map[string]params.DebugStatus{
		"mongo_collections": {
			Name:   "MongoDB collections",
			Value:  "Missing collections: [" + s.store.DB.Entities().Name + "]",
			Passed: false,
		},
	})
}

// TestStatusWithoutIngestion expects the ingestion check to fail with
// zero times when no ingestion log entries exist.
func (s *APISuite) TestStatusWithoutIngestion(c *gc.C) {
	s.AssertDebugStatus(c, false, map[string]params.DebugStatus{
		"ingestion": {
			Name:   "Ingestion",
			Value:  "started: " + zeroTimeStr + ", completed: " + zeroTimeStr,
			Passed: false,
		},
	})
}

// TestStatusIngestionStarted: a start log without a completion log
// reports the start time, a zero completion time, and failure.
func (s *APISuite) TestStatusIngestionStarted(c *gc.C) {
	now := time.Now()
	start := now.Add(-1 * time.Hour)
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"ingestion started"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.IngestionType,
		Time:  start,
	})
	s.AssertDebugStatus(c, false, map[string]params.DebugStatus{
		"ingestion": {
			Name:   "Ingestion",
			Value:  "started: " + start.Format(time.RFC3339) + ", completed: " + zeroTimeStr,
			Passed: false,
		},
	})
}

// TestStatusWithoutLegacyStatistics expects the legacy-statistics check
// to fail with zero times when no statistics log entries exist.
func (s *APISuite) TestStatusWithoutLegacyStatistics(c *gc.C) {
	s.AssertDebugStatus(c, false, map[string]params.DebugStatus{
		"legacy_statistics": {
			Name:   "Legacy Statistics Load",
			Value:  "started: " + zeroTimeStr + ", completed: " + zeroTimeStr,
			Passed: false,
		},
	})
}

// TestStatusLegacyStatisticsStarted: a start log without a completion
// log reports the start time, a zero completion time, and failure.
func (s *APISuite) TestStatusLegacyStatisticsStarted(c *gc.C) {
	now := time.Now()
	statisticsStart := now.Add(-1*time.Hour - 30*time.Minute)
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"legacy statistics import started"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.LegacyStatisticsType,
		Time:  statisticsStart,
	})
	s.AssertDebugStatus(c, false, map[string]params.DebugStatus{
		"legacy_statistics": {
			Name:   "Legacy Statistics Load",
			Value:  "started: " + statisticsStart.Format(time.RFC3339) + ", completed: " + zeroTimeStr,
			Passed: false,
		},
	})
}

// TestStatusLegacyStatisticsMultipleLogs adds two start and two
// completion entries; the status must reflect the most recent of each.
func (s *APISuite) TestStatusLegacyStatisticsMultipleLogs(c *gc.C) {
	now := time.Now()
	statisticsStart := now.Add(-1*time.Hour - 30*time.Minute)
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"legacy statistics import started"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.LegacyStatisticsType,
		Time:  statisticsStart.Add(-1 * time.Hour),
	})
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"legacy statistics import started"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.LegacyStatisticsType,
		Time:  statisticsStart,
	})
	statisticsEnd := now.Add(-30 * time.Minute)
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"legacy statistics import completed"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.LegacyStatisticsType,
		Time:  statisticsEnd.Add(-1 * time.Hour),
	})
	s.addLog(c, &mongodoc.Log{
		Data:  []byte(`"legacy statistics import completed"`),
		Level: mongodoc.InfoLevel,
		Type:  mongodoc.LegacyStatisticsType,
		Time:  statisticsEnd,
	})
	s.AssertDebugStatus(c, false, map[string]params.DebugStatus{
		"legacy_statistics": {
			Name:   "Legacy Statistics Load",
			Value:  "started: " + statisticsStart.Format(time.RFC3339) + ", completed: " + statisticsEnd.Format(time.RFC3339),
			Passed: true,
		},
	})
}

// TestStatusBaseEntitiesError: a base entity with no corresponding
// entities makes the base-entities count check fail.
func (s *APISuite) TestStatusBaseEntitiesError(c *gc.C) {
	// Add a base entity without any corresponding entities.
	entity := &mongodoc.BaseEntity{
		URL:  charm.MustParseURL("django"),
		Name: "django",
	}
	err := s.store.DB.BaseEntities().Insert(entity)
	c.Assert(err, gc.IsNil)
	s.AssertDebugStatus(c, false, map[string]params.DebugStatus{
		"base_entities": {
			Name:   "Base entities in charm store",
			Value:  "count: 1",
			Passed: false,
		},
	})
}

// AssertDebugStatus asserts that the current /debug/status endpoint
// matches the given status, ignoring status duration.
// If complete is true, it fails if the results contain
// keys not mentioned in status.
func (s *APISuite) AssertDebugStatus(c *gc.C, complete bool, status map[string]params.DebugStatus) {
	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
		Handler: s.srv,
		URL:     storeURL("debug/status"),
	})
	c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.Bytes()))
	c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "application/json")
	var gotStatus map[string]params.DebugStatus
	err := json.Unmarshal(rec.Body.Bytes(), &gotStatus)
	c.Assert(err, gc.IsNil)
	for key, r := range gotStatus {
		// When complete is false, drop checks the caller didn't ask about.
		if _, found := status[key]; !complete && !found {
			delete(gotStatus, key)
			continue
		}
		// Durations vary from run to run; zero them before comparing.
		r.Duration = 0
		gotStatus[key] = r
	}
	c.Assert(gotStatus, jc.DeepEquals, status)
}

// statusWithElasticSearchSuite runs the status endpoint with
// elasticsearch enabled.
type statusWithElasticSearchSuite struct {
	commonSuite
}

var _ = gc.Suite(&statusWithElasticSearchSuite{})

func (s *statusWithElasticSearchSuite) SetUpSuite(c *gc.C) {
	// Enable elasticsearch before the common suite setup runs.
	s.enableES = true
	s.commonSuite.SetUpSuite(c)
}

// TestStatusWithElasticSearch checks that the elasticsearch status
// check reports the running cluster.
func (s *statusWithElasticSearchSuite) TestStatusWithElasticSearch(c *gc.C) {
	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
		Handler: s.srv,
		URL:     storeURL("debug/status"),
	})
	var results map[string]params.DebugStatus
	err := json.Unmarshal(rec.Body.Bytes(), &results)
	c.Assert(err, gc.IsNil)
	c.Assert(results["elasticsearch"].Name, gc.Equals, "Elastic search is running")
	c.Assert(results["elasticsearch"].Value, jc.Contains, "cluster_name:")
}
���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/search.go��������������������������0000664�0001750�0001750�00000012626�12672604603�025345� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "net/http" "strconv" "sync/atomic" "github.com/juju/utils/parallel" "gopkg.in/errgo.v1" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" "gopkg.in/juju/charmstore.v5-unstable/internal/router" ) const maxConcurrency = 20 // GET search[?text=text][&autocomplete=1][&filter=value…][&limit=limit][&include=meta][&skip=count][&sort=field[+dir]] // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-search func (h *ReqHandler) serveSearch(_ http.Header, req *http.Request) (interface{}, error) { sp, err := ParseSearchParams(req) if err != nil { return "", err } auth, err := h.CheckRequest(req, nil, OpOther) if err != nil { logger.Infof("authorization failed on search request, granting no privileges: %v", err) } sp.Admin = auth.Admin if auth.Username != "" { sp.Groups = append(sp.Groups, auth.Username) groups, err := h.GroupsForUser(auth.Username) if err != nil { logger.Infof("cannot get groups for user %q, assuming no groups: %v", 
			auth.Username, err)
		}
		sp.Groups = append(sp.Groups, groups...)
	}
	return h.Search(sp, req)
}

// Search performs the search specified by SearchParams. If sp
// specifies that additional metadata needs to be added to the results,
// then it is added.
func (h *ReqHandler) Search(sp charmstore.SearchParams, req *http.Request) (interface{}, error) {
	// perform query
	results, err := h.Store.Search(sp)
	if err != nil {
		return nil, errgo.Notef(err, "error performing search")
	}
	return params.SearchResponse{
		SearchTime: results.SearchTime,
		Total:      results.Total,
		Results:    h.addMetaData(results.Results, sp.Include, req),
	}, nil
}

// addMetaData adds the requested meta data with the include list.
// Metadata for each entity is fetched concurrently (bounded by
// maxConcurrency); entities whose metadata cannot be retrieved are
// dropped from the returned slice rather than failing the search.
func (h *ReqHandler) addMetaData(results []*mongodoc.Entity, include []string, req *http.Request) []params.EntityResult {
	entities := make([]params.EntityResult, len(results))
	run := parallel.NewRun(maxConcurrency)
	// missing counts entities whose metadata lookup failed; updated
	// atomically because the Do closures run concurrently.
	var missing int32
	for i, ent := range results {
		// Shadow the loop variables so each closure captures its own copy.
		i, ent := i, ent
		run.Do(func() error {
			meta, err := h.Router.GetMetadata(charmstore.EntityResolvedURL(ent), include, req)
			if err != nil {
				// Unfortunately it is possible to get errors here due to
				// internal inconsistency, so rather than throwing away
				// all the search results, we just log the error and move on.
				logger.Errorf("cannot retrieve metadata for %v: %v", ent.PreferredURL(true), err)
				atomic.AddInt32(&missing, 1)
				return nil
			}
			entities[i] = params.EntityResult{
				Id:   ent.PreferredURL(true),
				Meta: meta,
			}
			return nil
		})
	}
	// We never return an error from the Do function above, so no need to
	// check the error here.
	run.Wait()
	if missing == 0 {
		return entities
	}
	// We're missing some results - shuffle all the results down to
	// fill the gaps. Failed slots are recognisable by their nil Id.
	j := 0
	for _, result := range entities {
		if result.Id != nil {
			entities[j] = result
			j++
		}
	}
	return entities[0:j]
}

// GET search/interesting[?limit=limit][&include=meta]
// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-searchinteresting
func (h *ReqHandler) serveSearchInteresting(w http.ResponseWriter, req *http.Request) {
	router.WriteError(w, errNotImplemented)
}

// ParseSearchParams extracts the search parameters from the request.
// Unknown query parameters are rejected with a bad-request error.
func ParseSearchParams(req *http.Request) (charmstore.SearchParams, error) {
	sp := charmstore.SearchParams{}
	var err error
	for k, v := range req.Form {
		switch k {
		case "text":
			sp.Text = v[0]
		case "autocomplete":
			sp.AutoComplete, err = router.ParseBool(v[0])
			if err != nil {
				return charmstore.SearchParams{}, badRequestf(err, "invalid autocomplete parameter")
			}
		case "limit":
			sp.Limit, err = strconv.Atoi(v[0])
			if err != nil {
				return charmstore.SearchParams{}, badRequestf(err, "invalid limit parameter: could not parse integer")
			}
			if sp.Limit < 1 {
				return charmstore.SearchParams{}, badRequestf(nil, "invalid limit parameter: expected integer greater than zero")
			}
		case "include":
			// Blank include values are silently dropped.
			for _, s := range v {
				if s != "" {
					sp.Include = append(sp.Include, s)
				}
			}
		case "description", "name", "owner", "provides", "requires", "series", "summary", "tags", "type":
			// Pass-through filters: stored verbatim under their key.
			if sp.Filters == nil {
				sp.Filters = make(map[string][]string)
			}
			sp.Filters[k] = v
		case "promulgated":
			// Boolean filter, normalised to "1"/"0".
			promulgated, err := router.ParseBool(v[0])
			if err != nil {
				return charmstore.SearchParams{}, badRequestf(err, "invalid promulgated filter parameter")
			}
			if sp.Filters == nil {
				sp.Filters = make(map[string][]string)
			}
			if promulgated {
				sp.Filters[k] = []string{"1"}
			} else {
				sp.Filters[k] = []string{"0"}
			}
		case "skip":
			sp.Skip, err = strconv.Atoi(v[0])
			if err != nil {
				return charmstore.SearchParams{}, badRequestf(err, "invalid skip parameter: could not parse integer")
			}
			if sp.Skip < 0 {
				return charmstore.SearchParams{}, badRequestf(nil, "invalid skip parameter: expected non-negative integer")
} case "sort": err = sp.ParseSortFields(v...) if err != nil { return charmstore.SearchParams{}, badRequestf(err, "invalid sort field") } default: return charmstore.SearchParams{}, badRequestf(nil, "invalid parameter: %s", k) } } return sp, nil } ����������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/resources.go�����������������������0000664�0001750�0001750�00000005327�12672604603�026112� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2016 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "net/http" "net/url" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable/resource" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" "gopkg.in/juju/charmstore.v5-unstable/internal/router" ) // GET id/meta/resources // https://github.com/juju/charmstore/blob/v5/docs/API.md#get-idmetaresources func (h *ReqHandler) metaResources(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { // TODO(ericsnow) Handle flags. // TODO(ericsnow) Use h.Store.ListResources() once that exists. 
	resources, err := basicListResources(entity)
	if err != nil {
		return nil, err
	}
	// Convert the internal resource representation to the API type.
	var results []params.Resource
	for _, res := range resources {
		result := params.Resource2API(res)
		results = append(results, result)
	}
	return results, nil
}

// basicListResources returns the resources declared in the entity's
// charm metadata, sorted. Bundles have no resources and produce a
// bad-request error; an entity without charm metadata is an internal
// error. Revision, fingerprint and size are left unset.
func basicListResources(entity *mongodoc.Entity) ([]resource.Resource, error) {
	if entity.URL.Series == "bundle" {
		return nil, badRequestf(nil, "bundles do not have resources")
	}
	if entity.CharmMeta == nil {
		return nil, errgo.Newf("entity missing charm metadata")
	}
	var resources []resource.Resource
	for _, meta := range entity.CharmMeta.Resources {
		// We use an origin of "upload" since resources cannot be uploaded yet.
		resOrigin := resource.OriginUpload
		res := resource.Resource{
			Meta:   meta,
			Origin: resOrigin,
			// Revision, Fingerprint, and Size are not set.
		}
		resources = append(resources, res)
	}
	resource.Sort(resources)
	return resources, nil
}

// POST id/resources/name
// https://github.com/juju/charmstore/blob/v5/docs/API.md#post-idresourcesname
//
// GET id/resources/name[/revision]
// https://github.com/juju/charmstore/blob/v5/docs/API.md#get-idresourcesnamerevision
func (h *ReqHandler) serveResources(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error {
	// Resources are "published" using "PUT id/publish" so we don't
	// support PUT here.
	// TODO(ericsnow) Support DELETE to remove a resource?
// (like serveArchive() does) switch req.Method { case "GET": return h.serveDownloadResource(id, w, req) case "POST": return h.serveUploadResource(id, w, req) default: return errgo.WithCausef(nil, params.ErrMethodNotAllowed, "%s not allowed", req.Method) } } func (h *ReqHandler) serveDownloadResource(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { return errNotImplemented } func (h *ReqHandler) serveUploadResource(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { return errNotImplemented } ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/search_test.go���������������������0000664�0001750�0001750�00000060171�12672604603�026402� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "encoding/json" "net/http" "net/url" "sort" "strings" "github.com/juju/loggo" jc "github.com/juju/testing/checkers" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" "gopkg.in/juju/charmstore.v5-unstable/internal/v5" ) type SearchSuite struct { commonSuite } var _ = gc.Suite(&SearchSuite{}) var exportTestCharms = map[string]*router.ResolvedURL{ "wordpress": newResolvedURL("cs:~charmers/precise/wordpress-23", 23), "mysql": newResolvedURL("cs:~openstack-charmers/trusty/mysql-7", 7), "varnish": newResolvedURL("cs:~foo/trusty/varnish-1", -1), "riak": newResolvedURL("cs:~charmers/trusty/riak-67", 67), } var exportTestBundles = map[string]*router.ResolvedURL{ "wordpress-simple": newResolvedURL("cs:~charmers/bundle/wordpress-simple-4", 4), } func (s *SearchSuite) SetUpSuite(c *gc.C) { s.enableES = true s.enableIdentity = true s.commonSuite.SetUpSuite(c) } func (s *SearchSuite) SetUpTest(c *gc.C) { s.commonSuite.SetUpTest(c) s.addCharmsToStore(c) err := s.store.SetPerms(charm.MustParseURL("cs:~charmers/riak"), "stable.read", "charmers", "test-user") c.Assert(err, gc.IsNil) err = s.store.UpdateSearch(newResolvedURL("~charmers/trusty/riak-0", 0)) c.Assert(err, gc.IsNil) err = s.esSuite.ES.RefreshIndex(s.esSuite.TestIndex) c.Assert(err, gc.IsNil) } func (s *SearchSuite) addCharmsToStore(c *gc.C) { for name, id := range exportTestCharms { s.addPublicCharm(c, getSearchCharm(name), id) } for name, id := range exportTestBundles { s.addPublicBundle(c, getSearchBundle(name), id, false) } } func getSearchCharm(name string) *storetesting.Charm { ca := storetesting.Charms.CharmDir(name) meta := ca.Meta() meta.Categories = 
append(strings.Split(name, "-"), "bar") return storetesting.NewCharm(meta) } func getSearchBundle(name string) *storetesting.Bundle { ba := storetesting.Charms.BundleDir(name) data := ba.Data() data.Tags = append(strings.Split(name, "-"), "baz") return storetesting.NewBundle(data) } func (s *SearchSuite) TestParseSearchParams(c *gc.C) { tests := []struct { about string query string expectParams charmstore.SearchParams expectError string }{{ about: "bare search", query: "", }, { about: "text search", query: "text=test", expectParams: charmstore.SearchParams{ Text: "test", }, }, { about: "autocomplete", query: "autocomplete=1", expectParams: charmstore.SearchParams{ AutoComplete: true, }, }, { about: "invalid autocomplete", query: "autocomplete=true", expectError: `invalid autocomplete parameter: unexpected bool value "true" (must be "0" or "1")`, }, { about: "limit", query: "limit=20", expectParams: charmstore.SearchParams{ Limit: 20, }, }, { about: "invalid limit", query: "limit=twenty", expectError: `invalid limit parameter: could not parse integer: strconv.ParseInt: parsing "twenty": invalid syntax`, }, { about: "limit too low", query: "limit=-1", expectError: "invalid limit parameter: expected integer greater than zero", }, { about: "include", query: "include=archive-size", expectParams: charmstore.SearchParams{ Include: []string{"archive-size"}, }, }, { about: "include many", query: "include=archive-size&include=bundle-data", expectParams: charmstore.SearchParams{ Include: []string{"archive-size", "bundle-data"}, }, }, { about: "include many with blanks", query: "include=archive-size&include=&include=bundle-data", expectParams: charmstore.SearchParams{ Include: []string{"archive-size", "bundle-data"}, }, }, { about: "description filter", query: "description=text", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "description": {"text"}, }, }, }, { about: "name filter", query: "name=text", expectParams: charmstore.SearchParams{ Filters: 
map[string][]string{ "name": {"text"}, }, }, }, { about: "owner filter", query: "owner=text", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "owner": {"text"}, }, }, }, { about: "provides filter", query: "provides=text", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "provides": {"text"}, }, }, }, { about: "requires filter", query: "requires=text", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "requires": {"text"}, }, }, }, { about: "series filter", query: "series=text", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "series": {"text"}, }, }, }, { about: "tags filter", query: "tags=text", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "tags": {"text"}, }, }, }, { about: "type filter", query: "type=text", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "type": {"text"}, }, }, }, { about: "many filters", query: "name=name&owner=owner&series=series1&series=series2", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "name": {"name"}, "owner": {"owner"}, "series": {"series1", "series2"}, }, }, }, { about: "bad parameter", query: "a=b", expectError: "invalid parameter: a", }, { about: "skip", query: "skip=20", expectParams: charmstore.SearchParams{ Skip: 20, }, }, { about: "invalid skip", query: "skip=twenty", expectError: `invalid skip parameter: could not parse integer: strconv.ParseInt: parsing "twenty": invalid syntax`, }, { about: "skip too low", query: "skip=-1", expectError: "invalid skip parameter: expected non-negative integer", }, { about: "promulgated filter", query: "promulgated=1", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "promulgated": {"1"}, }, }, }, { about: "promulgated filter - bad", query: "promulgated=bad", expectError: `invalid promulgated filter parameter: unexpected bool value "bad" (must be "0" or "1")`, }} for i, test := range tests { c.Logf("test %d. 
%s", i, test.about) var req http.Request var err error req.Form, err = url.ParseQuery(test.query) c.Assert(err, gc.IsNil) sp, err := v5.ParseSearchParams(&req) if test.expectError != "" { c.Assert(err, gc.Not(gc.IsNil)) c.Assert(err.Error(), gc.Equals, test.expectError) } else { c.Assert(err, gc.IsNil) } c.Assert(sp, jc.DeepEquals, test.expectParams) } } func (s *SearchSuite) TestSuccessfulSearches(c *gc.C) { tests := []struct { about string query string results []*router.ResolvedURL }{{ about: "bare search", query: "", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], }, }, { about: "text search", query: "text=wordpress", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "autocomplete search", query: "text=word&autocomplete=1", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "blank text search", query: "text=", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], }, }, { about: "description filter search", query: "description=database", results: []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["varnish"], }, }, { about: "name filter search", query: "name=mysql", results: []*router.ResolvedURL{ exportTestCharms["mysql"], }, }, { about: "owner filter search", query: "owner=foo", results: []*router.ResolvedURL{ exportTestCharms["varnish"], }, }, { about: "provides filter search", query: "provides=mysql", results: []*router.ResolvedURL{ exportTestCharms["mysql"], }, }, { about: "requires filter search", query: "requires=mysql", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], }, }, { about: "series filter search", query: "series=trusty", results: []*router.ResolvedURL{ 
exportTestCharms["mysql"], exportTestCharms["varnish"], }, }, { about: "summary filter search", query: "summary=database", results: []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["varnish"], }, }, { about: "tags filter search", query: "tags=wordpress", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "type filter search", query: "type=bundle", results: []*router.ResolvedURL{ exportTestBundles["wordpress-simple"], }, }, { about: "multiple type filter search", query: "type=bundle&type=charm", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], }, }, { about: "provides multiple interfaces filter search", query: "provides=monitoring+http", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], }, }, { about: "requires multiple interfaces filter search", query: "requires=mysql+varnish", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], }, }, { about: "multiple tags filter search", query: "tags=mysql+bar", results: []*router.ResolvedURL{ exportTestCharms["mysql"], }, }, { about: "blank owner", query: "owner=", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestBundles["wordpress-simple"], }, }, { about: "paginated search", query: "name=mysql&skip=1", }, { about: "promulgated", query: "promulgated=1", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestBundles["wordpress-simple"], }, }, { about: "not promulgated", query: "promulgated=0", results: []*router.ResolvedURL{ exportTestCharms["varnish"], }, }, { about: "promulgated with owner", query: "promulgated=1&owner=openstack-charmers", results: []*router.ResolvedURL{ exportTestCharms["mysql"], }, }} for i, test := range tests { c.Logf("test %d. 
%s", i, test.about) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search?" + test.query), }) var sr params.SearchResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, len(test.results)) c.Logf("results: %s", rec.Body.Bytes()) assertResultSet(c, sr, test.results) } } func (s *SearchSuite) TestPaginatedSearch(c *gc.C) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search?text=wordpress&skip=1"), }) var sr params.SearchResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, 1) c.Assert(sr.Total, gc.Equals, 2) } func (s *SearchSuite) TestMetadataFields(c *gc.C) { tests := []struct { about string query string meta map[string]interface{} }{{ about: "archive-size", query: "name=mysql&include=archive-size", meta: map[string]interface{}{ "archive-size": params.ArchiveSizeResponse{getSearchCharm("mysql").Size()}, }, }, { about: "bundle-metadata", query: "name=wordpress-simple&type=bundle&include=bundle-metadata", meta: map[string]interface{}{ "bundle-metadata": getSearchBundle("wordpress-simple").Data(), }, }, { about: "bundle-machine-count", query: "name=wordpress-simple&type=bundle&include=bundle-machine-count", meta: map[string]interface{}{ "bundle-machine-count": params.BundleCount{2}, }, }, { about: "bundle-unit-count", query: "name=wordpress-simple&type=bundle&include=bundle-unit-count", meta: map[string]interface{}{ "bundle-unit-count": params.BundleCount{2}, }, }, { about: "charm-actions", query: "name=wordpress&type=charm&include=charm-actions", meta: map[string]interface{}{ "charm-actions": getSearchCharm("wordpress").Actions(), }, }, { about: "charm-config", query: "name=wordpress&type=charm&include=charm-config", meta: map[string]interface{}{ "charm-config": getSearchCharm("wordpress").Config(), }, }, { about: "charm-related", query: 
"name=wordpress&type=charm&include=charm-related", meta: map[string]interface{}{ "charm-related": params.RelatedResponse{ Provides: map[string][]params.EntityResult{ "mysql": { { Id: exportTestCharms["mysql"].PreferredURL(), }, }, "varnish": { { Id: exportTestCharms["varnish"].PreferredURL(), }, }, }, }, }, }, { about: "multiple values", query: "name=wordpress&type=charm&include=charm-related&include=charm-config", meta: map[string]interface{}{ "charm-related": params.RelatedResponse{ Provides: map[string][]params.EntityResult{ "mysql": { { Id: exportTestCharms["mysql"].PreferredURL(), }, }, "varnish": { { Id: exportTestCharms["varnish"].PreferredURL(), }, }, }, }, "charm-config": getSearchCharm("wordpress").Config(), }, }} for i, test := range tests { c.Logf("test %d. %s", i, test.about) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search?" + test.query), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) var sr struct { Results []struct { Meta json.RawMessage } } err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, 1) c.Assert(string(sr.Results[0].Meta), jc.JSONEquals, test.meta) } } func (s *SearchSuite) TestSearchError(c *gc.C) { err := s.esSuite.ES.DeleteIndex(s.esSuite.TestIndex) c.Assert(err, gc.Equals, nil) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search?name=wordpress"), }) c.Assert(rec.Code, gc.Equals, http.StatusInternalServerError) var resp params.Error err = json.Unmarshal(rec.Body.Bytes(), &resp) c.Assert(err, gc.IsNil) c.Assert(resp.Code, gc.Equals, params.ErrorCode("")) c.Assert(resp.Message, gc.Matches, "error performing search: search failed: .*") } func (s *SearchSuite) TestSearchIncludeError(c *gc.C) { // Perform a search for all charms, including the // manifest, which will try to retrieve all charm // blobs. 
rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search?type=charm&include=manifest"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) var resp params.SearchResponse err := json.Unmarshal(rec.Body.Bytes(), &resp) // cs:riak will not be found because it is not visible to "everyone". c.Assert(resp.Results, gc.HasLen, len(exportTestCharms)-1) // Now remove one of the blobs. The list should still // work, but only return a single result. entity, err := s.store.FindEntity(newResolvedURL("~charmers/precise/wordpress-23", 23), nil) c.Assert(err, gc.IsNil) err = s.store.BlobStore.Remove(entity.BlobName) c.Assert(err, gc.IsNil) // Now search again - we should get one result less // (and the error will be logged). // Register a logger that so that we can check the logging output. // It will be automatically removed later because IsolatedMgoESSuite // uses LoggingSuite. var tw loggo.TestWriter err = loggo.RegisterWriter("test-log", &tw, loggo.DEBUG) c.Assert(err, gc.IsNil) rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search?type=charm&include=manifest"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) resp = params.SearchResponse{} err = json.Unmarshal(rec.Body.Bytes(), &resp) // cs:riak will not be found because it is not visible to "everyone". // cs:wordpress will not be found because it has no manifest. 
c.Assert(resp.Results, gc.HasLen, len(exportTestCharms)-2) c.Assert(tw.Log(), jc.LogMatches, []string{"cannot retrieve metadata for cs:precise/wordpress-23: cannot open archive data for cs:precise/wordpress-23: .*"}) } func (s *SearchSuite) TestSorting(c *gc.C) { tests := []struct { about string query string results []*router.ResolvedURL }{{ about: "name ascending", query: "sort=name", results: []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "name descending", query: "sort=-name", results: []*router.ResolvedURL{ exportTestBundles["wordpress-simple"], exportTestCharms["wordpress"], exportTestCharms["varnish"], exportTestCharms["mysql"], }, }, { about: "series ascending", query: "sort=series,name", results: []*router.ResolvedURL{ exportTestBundles["wordpress-simple"], exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], }, }, { about: "series descending", query: "sort=-series&sort=name", results: []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "owner ascending", query: "sort=owner,name", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], exportTestCharms["varnish"], exportTestCharms["mysql"], }, }, { about: "owner descending", query: "sort=-owner&sort=name", results: []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }} for i, test := range tests { c.Logf("test %d. %s", i, test.about) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search?" 
+ test.query), }) var sr params.SearchResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) // Not using assertResultSet(c, sr, test.results) as it does sort internally c.Assert(sr.Results, gc.HasLen, len(test.results), gc.Commentf("expected %#v", test.results)) c.Logf("results: %s", rec.Body.Bytes()) for i := range test.results { c.Assert(sr.Results[i].Id.String(), gc.Equals, test.results[i].PreferredURL().String(), gc.Commentf("element %d")) } } } func (s *SearchSuite) TestSortUnsupportedField(c *gc.C) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search?sort=foo"), }) var e params.Error err := json.Unmarshal(rec.Body.Bytes(), &e) c.Assert(err, gc.IsNil) c.Assert(e.Code, gc.Equals, params.ErrBadRequest) c.Assert(e.Message, gc.Equals, "invalid sort field: unrecognized sort parameter \"foo\"") } func (s *SearchSuite) TestDownloadsBoost(c *gc.C) { // TODO (frankban): remove this call when removing the legacy counts logic. patchLegacyDownloadCountsEnabled(s.AddCleanup, false) charmDownloads := map[string]int{ "mysql": 0, "wordpress": 1, "varnish": 8, } for n, cnt := range charmDownloads { url := newResolvedURL("cs:~downloads-test/trusty/x-1", -1) url.URL.Name = n s.addPublicCharm(c, getSearchCharm(n), url) for i := 0; i < cnt; i++ { err := s.store.IncrementDownloadCounts(url) c.Assert(err, gc.IsNil) } } err := s.esSuite.ES.RefreshIndex(s.esSuite.TestIndex) c.Assert(err, gc.IsNil) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search?owner=downloads-test"), }) var sr params.SearchResponse err = json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, 3) c.Assert(sr.Results[0].Id.Name, gc.Equals, "varnish") c.Assert(sr.Results[1].Id.Name, gc.Equals, "wordpress") c.Assert(sr.Results[2].Id.Name, gc.Equals, "mysql") } // TODO(mhilton) remove this test when removing legacy counts logic. 
func (s *SearchSuite) TestLegacyStatsUpdatesSearch(c *gc.C) { patchLegacyDownloadCountsEnabled(s.AddCleanup, true) doc, err := s.store.ES.GetSearchDocument(charm.MustParseURL("~openstack-charmers/trusty/mysql-7")) c.Assert(err, gc.IsNil) c.Assert(doc.TotalDownloads, gc.Equals, int64(0)) s.assertPutAsAdmin(c, "~openstack-charmers/trusty/mysql-7/meta/extra-info/"+params.LegacyDownloadStats, 57) doc, err = s.store.ES.GetSearchDocument(charm.MustParseURL("~openstack-charmers/trusty/mysql-7")) c.Assert(err, gc.IsNil) c.Assert(doc.TotalDownloads, gc.Equals, int64(57)) } func (s *SearchSuite) TestSearchWithAdminCredentials(c *gc.C) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search"), Username: testUsername, Password: testPassword, }) c.Assert(rec.Code, gc.Equals, http.StatusOK) expected := []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["wordpress"], exportTestCharms["riak"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], } var sr params.SearchResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) assertResultSet(c, sr, expected) } func (s *SearchSuite) TestSearchWithUserMacaroon(c *gc.C) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search"), Do: s.bakeryDoAsUser(c, "test-user"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) expected := []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["wordpress"], exportTestCharms["riak"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], } var sr params.SearchResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) assertResultSet(c, sr, expected) } func (s *SearchSuite) TestSearchWithUserInGroups(c *gc.C) { s.idM.groups = map[string][]string{ "bob": {"test-user", "test-user2"}, } rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("search"), Do: s.bakeryDoAsUser(c, "bob"), }) 
c.Assert(rec.Code, gc.Equals, http.StatusOK) expected := []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["wordpress"], exportTestCharms["riak"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], } var sr params.SearchResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) assertResultSet(c, sr, expected) } func (s *SearchSuite) TestSearchWithBadAdminCredentialsAndACookie(c *gc.C) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, Do: s.bakeryDoAsUser(c, "test-user"), URL: storeURL("search"), Username: testUsername, Password: "bad-password", }) c.Assert(rec.Code, gc.Equals, http.StatusOK) expected := []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["wordpress"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], } var sr params.SearchResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) assertResultSet(c, sr, expected) } func assertResultSet(c *gc.C, sr params.SearchResponse, expected []*router.ResolvedURL) { sort.Sort(searchResultById(sr.Results)) sort.Sort(resolvedURLByPreferredURL(expected)) c.Assert(sr.Results, gc.HasLen, len(expected), gc.Commentf("expected %#v", expected)) for i := range expected { c.Assert(sr.Results[i].Id.String(), gc.Equals, expected[i].PreferredURL().String(), gc.Commentf("element %d")) } } type searchResultById []params.EntityResult func (s searchResultById) Len() int { return len(s) } func (s searchResultById) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s searchResultById) Less(i, j int) bool { return s[i].Id.String() < s[j].Id.String() } type resolvedURLByPreferredURL []*router.ResolvedURL func (s resolvedURLByPreferredURL) Len() int { return len(s) } func (s resolvedURLByPreferredURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s resolvedURLByPreferredURL) Less(i, j int) bool { return s[i].PreferredURL().String() < s[j].PreferredURL().String() } 
// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5"

import (
	"encoding/json"
	"io"
	"net/http"
	"strconv"

	"gopkg.in/errgo.v1"
	"gopkg.in/juju/charm.v6-unstable"
	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
	"gopkg.in/mgo.v2/bson"

	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
)

// GET /log
// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-log
//
// POST /log
// https://github.com/juju/charmstore/blob/v4/docs/API.md#post-log
func (h *ReqHandler) serveLog(w http.ResponseWriter, req *http.Request) error {
	// Both operations require an authorized (authenticated) client.
	if _, err := h.authorize(req, nil, true, nil); err != nil {
		return err
	}
	switch req.Method {
	case "GET":
		return h.getLogs(w, req)
	case "POST":
		return h.postLogs(w, req)
	}
	return errgo.WithCausef(nil, params.ErrMethodNotAllowed, "%s method not allowed", req.Method)
}

// getLogs handles GET /log requests, streaming the matching log records
// to the client as a JSON list, newest first. Supported query
// parameters (read from req.Form): limit, skip, id, level and type.
func (h *ReqHandler) getLogs(w http.ResponseWriter, req *http.Request) error {
	w.Header().Set("content-type", "application/json")
	encoder := json.NewEncoder(w)

	// Retrieve values from the query string.
	limit, err := intValue(req.Form.Get("limit"), 1, 1000)
	if err != nil {
		return badRequestf(err, "invalid limit value")
	}
	offset, err := intValue(req.Form.Get("skip"), 0, 0)
	if err != nil {
		return badRequestf(err, "invalid skip value")
	}
	id := req.Form.Get("id")
	strLevel := req.Form.Get("level")
	strType := req.Form.Get("type")

	// Build the Mongo query from the optional id, level and type filters.
	query := make(bson.D, 0, 3)
	if id != "" {
		url, err := charm.ParseURL(id)
		if err != nil {
			return badRequestf(err, "invalid id value")
		}
		query = append(query, bson.DocElem{"urls", url})
	}
	if strLevel != "" {
		logLevel, ok := paramsLogLevels[params.LogLevel(strLevel)]
		if !ok {
			return badRequestf(nil, "invalid log level value")
		}
		query = append(query, bson.DocElem{"level", logLevel})
	}
	if strType != "" {
		logType, ok := paramsLogTypes[params.LogType(strType)]
		if !ok {
			return badRequestf(nil, "invalid log type value")
		}
		query = append(query, bson.DocElem{"type", logType})
	}

	// Retrieve the logs, sorted by descending _id (newest first).
	outputStarted := false
	closingContent := "[]"
	var log mongodoc.Log
	iter := h.Store.DB.Logs().Find(query).Sort("-_id").Skip(offset).Limit(limit).Iter()
	for iter.Next(&log) {
		// Start writing the response body. The logs are streamed, but we wrap
		// the output in square brackets and we separate entries with commas so
		// that it's more easy for clients to parse the response.
		closingContent = "]"
		if outputStarted {
			if err := writeString(w, ","); err != nil {
				return errgo.Notef(err, "cannot write response")
			}
		} else {
			if err := writeString(w, "["); err != nil {
				return errgo.Notef(err, "cannot write response")
			}
			outputStarted = true
		}
		logResponse := &params.LogResponse{
			Data:  json.RawMessage(log.Data),
			Level: mongodocLogLevels[log.Level],
			Type:  mongodocLogTypes[log.Type],
			URLs:  log.URLs,
			Time:  log.Time.UTC(),
		}
		if err := encoder.Encode(logResponse); err != nil {
			// Since we only allow properly encoded JSON messages to be stored
			// in the database, this should never happen. Moreover, at this
			// point we already sent a chunk of the 200 response, so we just
			// log the error.
			logger.Errorf("cannot marshal log: %s", err)
		}
	}
	if err := iter.Close(); err != nil {
		return errgo.Notef(err, "cannot retrieve logs")
	}

	// Close the JSON list, or just write an empty list, depending on whether
	// we had results.
	if err := writeString(w, closingContent); err != nil {
		return errgo.Notef(err, "cannot write response")
	}
	return nil
}

// postLogs handles POST /log requests, validating and storing the
// uploaded log records.
func (h *ReqHandler) postLogs(w http.ResponseWriter, req *http.Request) error {
	// Check the request content type.
	if ctype := req.Header.Get("Content-Type"); ctype != "application/json" {
		return badRequestf(nil, "unexpected Content-Type %q; expected 'application/json'", ctype)
	}

	// Unmarshal the request body.
	var logs []params.Log
	decoder := json.NewDecoder(req.Body)
	if err := decoder.Decode(&logs); err != nil {
		return badRequestf(err, "cannot unmarshal body")
	}
	for _, log := range logs {
		// Validate the provided level and type.
		logLevel, ok := paramsLogLevels[log.Level]
		if !ok {
			return badRequestf(nil, "invalid log level")
		}
		logType, ok := paramsLogTypes[log.Type]
		if !ok {
			return badRequestf(nil, "invalid log type")
		}
		// Add the log to the database.
		if err := h.Store.AddLog(log.Data, logLevel, logType, log.URLs); err != nil {
			return errgo.Notef(err, "cannot add log")
		}
	}
	return nil
}

// writeString writes the given string content to w, returning any
// write error.
func writeString(w io.Writer, content string) error {
	_, err := w.Write([]byte(content))
	return err
}

// TODO (frankban): use slices instead of maps for the data structures below.
var (
	// mongodocLogLevels maps internal mongodoc log levels to API ones.
	mongodocLogLevels = map[mongodoc.LogLevel]params.LogLevel{
		mongodoc.InfoLevel:    params.InfoLevel,
		mongodoc.WarningLevel: params.WarningLevel,
		mongodoc.ErrorLevel:   params.ErrorLevel,
	}
	// paramsLogLevels maps API params log levels to internal mongodoc ones.
	paramsLogLevels = map[params.LogLevel]mongodoc.LogLevel{
		params.InfoLevel:    mongodoc.InfoLevel,
		params.WarningLevel: mongodoc.WarningLevel,
		params.ErrorLevel:   mongodoc.ErrorLevel,
	}
	// mongodocLogTypes maps internal mongodoc log types to API ones.
	mongodocLogTypes = map[mongodoc.LogType]params.LogType{
		mongodoc.IngestionType:        params.IngestionType,
		mongodoc.LegacyStatisticsType: params.LegacyStatisticsType,
	}
	// paramsLogTypes maps API params log types to internal mongodoc ones.
	paramsLogTypes = map[params.LogType]mongodoc.LogType{
		params.IngestionType:        mongodoc.IngestionType,
		params.LegacyStatisticsType: mongodoc.LegacyStatisticsType,
	}
)

// intValue checks that the given string value is a number greater than or
// equal to the given minValue. If the provided value is an empty string, the
// defaultValue is returned without errors.
func intValue(strValue string, minValue, defaultValue int) (int, error) {
	if strValue == "" {
		return defaultValue, nil
	}
	value, err := strconv.Atoi(strValue)
	if err != nil {
		return 0, errgo.New("value must be a number")
	}
	if value < minValue {
		return 0, errgo.Newf("value must be >= %d", minValue)
	}
	return value, nil
}

// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5"

import (
	stdzip "archive/zip"
	"fmt"
	"io"
	"io/ioutil"
	"mime"
	"net/http"
	"path/filepath"
	"strconv"
	"time"

	"github.com/juju/httprequest"
	"gopkg.in/errgo.v1"
	"gopkg.in/juju/charm.v6-unstable"
	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"

	"gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
)

// GET id/archive
// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idarchive
//
// POST id/archive?hash=sha384hash
// https://github.com/juju/charmstore/blob/v4/docs/API.md#post-idarchive
//
// DELETE id/archive
// https://github.com/juju/charmstore/blob/v4/docs/API.md#delete-idarchive
//
// PUT id/archive?hash=sha384hash
// This is like POST except that it puts the archive to a known revision
// rather than choosing a new one. As this feature is to support legacy
// ingestion methods, and will be removed in the future, it has no entry
// in the specification.
func (h *ReqHandler) serveArchive(id *charm.URL, w http.ResponseWriter, req *http.Request) error {
	resolveId := h.ResolvedIdHandler
	switch req.Method {
	// TODO: support DELETE when it is understood how that interacts with channels.
	case "GET":
		return resolveId(h.serveGetArchive)(id, w, req)
	case "POST", "PUT":
		// Make sure we consume the full request body, before responding.
		//
		// It seems a shame to require the whole, possibly large, archive
		// is uploaded if we already know that the request is going to
		// fail, but it is necessary to prevent some failures.
		//
		// TODO: investigate using 100-Continue statuses to prevent
		// unnecessary uploads.
		defer io.Copy(ioutil.Discard, req.Body)
		if err := h.authorizeUpload(id, req); err != nil {
			return errgo.Mask(err, errgo.Any)
		}
		if req.Method == "POST" {
			return h.servePostArchive(id, w, req)
		}
		return h.servePutArchive(id, w, req)
	}
	return errgo.WithCausef(nil, params.ErrMethodNotAllowed, "%s not allowed", req.Method)
}

// authorizeUpload checks that the client making the given request is
// authorized to upload an archive for the given id. If the base entity
// exists, the unpublished-channel ACLs are used; otherwise write access
// defaults to the user named in the id.
func (h *ReqHandler) authorizeUpload(id *charm.URL, req *http.Request) error {
	if id.User == "" {
		return badRequestf(nil, "user not specified in entity upload URL %q", id)
	}
	baseEntity, err := h.Store.FindBaseEntity(id, charmstore.FieldSelector("channelacls"))
	// Note that we pass a nil entity URL to authorizeWithPerms, because
	// we haven't got a resolved URL at this point. At some
	// point in the future, we may want to be able to allow
	// is-entity first-party caveats to be allowed when uploading
	// at which point we will need to rethink this a little.
	if err == nil {
		acls := baseEntity.ChannelACLs[params.UnpublishedChannel]
		if err := h.authorizeWithPerms(req, acls.Read, acls.Write, nil); err != nil {
			return errgo.Mask(err, errgo.Any)
		}
		return nil
	}
	if errgo.Cause(err) != params.ErrNotFound {
		return errgo.Notef(err, "cannot retrieve entity %q for authorization", id)
	}
	// The base entity does not currently exist, so we default to
	// assuming write permissions for the entity user.
	if err := h.authorizeWithPerms(req, nil, []string{id.User}, nil); err != nil {
		return errgo.Mask(err, errgo.Any)
	}
	return nil
}

// serveGetArchive handles GET id/archive requests, sending the entity's
// archive blob to the client after authorization.
func (h *ReqHandler) serveGetArchive(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error {
	_, err := h.AuthorizeEntityAndTerms(req, []*router.ResolvedURL{id})
	if err != nil {
		return errgo.Mask(err, errgo.Any)
	}
	blob, err := h.Store.OpenBlob(id)
	if err != nil {
		return errgo.Mask(err, errgo.Is(params.ErrNotFound))
	}
	defer blob.Close()
	h.SendEntityArchive(id, w, req, blob)
	return nil
}

// SendEntityArchive writes the given blob, which has been retrieved
// from the given id, as a response to the given request.
func (h *ReqHandler) SendEntityArchive(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request, blob *charmstore.Blob) { header := w.Header() setArchiveCacheControl(w.Header(), h.isPublic(id)) logger.Infof("sendEntityArchive setting %s=%s", params.ContentHashHeader, blob.Hash) header.Set(params.ContentHashHeader, blob.Hash) header.Set(params.EntityIdHeader, id.PreferredURL().String()) if StatsEnabled(req) { h.Store.IncrementDownloadCountsAsync(id) } // TODO(rog) should we set connection=close here? // See https://codereview.appspot.com/5958045 serveContent(w, req, blob.Size, blob) } func (h *ReqHandler) serveDeleteArchive(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { if err := h.Store.DeleteEntity(id); err != nil { return errgo.NoteMask(err, fmt.Sprintf("cannot delete %q", id.PreferredURL()), errgo.Is(params.ErrNotFound)) } h.Store.IncCounterAsync(charmstore.EntityStatsKey(&id.URL, params.StatsArchiveDelete)) return nil } func (h *ReqHandler) updateStatsArchiveUpload(id *charm.URL, err *error) { // Upload stats don't include revision: it is assumed that each // entity revision is only uploaded once. 
id.Revision = -1 kind := params.StatsArchiveUpload if *err != nil { kind = params.StatsArchiveFailedUpload } h.Store.IncCounterAsync(charmstore.EntityStatsKey(id, kind)) } func (h *ReqHandler) servePostArchive(id *charm.URL, w http.ResponseWriter, req *http.Request) (err error) { defer h.updateStatsArchiveUpload(id, &err) if id.Revision != -1 { return badRequestf(nil, "revision specified, but should not be specified") } if id.User == "" { return badRequestf(nil, "user not specified") } hash := req.Form.Get("hash") if hash == "" { return badRequestf(nil, "hash parameter not specified") } if req.ContentLength == -1 { return badRequestf(nil, "Content-Length not specified") } oldURL, oldHash, err := h.latestRevisionInfo(id) if err != nil && errgo.Cause(err) != params.ErrNotFound { return errgo.Notef(err, "cannot get hash of latest revision") } if oldHash == hash { // The hash matches the hash of the latest revision, so // no need to upload anything. return httprequest.WriteJSON(w, http.StatusOK, ¶ms.ArchiveUploadResponse{ Id: &oldURL.URL, PromulgatedId: oldURL.PromulgatedURL(), }) } rid := &router.ResolvedURL{ URL: *id.WithChannel(""), } // Choose the next revision number for the upload. 
if oldURL == nil { rid.URL.Revision = 0 } else { rid.URL.Revision = oldURL.URL.Revision + 1 } rid.PromulgatedRevision, err = h.getNewPromulgatedRevision(id) if err != nil { return errgo.Mask(err) } if err := h.Store.UploadEntity(rid, req.Body, hash, req.ContentLength, nil); err != nil { return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload), errgo.Is(params.ErrEntityIdNotAllowed), errgo.Is(params.ErrInvalidEntity), ) } return httprequest.WriteJSON(w, http.StatusOK, ¶ms.ArchiveUploadResponse{ Id: &rid.URL, PromulgatedId: rid.PromulgatedURL(), }) } func (h *ReqHandler) servePutArchive(id *charm.URL, w http.ResponseWriter, req *http.Request) (err error) { defer h.updateStatsArchiveUpload(id, &err) if id.Series == "" { return badRequestf(nil, "series not specified") } if id.Revision == -1 { return badRequestf(nil, "revision not specified") } if id.User == "" { return badRequestf(nil, "user not specified") } hash := req.Form.Get("hash") if hash == "" { return badRequestf(nil, "hash parameter not specified") } if req.ContentLength == -1 { return badRequestf(nil, "Content-Length not specified") } var chans []params.Channel for _, c := range req.Form["channel"] { c := params.Channel(c) if c != params.DevelopmentChannel && c != params.StableChannel { return badRequestf(nil, "cannot put entity into channel %q", c) } chans = append(chans, c) } rid := &router.ResolvedURL{ URL: *id.WithChannel(""), PromulgatedRevision: -1, } // Get the PromulgatedURL from the request parameters. When ingesting // entities might not be added in order and the promulgated revision might // not match the non-promulgated revision, so the full promulgated URL // needs to be specified. 
promulgatedURL := req.Form.Get("promulgated") var pid *charm.URL if promulgatedURL != "" { pid, err = charm.ParseURL(promulgatedURL) if err != nil { return badRequestf(err, "cannot parse promulgated url") } if pid.User != "" { return badRequestf(nil, "promulgated URL cannot have a user") } if pid.Name != id.Name { return badRequestf(nil, "promulgated URL has incorrect charm name") } if pid.Series != id.Series { return badRequestf(nil, "promulgated URL has incorrect series") } if pid.Revision == -1 { return badRequestf(nil, "promulgated URL has no revision") } rid.PromulgatedRevision = pid.Revision } if err := h.Store.UploadEntity(rid, req.Body, hash, req.ContentLength, chans); err != nil { return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload), errgo.Is(params.ErrEntityIdNotAllowed), errgo.Is(params.ErrInvalidEntity), ) } return httprequest.WriteJSON(w, http.StatusOK, ¶ms.ArchiveUploadResponse{ Id: &rid.URL, PromulgatedId: rid.PromulgatedURL(), }) return nil } func (h *ReqHandler) latestRevisionInfo(id *charm.URL) (*router.ResolvedURL, string, error) { entities, err := h.Store.FindEntities(id, charmstore.FieldSelector("_id", "blobhash", "promulgated-url")) if err != nil { return nil, "", errgo.Mask(err) } if len(entities) == 0 { return nil, "", params.ErrNotFound } latest := entities[0] for _, entity := range entities { if entity.URL.Revision > latest.URL.Revision { latest = entity } } return charmstore.EntityResolvedURL(latest), latest.BlobHash, nil } // GET id/archive/path // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idarchivepath func (h *ReqHandler) serveArchiveFile(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { blob, err := h.Store.OpenBlob(id) if err != nil { return errgo.Notef(err, "cannot open archive data for %v", id) } defer blob.Close() return h.ServeBlobFile(w, req, id, blob) } // ServeBlobFile serves a file from the given blob. The // path of the file is taken from req.URL.Path. 
// The blob should be associated with the entity // with the given id. func (h *ReqHandler) ServeBlobFile(w http.ResponseWriter, req *http.Request, id *router.ResolvedURL, blob *charmstore.Blob) error { r, size, err := h.Store.OpenBlobFile(blob, req.URL.Path) if err != nil { return errgo.Mask(err, errgo.Is(params.ErrNotFound), errgo.Is(params.ErrForbidden)) } defer r.Close() ctype := mime.TypeByExtension(filepath.Ext(req.URL.Path)) if ctype != "" { w.Header().Set("Content-Type", ctype) } w.Header().Set("Content-Length", strconv.FormatInt(size, 10)) setArchiveCacheControl(w.Header(), h.isPublic(id)) w.WriteHeader(http.StatusOK) io.Copy(w, r) return nil } func (h *ReqHandler) isPublic(id *router.ResolvedURL) bool { acls, _ := h.entityACLs(id) for _, p := range acls.Read { if p == params.Everyone { return true } } return false } // ArchiveCachePublicMaxAge specifies the cache expiry duration for items // returned from the archive where the id represents the id of a public entity. const ArchiveCachePublicMaxAge = 1 * time.Hour // setArchiveCacheControl sets cache control headers // in a response to an archive-derived endpoint. // The isPublic parameter specifies whether // the entity id can or not be cached . func setArchiveCacheControl(h http.Header, isPublic bool) { if isPublic { seconds := int(ArchiveCachePublicMaxAge / time.Second) h.Set("Cache-Control", "public, max-age="+strconv.Itoa(seconds)) } else { h.Set("Cache-Control", "no-cache, must-revalidate") } } // getNewPromulgatedRevision returns the promulgated revision // to give to a newly uploaded charm with the given id. // It returns -1 if the charm is not promulgated. 
func (h *ReqHandler) getNewPromulgatedRevision(id *charm.URL) (int, error) { baseEntity, err := h.Store.FindBaseEntity(id, charmstore.FieldSelector("promulgated")) if err != nil && errgo.Cause(err) != params.ErrNotFound { return 0, errgo.Mask(err) } if baseEntity == nil || !baseEntity.Promulgated { return -1, nil } query := h.Store.EntitiesQuery(&charm.URL{ Series: id.Series, Name: id.Name, Revision: -1, }) var entity mongodoc.Entity err = query.Sort("-promulgated-revision").Select(bson.D{{"promulgated-revision", 1}}).One(&entity) if err == mgo.ErrNotFound { return 0, nil } if err != nil { return 0, errgo.Mask(err) } return entity.PromulgatedRevision + 1, nil } // archiveReadError creates an appropriate error for errors in reading an // uploaded archive. If the archive could not be read because the data // uploaded is invalid then an error with a cause of // params.ErrInvalidEntity will be returned. The given message will be // added as context. func archiveReadError(err error, msg string) error { switch errgo.Cause(err) { case stdzip.ErrFormat, stdzip.ErrAlgorithm, stdzip.ErrChecksum: return errgo.WithCausef(err, params.ErrInvalidEntity, msg) } return errgo.Notef(err, msg) } ����������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/auth_test.go�����������������������0000664�0001750�0001750�00000114254�12672604603�026100� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "encoding/json" "fmt" "io" "net/http" "net/url" "os" "sort" "strings" "sync" "time" jc "github.com/juju/testing/checkers" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" "gopkg.in/juju/charmstore.v5-unstable/internal/v5" ) func (s *commonSuite) AssertEndpointAuth(c *gc.C, p httptesting.JSONCallParams) { s.testNonMacaroonAuth(c, p) s.testMacaroonAuth(c, p) } func (s *commonSuite) testNonMacaroonAuth(c *gc.C, p httptesting.JSONCallParams) { p.Handler = s.noMacaroonSrv // Check that the request succeeds when provided with the // correct credentials. p.Username = "test-user" p.Password = "test-password" httptesting.AssertJSONCall(c, p) // Check that auth fails with no creds provided. p.Username = "" p.Password = "" p.ExpectStatus = http.StatusUnauthorized p.ExpectBody = params.Error{ Message: "authentication failed: missing HTTP auth header", Code: params.ErrUnauthorized, } httptesting.AssertJSONCall(c, p) // Check that auth fails with the wrong username provided. p.Username = "wrong" p.Password = "test-password" p.ExpectStatus = http.StatusUnauthorized p.ExpectBody = params.Error{ Message: "invalid user name or password", Code: params.ErrUnauthorized, } httptesting.AssertJSONCall(c, p) // Check that auth fails with the wrong password provided. 
p.Username = "test-user" p.Password = "test-password-wrong" p.ExpectStatus = http.StatusUnauthorized p.ExpectBody = params.Error{ Message: "invalid user name or password", Code: params.ErrUnauthorized, } httptesting.AssertJSONCall(c, p) } func (s *commonSuite) testMacaroonAuth(c *gc.C, p httptesting.JSONCallParams) { // Make a test third party caveat discharger. var checkedCaveats []string var mu sync.Mutex var dischargeError error s.discharge = func(cond string, arg string) ([]checkers.Caveat, error) { mu.Lock() defer mu.Unlock() checkedCaveats = append(checkedCaveats, cond+" "+arg) if dischargeError != nil { return nil, dischargeError } return []checkers.Caveat{ checkers.DeclaredCaveat("username", "bob"), }, nil } p.Handler = s.srv client := httpbakery.NewHTTPClient() cookieJar := &cookieJar{CookieJar: client.Jar} client.Jar = cookieJar p.Do = bakeryDo(client) // Check that the call succeeds with simple auth. c.Log("simple auth sucess") p.Username = "test-user" p.Password = "test-password" httptesting.AssertJSONCall(c, p) c.Assert(checkedCaveats, gc.HasLen, 0) c.Assert(cookieJar.cookieURLs, gc.HasLen, 0) // Check that the call gives us the correct // "authentication denied response" without simple auth // and uses the third party checker // and that a cookie is stored at the correct location. // TODO when we allow admin access via macaroon creds, // change this test to expect success. c.Log("macaroon unauthorized error") p.Username, p.Password = "", "" p.ExpectStatus = http.StatusUnauthorized p.ExpectBody = params.Error{ Message: `unauthorized: access denied for user "bob"`, Code: params.ErrUnauthorized, } httptesting.AssertJSONCall(c, p) sort.Strings(checkedCaveats) c.Assert(checkedCaveats, jc.DeepEquals, []string{ "is-authenticated-user ", }) checkedCaveats = nil c.Assert(cookieJar.cookieURLs, gc.DeepEquals, []string{"http://somehost/"}) // Check that the call fails with incorrect simple auth info. 
c.Log("simple auth error") p.Password = "bad-password" p.ExpectStatus = http.StatusUnauthorized p.ExpectBody = params.Error{ Message: "authentication failed: missing HTTP auth header", Code: params.ErrUnauthorized, } // Check that it fails when the discharger refuses the discharge. c.Log("macaroon discharge error") client = httpbakery.NewHTTPClient() dischargeError = fmt.Errorf("go away") p.Do = bakeryDo(client) // clear cookies p.Password = "" p.Username = "" p.ExpectError = `cannot get discharge from "https://[^"]*": third party refused discharge: cannot discharge: go away` httptesting.AssertJSONCall(c, p) } type cookieJar struct { cookieURLs []string http.CookieJar } func (j *cookieJar) SetCookies(url *url.URL, cookies []*http.Cookie) { url1 := *url url1.Host = "somehost" for _, cookie := range cookies { if cookie.Path != "" { url1.Path = cookie.Path } if cookie.Name != "macaroon-authn" { panic("unexpected cookie name: " + cookie.Name) } } j.cookieURLs = append(j.cookieURLs, url1.String()) j.CookieJar.SetCookies(url, cookies) } func noInteraction(*url.URL) error { return fmt.Errorf("unexpected interaction required") } // dischargedAuthCookie retrieves and discharges an authentication macaroon cookie. It adds the provided // first-party caveats before discharging the macaroon. 
func dischargedAuthCookie(c *gc.C, srv http.Handler, caveats ...string) *http.Cookie {
	// Fetch an authentication macaroon from the store's "macaroon" endpoint.
	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
		Handler: srv,
		URL:     storeURL("macaroon"),
		Method:  "GET",
	})
	var m macaroon.Macaroon
	err := json.Unmarshal(rec.Body.Bytes(), &m)
	c.Assert(err, gc.IsNil)
	// Add the caller-supplied first-party caveats before discharging.
	for _, cav := range caveats {
		err := m.AddFirstPartyCaveat(cav)
		c.Assert(err, gc.IsNil)
	}
	// Discharge all third-party caveats and bundle the result as a cookie.
	client := httpbakery.NewClient()
	ms, err := client.DischargeAll(&m)
	c.Assert(err, gc.IsNil)
	macaroonCookie, err := httpbakery.NewCookie(ms)
	c.Assert(err, gc.IsNil)
	return macaroonCookie
}

// authSuite embeds commonSuite and runs its tests with the identity
// service enabled (see SetUpSuite).
type authSuite struct {
	commonSuite
}

var _ = gc.Suite(&authSuite{})

// SetUpSuite enables the identity service before running the common
// suite setup.
func (s *authSuite) SetUpSuite(c *gc.C) {
	s.enableIdentity = true
	s.commonSuite.SetUpSuite(c)
}

// readAuthorizationTests holds table-driven cases for TestReadAuthorization,
// covering read ACLs on the unpublished, development and stable channels.
var readAuthorizationTests = []struct {
	// about holds the test description.
	about string
	// username holds the authenticated user name returned by the discharger.
	// If empty, an anonymous user is returned.
	username string
	// groups holds group names the user is member of, as returned by the
	// discharger.
	groups []string
	// unpublishedReadPerm stores a list of users with read permissions
	// on the unpublished entities.
	unpublishedReadPerm []string
	// developmentReadPerm stores a list of users with read permissions on the development channel.
	developmentReadPerm []string
	// stableReadPerm stores a list of users with read permissions on the stable channel.
	stableReadPerm []string
	// channels contains a list of channels, to which the entity belongs.
	channels []params.Channel
	// expectStatus is the expected HTTP response status.
	// Defaults to 200 status OK.
	expectStatus int
	// expectBody holds the expected body of the HTTP response. If nil,
	// the body is not checked and the response is assumed to be ok.
expectBody interface{} }{{ about: "anonymous users are authorized", unpublishedReadPerm: []string{params.Everyone}, }, { about: "everyone is authorized", username: "dalek", unpublishedReadPerm: []string{params.Everyone}, }, { about: "everyone and a specific user", username: "dalek", unpublishedReadPerm: []string{params.Everyone, "janeway"}, }, { about: "specific user authorized", username: "who", unpublishedReadPerm: []string{"who"}, }, { about: "multiple specific users authorized", username: "picard", unpublishedReadPerm: []string{"kirk", "picard", "sisko"}, }, { about: "nobody authorized", username: "picard", expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "picard"`, }, }, { about: "access denied for user", username: "kirk", unpublishedReadPerm: []string{"picard", "sisko"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "everyone is authorized (user is member of groups)", username: "dalek", groups: []string{"group1", "group2"}, unpublishedReadPerm: []string{params.Everyone}, }, { about: "everyone and a specific group", username: "dalek", groups: []string{"group2", "group3"}, unpublishedReadPerm: []string{params.Everyone, "group1"}, }, { about: "specific group authorized", username: "who", groups: []string{"group1", "group42", "group2"}, unpublishedReadPerm: []string{"group42"}, }, { about: "multiple specific groups authorized", username: "picard", groups: []string{"group2"}, unpublishedReadPerm: []string{"kirk", "group0", "group2"}, }, { about: "no group authorized", username: "picard", groups: []string{"group1", "group2"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "picard"`, }, }, { about: "access denied for group", username: "kirk", groups: 
[]string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through development channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, developmentReadPerm: []string{"group1"}, channels: []params.Channel{params.DevelopmentChannel}, }, { about: "access provided through development channel, but charm not published", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, developmentReadPerm: []string{"group1"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through stable channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, developmentReadPerm: []string{"group12"}, stableReadPerm: []string{"group2"}, channels: []params.Channel{params.DevelopmentChannel, params.StableChannel}, }, { about: "access provided through stable channel, but charm not published", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, developmentReadPerm: []string{"group12"}, stableReadPerm: []string{"group2"}, channels: []params.Channel{params.DevelopmentChannel}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through development channel, but charm on stable channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, 
unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, developmentReadPerm: []string{"group1"}, stableReadPerm: []string{"group11"}, channels: []params.Channel{ params.DevelopmentChannel, params.StableChannel, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through unpublished ACL, but charm on stable channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group1"}, stableReadPerm: []string{"group11"}, channels: []params.Channel{ params.DevelopmentChannel, params.StableChannel, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through unpublished ACL, but charm on development channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group1"}, developmentReadPerm: []string{"group11"}, channels: []params.Channel{ params.DevelopmentChannel, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }} func dischargeForUser(username string) func(_, _ string) ([]checkers.Caveat, error) { return func(_, _ string) ([]checkers.Caveat, error) { return []checkers.Caveat{ checkers.DeclaredCaveat(v5.UsernameAttr, username), }, nil } } func (s *authSuite) TestReadAuthorization(c *gc.C) { for i, test := range readAuthorizationTests { c.Logf("test %d: %s", i, test.about) s.discharge = dischargeForUser(test.username) s.idM.groups = map[string][]string{ test.username: test.groups, } // Add a charm to the store, used for testing. 
rurl := newResolvedURL("~charmers/utopic/wordpress-42", -1) err := s.store.AddCharmWithArchive(rurl, storetesting.Charms.CharmDir("wordpress")) c.Assert(err, gc.IsNil) // publish the charm on any required channels. if len(test.channels) > 0 { err := s.store.Publish(rurl, test.channels...) c.Assert(err, gc.IsNil) } // Change the ACLs for the testing charm. err = s.store.SetPerms(&rurl.URL, "unpublished.read", test.unpublishedReadPerm...) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&rurl.URL, "development.read", test.developmentReadPerm...) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&rurl.URL, "stable.read", test.stableReadPerm...) c.Assert(err, gc.IsNil) // Define an helper function used to send requests and check responses. doRequest := func(path string, expectStatus int, expectBody interface{}) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL(path), }) if expectStatus == 0 { expectStatus = http.StatusOK } c.Assert(rec.Code, gc.Equals, expectStatus, gc.Commentf("body: %s", rec.Body)) if expectBody != nil { c.Assert(rec.Body.String(), jc.JSONEquals, expectBody) } } // Perform meta and id requests. // Note that we use the full URL so that we test authorization specifically // on that entity without trying to look up the entity in the stable channel. doRequest("~charmers/utopic/wordpress-42/meta/archive-size", test.expectStatus, test.expectBody) doRequest("~charmers/utopic/wordpress-42/expand-id", test.expectStatus, test.expectBody) // Remove all entities from the store. _, err = s.store.DB.Entities().RemoveAll(nil) c.Assert(err, gc.IsNil) } } var writeAuthorizationTests = []struct { // about holds the test description. about string // username holds the authenticated user name returned by the discharger. // If empty, an anonymous user is returned. username string // groups holds group names the user is member of, as returned by the // discharger. 
groups []string // writePerm stores a list of users with write permissions. unpublishedWritePerm []string // developmentWritePerm stores a list of users with write permissions on the development channel. developmentWritePerm []string // stableWritePerm stores a list of users with write permissions on the stable channel. stableWritePerm []string // channels contains a list of channels, to which the entity belongs. channels []params.Channel // expectStatus is the expected HTTP response status. // Defaults to 200 status OK. expectStatus int // expectBody holds the expected body of the HTTP response. If nil, // the body is not checked and the response is assumed to be ok. expectBody interface{} }{{ about: "anonymous users are not authorized", unpublishedWritePerm: []string{"who"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: "unauthorized: no username declared", }, }, { about: "specific user authorized to write", username: "dalek", unpublishedWritePerm: []string{"dalek"}, }, { about: "multiple users authorized", username: "sisko", unpublishedWritePerm: []string{"kirk", "picard", "sisko"}, }, { about: "no users authorized", username: "who", expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "who"`, }, }, { about: "specific user unauthorized", username: "kirk", unpublishedWritePerm: []string{"picard", "sisko", "janeway"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access granted for group", username: "picard", groups: []string{"group1", "group2"}, unpublishedWritePerm: []string{"group2"}, }, { about: "multiple groups authorized", username: "picard", groups: []string{"group1", "group2"}, unpublishedWritePerm: []string{"kirk", "group0", "group1", "group2"}, }, { about: "no group authorized", username: 
"picard", groups: []string{"group1", "group2"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "picard"`, }, }, { about: "access denied for group", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through development channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, developmentWritePerm: []string{"group1"}, channels: []params.Channel{params.DevelopmentChannel}, }, { about: "access provided through development channel, but charm not published", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, developmentWritePerm: []string{"group1"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through stable channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, developmentWritePerm: []string{"group12"}, stableWritePerm: []string{"group2"}, channels: []params.Channel{params.DevelopmentChannel, params.StableChannel}, }, { about: "access provided through stable channel, but charm not published", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, developmentWritePerm: []string{"group12"}, stableWritePerm: []string{"group2"}, channels: []params.Channel{params.DevelopmentChannel}, expectStatus: http.StatusUnauthorized, 
expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through development channel, but charm on stable channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, developmentWritePerm: []string{"group1"}, stableWritePerm: []string{"group11"}, channels: []params.Channel{ params.DevelopmentChannel, params.StableChannel, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through unpublished ACL, but charm on stable channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group1"}, stableWritePerm: []string{"group11"}, channels: []params.Channel{ params.DevelopmentChannel, params.StableChannel, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "access provided through unpublished ACL, but charm on development channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group1"}, developmentWritePerm: []string{"group11"}, channels: []params.Channel{ params.DevelopmentChannel, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }} func (s *authSuite) TestWriteAuthorization(c *gc.C) { for i, test := range writeAuthorizationTests { c.Logf("test %d: %s", i, test.about) s.discharge = dischargeForUser(test.username) s.idM.groups = map[string][]string{ test.username: test.groups, } // Add a charm to the store, used for testing. 
rurl := newResolvedURL("~charmers/utopic/wordpress-42", -1) err := s.store.AddCharmWithArchive(rurl, storetesting.Charms.CharmDir("wordpress")) c.Assert(err, gc.IsNil) // publish the charm on any required channels. if len(test.channels) > 0 { err := s.store.Publish(rurl, test.channels...) c.Assert(err, gc.IsNil) } // Change the ACLs for the testing charm. err = s.store.SetPerms(&rurl.URL, "unpublished.write", test.unpublishedWritePerm...) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&rurl.URL, "development.write", test.developmentWritePerm...) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&rurl.URL, "stable.write", test.stableWritePerm...) c.Assert(err, gc.IsNil) makeRequest := func(path string, expectStatus int, expectBody interface{}) { client := httpbakery.NewHTTPClient() rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, Do: bakeryDo(client), URL: storeURL(path), Method: "PUT", Header: http.Header{"Content-Type": {"application/json"}}, Body: strings.NewReader("42"), }) if expectStatus == 0 { expectStatus = http.StatusOK } c.Assert(rec.Code, gc.Equals, expectStatus, gc.Commentf("body: %s", rec.Body)) if expectBody != nil { c.Assert(rec.Body.String(), jc.JSONEquals, expectBody) } } // Perform a meta PUT request to the URLs. // Note that we use the full URL so that we test authorization specifically // on that entity without trying to look up the entity in the stable channel. makeRequest("~charmers/utopic/wordpress-42/meta/extra-info/key", test.expectStatus, test.expectBody) // Remove all entities from the store. _, err = s.store.DB.Entities().RemoveAll(nil) c.Assert(err, gc.IsNil) } } var uploadEntityAuthorizationTests = []struct { // about holds the test description. about string // username holds the authenticated user name returned by the discharger. // If empty, an anonymous user is returned. username string // groups holds group names the user is member of, as returned by the // discharger. 
groups []string // id holds the id of the entity to be uploaded. id string // promulgated holds whether the corresponding promulgated entity must be // already present in the charm store before performing the upload. promulgated bool // writeAcls can be used to set customized write ACLs for the published // entity before performing the upload. If empty, default ACLs are used. writeAcls []string // expectStatus is the expected HTTP response status. // Defaults to 200 status OK. expectStatus int // expectBody holds the expected body of the HTTP response. If nil, // the body is not checked and the response is assumed to be ok. expectBody interface{} }{{ about: "user owned entity", username: "who", id: "~who/utopic/django", }, { about: "group owned entity", username: "dalek", groups: []string{"group1", "group2"}, id: "~group1/utopic/django", }, { about: "specific group", username: "dalek", groups: []string{"group42"}, id: "~group42/utopic/django", }, { about: "promulgated entity", username: "sisko", groups: []string{"charmers", "group2"}, id: "~charmers/utopic/django", promulgated: true, }, { about: "unauthorized: promulgated entity", username: "sisko", groups: []string{"group1", "group2"}, id: "~charmers/utopic/django", promulgated: true, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "sisko"`, }, }, { about: "unauthorized: anonymous user", id: "~who/utopic/django", expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: "unauthorized: no username declared", }, }, { about: "unauthorized: anonymous user and promulgated entity", id: "~charmers/utopic/django", promulgated: true, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: "unauthorized: no username declared", }, }, { about: "unauthorized: user does not match", username: "kirk", id: "~picard/utopic/django", expectStatus: 
http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "unauthorized: group does not match", username: "kirk", groups: []string{"group1", "group2", "group3"}, id: "~group0/utopic/django", expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { about: "unauthorized: specific group and promulgated entity", username: "janeway", groups: []string{"group1"}, id: "~charmers/utopic/django", promulgated: true, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "janeway"`, }, }, { about: "unauthorized: entity no permissions", username: "picard", id: "~picard/wily/django", writeAcls: []string{"kirk"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "picard"`, }, }} func (s *authSuite) TestUploadEntityAuthorization(c *gc.C) { for i, test := range uploadEntityAuthorizationTests { c.Logf("test %d: %s", i, test.about) s.discharge = dischargeForUser(test.username) s.idM.groups = map[string][]string{ test.username: test.groups, } // Prepare the expected status. expectStatus := test.expectStatus if expectStatus == 0 { expectStatus = http.StatusOK } // Add a pre-existing entity if required. if test.promulgated || len(test.writeAcls) != 0 { id := charm.MustParseURL(test.id).WithRevision(0) revision := -1 if test.promulgated { revision = 1 } rurl := newResolvedURL(id.String(), revision) s.store.AddCharmWithArchive(rurl, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) if len(test.writeAcls) != 0 { s.store.SetPerms(&rurl.URL, "unpublished.write", test.writeAcls...) } } // Try to upload the entity. 
body, hash, size := archiveInfo(c, "wordpress") defer body.Close() client := httpbakery.NewHTTPClient() rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, Do: bakeryDo(client), URL: storeURL(test.id + "/archive?hash=" + hash), Method: "POST", ContentLength: size, Header: http.Header{ "Content-Type": {"application/zip"}, }, Body: body, }) c.Assert(rec.Code, gc.Equals, expectStatus, gc.Commentf("body: %s", rec.Body)) if test.expectBody != nil { c.Assert(rec.Body.String(), jc.JSONEquals, test.expectBody) } // Remove all entities from the store. _, err := s.store.DB.Entities().RemoveAll(nil) c.Assert(err, gc.IsNil) _, err = s.store.DB.BaseEntities().RemoveAll(nil) c.Assert(err, gc.IsNil) } } type readSeekCloser interface { io.ReadCloser io.Seeker } // archiveInfo prepares a zip archive of an entity and return a reader for the // archive, its blob hash and size. func archiveInfo(c *gc.C, name string) (r readSeekCloser, hashSum string, size int64) { ch := storetesting.Charms.CharmArchive(c.MkDir(), name) f, err := os.Open(ch.Path) c.Assert(err, gc.IsNil) hash, size := hashOf(f) _, err = f.Seek(0, 0) c.Assert(err, gc.IsNil) return f, hash, size } var isEntityCaveatTests = []struct { url string expectError string }{{ url: "~charmers/utopic/wordpress-42/archive", }, { url: "~charmers/utopic/wordpress-42/meta/hash", }, { url: "wordpress/archive", }, { url: "wordpress/meta/hash", }, { url: "utopic/wordpress-10/archive", }, { url: "utopic/wordpress-10/meta/hash", }, { url: "~charmers/utopic/wordpress-41/archive", expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation on entity cs:~charmers/utopic/wordpress-41 not allowed`, }, { url: "~charmers/utopic/wordpress-41/meta/hash", expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation on entity cs:~charmers/utopic/wordpress-41 not allowed`, }, { url: "utopic/wordpress-9/archive", expectError: 
`verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation on entity cs:utopic/wordpress-9 not allowed`, }, { url: "utopic/wordpress-9/meta/hash", expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation on entity cs:utopic/wordpress-9 not allowed`, }, { url: "log", expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation does not involve any of the allowed entities cs:~charmers/utopic/wordpress-42`, }} func (s *authSuite) TestIsEntityCaveat(c *gc.C) { s.discharge = func(_, _ string) ([]checkers.Caveat, error) { return []checkers.Caveat{{ Condition: "is-entity cs:~charmers/utopic/wordpress-42", }, checkers.DeclaredCaveat(v5.UsernameAttr, "bob"), }, nil } // Add a charm to the store, used for testing. s.addPublicCharm(c, storetesting.NewCharm(nil), newResolvedURL("~charmers/utopic/wordpress-41", 9)) s.addPublicCharm(c, storetesting.NewCharm(nil), newResolvedURL("~charmers/utopic/wordpress-42", 10)) // Change the ACLs for charms we've just uploaded, otherwise // no authorization checking will take place. err := s.store.SetPerms(charm.MustParseURL("cs:~charmers/wordpress"), "stable.read", "bob") c.Assert(err, gc.IsNil) for i, test := range isEntityCaveatTests { c.Logf("test %d: %s", i, test.url) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL(test.url), Method: "GET", }) if test.expectError != "" { c.Assert(rec.Code, gc.Equals, http.StatusUnauthorized) var respErr httpbakery.Error err := json.Unmarshal(rec.Body.Bytes(), &respErr) c.Assert(err, gc.IsNil) c.Assert(respErr.Message, gc.Matches, test.expectError) continue } c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.Bytes())) } } func (s *authSuite) TestDelegatableMacaroon(c *gc.C) { // Create a new server with a third party discharger. 
s.discharge = dischargeForUser("bob") // First check that we get a macaraq error when using a vanilla http do // request with both bakery protocol. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("delegatable-macaroon"), Header: http.Header{"Bakery-Protocol-Version": {"1"}}, ExpectBody: httptesting.BodyAsserter(func(c *gc.C, m json.RawMessage) { // Allow any body - the next check will check that it's a valid macaroon. }), ExpectStatus: http.StatusUnauthorized, }) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("delegatable-macaroon"), ExpectBody: httptesting.BodyAsserter(func(c *gc.C, m json.RawMessage) { // Allow any body - the next check will check that it's a valid macaroon. }), ExpectStatus: http.StatusProxyAuthRequired, }) client := httpbakery.NewHTTPClient() now := time.Now() var gotBody json.RawMessage httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("delegatable-macaroon"), ExpectBody: httptesting.BodyAsserter(func(c *gc.C, m json.RawMessage) { gotBody = m }), Do: bakeryDo(client), ExpectStatus: http.StatusOK, }) c.Assert(gotBody, gc.NotNil) var m macaroon.Macaroon err := json.Unmarshal(gotBody, &m) c.Assert(err, gc.IsNil) caveats := m.Caveats() foundExpiry := false for _, cav := range caveats { cond, arg, err := checkers.ParseCaveat(cav.Id) c.Assert(err, gc.IsNil) switch cond { case checkers.CondTimeBefore: t, err := time.Parse(time.RFC3339Nano, arg) c.Assert(err, gc.IsNil) c.Assert(t, jc.TimeBetween(now.Add(v5.DelegatableMacaroonExpiry), now.Add(v5.DelegatableMacaroonExpiry+time.Second))) foundExpiry = true } } c.Assert(foundExpiry, jc.IsTrue) // Now check that we can use the obtained macaroon to do stuff // as the declared user. 
rurl := newResolvedURL("~charmers/utopic/wordpress-41", 9) err = s.store.AddCharmWithArchive( rurl, storetesting.Charms.CharmDir("wordpress")) c.Assert(err, gc.IsNil) err = s.store.Publish(rurl, params.StableChannel) c.Assert(err, gc.IsNil) // Change the ACLs for the testing charm. err = s.store.SetPerms(charm.MustParseURL("cs:~charmers/wordpress"), "stable.read", "bob") c.Assert(err, gc.IsNil) // First check that we require authorization to access the charm. rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("~charmers/utopic/wordpress/meta/id-name"), Method: "GET", }) c.Assert(rec.Code, gc.Equals, http.StatusProxyAuthRequired) // Then check that the request succeeds if we provide the delegatable // macaroon. client = httpbakery.NewHTTPClient() u, err := url.Parse("http://127.0.0.1") c.Assert(err, gc.IsNil) err = httpbakery.SetCookie(client.Jar, u, macaroon.Slice{&m}) c.Assert(err, gc.IsNil) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("~charmers/utopic/wordpress/meta/id-name"), ExpectBody: params.IdNameResponse{ Name: "wordpress", }, ExpectStatus: http.StatusOK, Do: bakeryDo(client), }) } func (s *authSuite) TestDelegatableMacaroonWithBasicAuth(c *gc.C) { // First check that we get a macaraq error when using a vanilla http do // request. 
httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Username: testUsername, Password: testPassword, URL: storeURL("delegatable-macaroon"), ExpectBody: params.Error{ Code: params.ErrForbidden, Message: "delegatable macaroon is not obtainable using admin credentials", }, ExpectStatus: http.StatusForbidden, }) } func (s *authSuite) TestGroupsForUserSuccess(c *gc.C) { h := s.handler(c) defer h.Close() s.idM.groups = map[string][]string{ "bob": {"one", "two"}, } groups, err := h.GroupsForUser("bob") c.Assert(err, gc.IsNil) c.Assert(groups, jc.DeepEquals, []string{"one", "two"}) } func (s *authSuite) TestGroupsForUserWithNoIdentity(c *gc.C) { h := s.handler(c) defer h.Close() groups, err := h.GroupsForUser("someone") c.Assert(err, gc.IsNil) c.Assert(groups, gc.HasLen, 0) } func (s *authSuite) TestGroupsForUserWithInvalidIdentityURL(c *gc.C) { s.PatchValue(&s.srvParams.IdentityAPIURL, ":::::") h := s.handler(c) defer h.Close() groups, err := h.GroupsForUser("someone") c.Assert(err, gc.ErrorMatches, `cannot get groups for someone: cannot parse ":::::": parse :::::: missing protocol scheme`) c.Assert(groups, gc.HasLen, 0) } func (s *authSuite) TestGroupsForUserWithInvalidBody(c *gc.C) { h := s.handler(c) defer h.Close() s.idM.body = "bad" s.idM.contentType = "application/json" groups, err := h.GroupsForUser("someone") c.Assert(err, gc.ErrorMatches, `cannot get groups for someone: GET .*: invalid character 'b' looking for beginning of value`) c.Assert(groups, gc.HasLen, 0) } func (s *authSuite) TestGroupsForUserWithErrorResponse(c *gc.C) { h := s.handler(c) defer h.Close() s.idM.body = `{"message":"some error","code":"some code"}` s.idM.status = http.StatusUnauthorized s.idM.contentType = "application/json" groups, err := h.GroupsForUser("someone") c.Assert(err, gc.ErrorMatches, `cannot get groups for someone: GET .*: some error`) c.Assert(groups, gc.HasLen, 0) } func (s *authSuite) TestGroupsForUserWithBadErrorResponse(c *gc.C) { h := s.handler(c) 
defer h.Close() s.idM.body = `{"message":"some error"` s.idM.status = http.StatusUnauthorized s.idM.contentType = "application/json" groups, err := h.GroupsForUser("someone") c.Assert(err, gc.ErrorMatches, `cannot get groups for someone: GET .*: cannot unmarshal error response \(status 401 Unauthorized\): unexpected EOF`) c.Assert(groups, gc.HasLen, 0) } type errorTransport string func (e errorTransport) RoundTrip(*http.Request) (*http.Response, error) { return nil, errgo.New(string(e)) } ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/httpfs.go��������������������������0000664�0001750�0001750�00000004621�12672604603�025404� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "fmt" "io" "net/http" "os" "time" ) // serveContent serves the given content as a single HTTP endpoint. // We use http.FileServer under the covers because that // provides us with all the HTTP Content-Range goodness // that we'd like. // TODO use http.ServeContent instead of this. 
func serveContent(w http.ResponseWriter, req *http.Request, length int64, content io.ReadSeeker) { fs := &archiveFS{ length: length, ReadSeeker: content, } // Copy the request and mutate the path to pretend // we're looking for the given file. nreq := *req nreq.URL.Path = "/archive.zip" h := http.FileServer(fs) h.ServeHTTP(w, &nreq) } // archiveFS implements http.FileSystem to serve a single file. // http.FileSystem.Open returns an http.File; http.File.Stat returns an // os.FileInfo. We implement methods for all of those interfaces on the // same type, and return the same value for all the aforementioned // methods, since we only ever need one instance of any of them. type archiveFS struct { length int64 io.ReadSeeker } // Open implements http.FileSystem.Open. func (fs *archiveFS) Open(name string) (http.File, error) { if name != "/archive.zip" { return nil, fmt.Errorf("unexpected name %q", name) } return fs, nil } // Close implements http.File.Close. // It does not actually close anything because // that responsibility is left to the caller of serveContent. func (fs *archiveFS) Close() error { return nil } // Stat implements http.File.Stat. func (fs *archiveFS) Stat() (os.FileInfo, error) { return fs, nil } // Readdir implements http.File.Readdir. func (fs *archiveFS) Readdir(count int) ([]os.FileInfo, error) { return nil, fmt.Errorf("not a directory") } // Name implements os.FileInfo.Name. func (fs *archiveFS) Name() string { return "archive" } // Size implements os.FileInfo.Size. func (fs *archiveFS) Size() int64 { return fs.length } // Mode implements os.FileInfo.Mode. func (fs *archiveFS) Mode() os.FileMode { return 0444 } // ModTime implements os.FileInfo.ModTime. func (fs *archiveFS) ModTime() time.Time { return time.Time{} } // IsDir implements os.FileInfo.IsDir. func (fs *archiveFS) IsDir() bool { return false } // Sys implements os.FileInfo.Sys. 
func (fs *archiveFS) Sys() interface{} { return nil } ���������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/defaulticon_test.go����������������0000664�0001750�0001750�00000001045�12672604603�027425� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "strings" gc "gopkg.in/check.v1" "gopkg.in/juju/charmstore.v5-unstable/internal/v5" ) type iconSuite struct{} var _ = gc.Suite(&iconSuite{}) func (s *iconSuite) TestValidXML(c *gc.C) { // The XML declaration must be included in the first line of the icon. 
hasXMLPrefix := strings.HasPrefix(v5.DefaultIcon, " 0 { return params.TagsResponse{entity.CharmMeta.Tags} } return params.TagsResponse{entity.CharmMeta.Categories} }), checkURL: newResolvedURL("~charmers/utopic/category-2", 2), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, jc.DeepEquals, params.TagsResponse{ Tags: []string{"openstack", "storage"}, }) }, }, { name: "id-user", get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { return params.IdUserResponse{url.PreferredURL().User}, nil }, checkURL: newResolvedURL("cs:~bob/utopic/wordpress-2", -1), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, gc.Equals, params.IdUserResponse{"bob"}) }, }, { name: "id-series", get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { return params.IdSeriesResponse{url.URL.Series}, nil }, checkURL: newResolvedURL("~charmers/utopic/category-2", 2), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, gc.Equals, params.IdSeriesResponse{"utopic"}) }, }, { name: "id-name", get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { return params.IdNameResponse{url.URL.Name}, nil }, checkURL: newResolvedURL("~charmers/utopic/category-2", 2), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, gc.Equals, params.IdNameResponse{"category"}) }, }, { name: "id-revision", get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { return params.IdRevisionResponse{url.PreferredURL().Revision}, nil }, checkURL: newResolvedURL("~charmers/utopic/category-2", 2), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, gc.Equals, params.IdRevisionResponse{2}) }, }, { name: "id", get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { id := url.PreferredURL() return params.IdResponse{ Id: id, User: id.User, Series: id.Series, Name: id.Name, Revision: id.Revision, }, nil }, checkURL: 
newResolvedURL("~charmers/utopic/category-2", 2), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, jc.DeepEquals, params.IdResponse{ Id: charm.MustParseURL("cs:utopic/category-2"), User: "", Series: "utopic", Name: "category", Revision: 2, }) }, }, { name: "promulgated", get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { e, err := store.FindBaseEntity(&url.URL, nil) if err != nil { return nil, err } return params.PromulgatedResponse{ Promulgated: bool(e.Promulgated), }, nil }, checkURL: newResolvedURL("cs:~bob/utopic/wordpress-2", -1), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, gc.Equals, params.PromulgatedResponse{Promulgated: false}) }, }, { name: "supported-series", get: entityGetter(func(entity *mongodoc.Entity) interface{} { if entity.URL.Series == "bundle" { return nil } return params.SupportedSeriesResponse{ SupportedSeries: entity.SupportedSeries, } }), checkURL: newResolvedURL("~charmers/utopic/category-2", 2), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, jc.DeepEquals, params.SupportedSeriesResponse{ SupportedSeries: []string{"utopic"}, }) }, }, { name: "terms", get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { doc, err := store.FindEntity(url, nil) if err != nil { return nil, errgo.Mask(err) } if doc.URL.Series == "bundle" { return nil, nil } if doc.CharmMeta == nil || len(doc.CharmMeta.Terms) == 0 { return []string{}, nil } return doc.CharmMeta.Terms, nil }, checkURL: newResolvedURL("cs:~charmers/precise/terms-42", 42), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, gc.DeepEquals, []string{"terms-1/1", "terms-2/5"}) }, }, { name: "resources", get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { entity, err := store.FindEntity(url, nil) if err != nil { return nil, err } // TODO(ericsnow) Switch to store.ListResources() once it exists. 
resources, err := basicListResources(entity) if err != nil { return resources, err } // Apparently the router's "isNull" check treats empty slices // as nil... if len(resources) == 0 { return nil, nil } var results []params.Resource for _, res := range resources { result := params.Resource2API(res) results = append(results, result) } return results, nil }, exclusive: charmOnly, checkURL: newResolvedURL("cs:~charmers/utopic/starsay-17", 17), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, jc.DeepEquals, []params.Resource{{ Name: "for-install", Type: "file", Path: "initial.tgz", Description: "get things started", Origin: "upload", }, { Name: "for-store", Type: "file", Path: "dummy.tgz", Description: "One line that is useful when operators need to push it.", Origin: "upload", }, { Name: "for-upload", Type: "file", Path: "config.xml", Description: "Who uses xml anymore?", Origin: "upload", }}) }, }, { name: "published", get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { // All the entities published are in stable, not development, // and there's only one for each base entity. return ¶ms.PublishedResponse{ Info: []params.PublishedInfo{{ Channel: params.StableChannel, Current: true, }}, }, nil }, checkURL: newResolvedURL("cs:~charmers/precise/wordpress-23", 23), assertCheckData: func(c *gc.C, data interface{}) { c.Assert(data, jc.DeepEquals, ¶ms.PublishedResponse{ Info: []params.PublishedInfo{{ Channel: params.StableChannel, Current: true, }}, }) }, }} func basicListResources(entity *mongodoc.Entity) ([]resource.Resource, error) { if entity.URL.Series == "bundle" { return nil, errgo.Newf("bundles do not have resources") } var resources []resource.Resource for _, meta := range entity.CharmMeta.Resources { // We use an origin of "upload" since charms cannot be uploaded yet. resOrigin := resource.OriginUpload res := resource.Resource{ Meta: meta, Origin: resOrigin, // Revision, Fingerprint, and Size are not set. 
} resources = append(resources, res) } resource.Sort(resources) return resources, nil } // TestEndpointGet tries to ensure that the endpoint // test data getters correspond with reality. func (s *APISuite) TestEndpointGet(c *gc.C) { s.addTestEntities(c) for i, ep := range metaEndpoints { c.Logf("test %d: %s\n", i, ep.name) data, err := ep.get(s.store, ep.checkURL) if err != nil && ep.isExcluded(ep.checkURL) { // endpoint not relevant. continue } c.Assert(err, gc.IsNil) ep.assertCheckData(c, data) } } func (s *APISuite) TestAllMetaEndpointsTested(c *gc.C) { // Make sure that we're testing all the metadata // endpoints that we need to. s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-23", 23)) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("precise/wordpress-23/meta"), }) c.Logf("meta response body: %s", rec.Body) var list []string err := json.Unmarshal(rec.Body.Bytes(), &list) c.Assert(err, gc.IsNil) listNames := make(map[string]bool) for _, name := range list { c.Assert(listNames[name], gc.Equals, false, gc.Commentf("name %s", name)) listNames[name] = true } testNames := make(map[string]bool) for _, test := range metaEndpoints { if strings.Contains(test.name, "/") { continue } testNames[test.name] = true } c.Assert(testNames, jc.DeepEquals, listNames) } var testEntities = []*router.ResolvedURL{ // A stock charm. newResolvedURL("cs:~charmers/precise/wordpress-23", 23), // Another stock charm, to satisfy the bundle's requirements. newResolvedURL("cs:~charmers/precise/mysql-5", 5), // A stock bundle. newResolvedURL("cs:~charmers/bundle/wordpress-simple-42", 42), // A charm with some actions. newResolvedURL("cs:~charmers/precise/dummy-10", 10), // A charm with some tags. newResolvedURL("cs:~charmers/utopic/category-2", 2), // A charm with a different user. 
newResolvedURL("cs:~bob/utopic/wordpress-2", -1), // A charms, which requires agreement to terms newResolvedURL("cs:~charmers/precise/terms-42", 42), // A charm with resources. newResolvedURL("cs:~charmers/utopic/starsay-17", 17), } func (s *APISuite) addTestEntities(c *gc.C) []*router.ResolvedURL { for _, e := range testEntities { if e.URL.Series == "bundle" { s.addPublicBundleFromRepo(c, e.URL.Name, e, true) } else { s.addPublicCharmFromRepo(c, e.URL.Name, e) } // Associate some extra-info data with the entity. key := e.URL.Path() + "/meta/extra-info/key" commonkey := e.URL.Path() + "/meta/common-info/key" s.assertPutAsAdmin(c, key, "value "+e.URL.String()) s.assertPutAsAdmin(c, commonkey, "value "+e.URL.String()) } return testEntities } func (s *APISuite) TestMetaEndpointsSingle(c *gc.C) { urls := s.addTestEntities(c) for i, ep := range metaEndpoints { c.Logf("test %d. %s", i, ep.name) tested := false for _, url := range urls { charmId := strings.TrimPrefix(url.String(), "cs:") path := charmId + "/meta/" + ep.name expectData, err := ep.get(s.store, url) if err != nil && ep.isExcluded(url) { // endpoint not relevant. continue } c.Assert(err, gc.IsNil) c.Logf(" expected data for %q: %#v", url, expectData) if isNull(expectData) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(path), ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Message: params.ErrMetadataNotFound.Error(), Code: params.ErrMetadataNotFound, }, }) continue } tested = true c.Logf(" path %q: %#v", url, path) s.assertGet(c, path, expectData) } if !tested { c.Errorf("endpoint %q is null for all endpoints, so is not properly tested", ep.name) } } } type publishedEntity struct { rev int channels []params.Channel } // Note that, unusually, all the entities in all the tests // are added before any of the "expect" values are // determined because we know that we want // exactly one test for each entity. 
var metaPublishedTests = []struct { id string entity charmstore.ArchiverTo channels []params.Channel expect params.PublishedResponse }{{ id: "~charmers/precise/wordpress-0", entity: storetesting.NewCharm(nil), expect: params.PublishedResponse{ []params.PublishedInfo{}, }, }, { id: "~charmers/precise/wordpress-1", entity: storetesting.NewCharm(nil), channels: []params.Channel{params.DevelopmentChannel, params.StableChannel}, expect: params.PublishedResponse{ Info: []params.PublishedInfo{{ Channel: params.DevelopmentChannel, }, { Channel: params.StableChannel, }}, }, }, { id: "~charmers/precise/wordpress-3", entity: storetesting.NewCharm(nil), channels: []params.Channel{params.DevelopmentChannel}, expect: params.PublishedResponse{ Info: []params.PublishedInfo{{ Channel: params.DevelopmentChannel, }}, }, }, { id: "~charmers/precise/wordpress-4", entity: storetesting.NewCharm(nil), expect: params.PublishedResponse{ []params.PublishedInfo{}, }, }, { id: "~charmers/precise/wordpress-5", entity: storetesting.NewCharm(nil), channels: []params.Channel{params.DevelopmentChannel}, expect: params.PublishedResponse{ Info: []params.PublishedInfo{{ Channel: params.DevelopmentChannel, Current: true, }}, }, }, { id: "~charmers/trusty/wordpress-0", entity: storetesting.NewCharm(nil), channels: []params.Channel{params.DevelopmentChannel}, expect: params.PublishedResponse{ Info: []params.PublishedInfo{{ Channel: params.DevelopmentChannel, Current: true, }}, }, }, { id: "~charmers/precise/wordpress-6", entity: storetesting.NewCharm(nil), channels: []params.Channel{params.StableChannel}, expect: params.PublishedResponse{ Info: []params.PublishedInfo{{ Channel: params.StableChannel, Current: true, }}, }, }, { id: "~charmers/wordpress-7", entity: storetesting.NewCharm(&charm.Meta{ Series: []string{"wily"}, }), channels: []params.Channel{params.StableChannel}, expect: params.PublishedResponse{ Info: []params.PublishedInfo{{ Channel: params.StableChannel, Current: true, }}, }, }, { id: 
"~bob/bundle/mybundle-2", entity: storetesting.NewBundle(&charm.BundleData{ Services: map[string]*charm.ServiceSpec{ "wordpress": { Charm: "~charmers/precise/wordpress", }, }, }), channels: []params.Channel{params.StableChannel}, expect: params.PublishedResponse{ Info: []params.PublishedInfo{{ Channel: params.StableChannel, Current: true, }}, }, }} func (s *APISuite) TestMetaPublished(c *gc.C) { // First add all the entities for _, test := range metaPublishedTests { id := mustParseResolvedURL(test.id) err := s.store.AddEntityWithArchive(id, test.entity) c.Assert(err, gc.IsNil) if len(test.channels) > 0 { err = s.store.Publish(id, test.channels...) c.Assert(err, gc.IsNil) } err = s.store.SetPerms(&id.URL, "unpublished.read", params.Everyone) c.Assert(err, gc.IsNil) } // Then run the checks. for i, test := range metaPublishedTests { c.Logf("test %d: %v", i, test.id) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(test.id + "/meta/published?channel=unpublished"), ExpectBody: test.expect, }) } } func (s *APISuite) TestMetaPermAudit(c *gc.C) { var calledEntities []audit.Entry s.PatchValue(v5.TestAddAuditCallback, func(e audit.Entry) { calledEntities = append(calledEntities, e) }) s.discharge = dischargeForUser("bob") url := newResolvedURL("~bob/precise/wordpress-23", 23) s.addPublicCharmFromRepo(c, "wordpress", url) s.assertPut(c, "precise/wordpress-23/meta/perm/read", []string{"charlie"}) c.Assert(calledEntities, jc.DeepEquals, []audit.Entry{{ User: "bob", Op: audit.OpSetPerm, ACL: &audit.ACL{ Read: []string{"charlie"}, }, Entity: charm.MustParseURL("~bob/precise/wordpress-23"), }}) calledEntities = []audit.Entry{} s.assertPutAsAdmin(c, "precise/wordpress-23/meta/perm/write", []string{"bob", "foo"}) c.Assert(calledEntities, jc.DeepEquals, []audit.Entry{{ User: "admin", Op: audit.OpSetPerm, ACL: &audit.ACL{ Write: []string{"bob", "foo"}, }, Entity: charm.MustParseURL("~bob/precise/wordpress-23"), }}) calledEntities = 
[]audit.Entry{} s.assertPut(c, "precise/wordpress-23/meta/perm", params.PermRequest{ Read: []string{"a"}, Write: []string{"b", "c"}, }) c.Assert(calledEntities, jc.DeepEquals, []audit.Entry{{ User: "bob", Op: audit.OpSetPerm, ACL: &audit.ACL{ Read: []string{"a"}, }, Entity: charm.MustParseURL("~bob/precise/wordpress-23"), }, { User: "bob", Op: audit.OpSetPerm, ACL: &audit.ACL{ Write: []string{"b", "c"}, }, Entity: charm.MustParseURL("~bob/precise/wordpress-23"), }}) } func (s *APISuite) TestMetaPermPublicWrite(c *gc.C) { url := newResolvedURL("~bob/precise/wordpress-23", 23) s.addPublicCharmFromRepo(c, "wordpress", url) s.assertPutAsAdmin(c, "precise/wordpress-23/meta/perm/write", []string{"everyone"}) // Even though the endpoint has write permissions open to anyone, // we still require authentication so that we can make an entry in // the audit log. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("precise/wordpress-23/meta/perm/read"), Method: "PUT", Header: http.Header{ "Content-Type": {"application/json"}, }, Body: strings.NewReader(`["alice"]`), ExpectStatus: http.StatusProxyAuthRequired, ExpectBody: dischargeRequiredBody, }) s.discharge = dischargeForUser("bob") s.assertPut(c, "precise/wordpress-23/meta/perm/read", []string{"alice"}) } func (s *APISuite) TestMetaPerm(c *gc.C) { for _, u := range []*router.ResolvedURL{ newResolvedURL("~charmers/precise/wordpress-23", 23), newResolvedURL("~charmers/precise/wordpress-24", 24), newResolvedURL("~charmers/trusty/wordpress-1", 1), } { err := s.store.AddCharmWithArchive(u, storetesting.NewCharm(nil)) c.Assert(err, gc.IsNil) } s.doAsUser("charmers", func() { s.assertGet(c, "wordpress/meta/perm?channel=unpublished", params.PermResponse{ Read: []string{"charmers"}, Write: []string{"charmers"}, }) }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, 
params.DevelopmentChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, }) s.doAsUser("charmers", func() { // Change the read perms to only include a specific user and the // published write perms to include an "admin" user. // Because the entity isn't published yet, the unpublished channel ACLs // will be changed. s.assertPut(c, "precise/wordpress-23/meta/perm/read", []string{"bob"}) s.assertPut(c, "precise/wordpress-23/meta/perm/write", []string{"admin"}) // charmers no longer has permission. s.assertGetIsUnauthorized(c, "precise/wordpress-23/meta/perm", `unauthorized: access denied for user "charmers"`) }) // The permissions are only for bob now, so act as bob. s.doAsUser("bob", func() { // Check that the perms have changed for all revisions and series. for i, u := range []string{"precise/wordpress-23", "precise/wordpress-24", "trusty/wordpress-1"} { c.Logf("id %d: %q", i, u) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL(u + "/meta/perm"), ExpectBody: params.PermResponse{ Read: []string{"bob"}, Write: []string{"admin"}, }, }) } }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, }) // Publish one of the revisions to development, then PUT to meta/perm // and check that the development ACLs have changed. err := s.store.Publish(newResolvedURL("~charmers/precise/wordpress-23", 23), params.DevelopmentChannel) c.Assert(err, gc.IsNil) s.doAsUser("bob", func() { // Check that we aren't allowed to put to the newly published entity as bob. 
s.assertPutIsUnauthorized(c, "~charmers/precise/wordpress/meta/perm/read?channel=development", []string{}, `unauthorized: access denied for user "bob"`) }) s.doAsUser("charmers", func() { s.discharge = dischargeForUser("charmers") s.assertPut(c, "precise/wordpress-23/meta/perm/read", []string{"bob", "charlie"}) s.assertGetIsUnauthorized(c, "~charmers/precise/wordpress/meta/perm/read?channel=development", `unauthorized: access denied for user "charmers"`) }) s.doAsUser("bob", func() { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL("precise/wordpress-23/meta/perm"), ExpectBody: params.PermResponse{ Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, }) // The other revisions should still see the old ACLs. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL("precise/wordpress-24/meta/perm"), ExpectBody: params.PermResponse{ Read: []string{"bob"}, Write: []string{"admin"}, }, }) }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, }) // Publish wordpress-1 to stable and check that the stable ACLs // have changed. err = s.store.Publish(newResolvedURL("~charmers/trusty/wordpress-1", 1), params.StableChannel) c.Assert(err, gc.IsNil) // The stable permissions only allow charmers currently, so act as // charmers again. 
s.doAsUser("charmers", func() { s.assertPut(c, "trusty/wordpress-1/meta/perm/write", []string{"doris"}) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL("~charmers/trusty/wordpress-1/meta/perm"), ExpectBody: params.PermResponse{ Read: []string{"charmers"}, Write: []string{"doris"}, }, }) }) // The other revisions should still see the old ACLs. s.doAsUser("bob", func() { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL("precise/wordpress-24/meta/perm"), ExpectBody: params.PermResponse{ Read: []string{"bob"}, Write: []string{"admin"}, }, }) // The development-channel entity should still see the development ACLS. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL("precise/wordpress-23/meta/perm"), ExpectBody: params.PermResponse{ Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, }) }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"charmers"}, Write: []string{"doris"}, }, }) s.doAsUser("doris", func() { // Try restoring everyone's read permission on the charm. // Note: wordpress resolves to trusty/wordpress-1 here because // trusty is a later LTS series than precise. 
s.assertPut(c, "wordpress/meta/perm/read", []string{"bob", params.Everyone}) }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"bob", params.Everyone}, Write: []string{"doris"}, }, }) s.doAsUser("bob", func() { s.assertGet(c, "wordpress/meta/perm", params.PermResponse{ Read: []string{"bob", params.Everyone}, Write: []string{"doris"}, }) s.assertGet(c, "wordpress/meta/perm/read", []string{"bob", params.Everyone}) }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"bob", params.Everyone}, Write: []string{"doris"}, }, }) // Try deleting all permissions. s.doAsUser("doris", func() { s.assertPut(c, "wordpress/meta/perm/read", []string{}) s.assertPut(c, "wordpress/meta/perm/write", []string{}) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL("wordpress/meta/perm"), ExpectStatus: http.StatusUnauthorized, ExpectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "doris"`, }, }) }) // Now no-one except admin can do anything with trusty/wordpress-1. 
for _, user := range []string{"charmers", "bob", "charlie", "doris", "admin"} { s.doAsUser(user, func() { s.assertGetIsUnauthorized(c, "wordpress/meta/perm", fmt.Sprintf("unauthorized: access denied for user %q", user)) s.assertPutIsUnauthorized(c, "wordpress/meta/perm", []string{}, fmt.Sprintf("unauthorized: access denied for user %q", user)) }) } s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{}, Write: []string{}, }, }) // Try setting all permissions in one request. We need to be admin here. s.assertPutAsAdmin(c, "wordpress/meta/perm", params.PermRequest{ Read: []string{"bob"}, Write: []string{"admin"}, }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, }) // Try putting only read permissions. s.doAsUser("admin", func() { readRequest := struct { Read []string }{Read: []string{"joe"}} s.assertPut(c, "wordpress/meta/perm", readRequest) }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"joe"}, Write: []string{}, }, }) // Restore some write rights to the stable channel. s.assertPutAsAdmin(c, "trusty/wordpress-1/meta/perm/write", []string{"bob"}) // ~charmers/trusty/wordpress-1 has been published only to the // stable channel. 
If we specify a different channel in a perm PUT // request, we'll get an error because the channel isn't valid for // that entity. s.doAsUser("charmers", func() { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), Method: "PUT", JSONBody: params.PermRequest{ Read: []string{"foo"}, Write: []string{"bar"}, }, URL: storeURL("trusty/wordpress-1/meta/perm?channel=development"), ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Code: params.ErrNotFound, Message: `cs:trusty/wordpress-1 not found in development channel`, }, }) }) // Similarly, we should be able to specify a channel on read // to read a different channel. s.doAsUser("bob", func() { s.assertGet(c, "trusty/wordpress/meta/perm?channel=unpublished", params.PermResponse{ Read: []string{"bob"}, Write: []string{"admin"}, }) s.assertGet(c, "wordpress/meta/perm?channel=development", params.PermResponse{ Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }) }) // We can't write to a channel that the charm's not in. 
s.doAsUser("charmers", func() { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), Method: "PUT", JSONBody: []string{"arble"}, URL: storeURL("trusty/wordpress-1/meta/perm/read?channel=development"), ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Code: params.ErrNotFound, Message: `cs:trusty/wordpress-1 not found in development channel`, }, }) }) s.assertChannelACLs(c, "precise/wordpress-23", map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"admin"}, }, params.DevelopmentChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, params.StableChannel: { Read: []string{"joe"}, Write: []string{"bob"}, }, }) s.doAsUser("bob", func() { s.assertGet(c, "trusty/wordpress/meta/perm/read?channel=unpublished", []string{"bob"}) }) } // assertChannelACLs asserts that the ChannelACLs field of the base entity with the // given URL are as given. func (s *APISuite) assertChannelACLs(c *gc.C, url string, acls map[params.Channel]mongodoc.ACL) { e, err := s.store.FindBaseEntity(charm.MustParseURL(url), nil) c.Assert(err, gc.IsNil) c.Assert(e.ChannelACLs, jc.DeepEquals, acls) } func (s *APISuite) TestMetaPermPutUnauthorized(c *gc.C) { id := "precise/wordpress-23" s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/"+id, 23)) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.noMacaroonSrv, URL: storeURL("~charmers/" + id + "/meta/perm/read"), Method: "PUT", Header: http.Header{ "Content-Type": {"application/json"}, }, Body: strings.NewReader(`["some-user"]`), ExpectStatus: http.StatusUnauthorized, ExpectBody: params.Error{ Code: params.ErrUnauthorized, Message: "authentication failed: missing HTTP auth header", }, }) } func (s *APISuite) TestMetaTerms(c *gc.C) { id1 := "precise/terms-17" s.addPublicCharmFromRepo(c, "terms", newResolvedURL("~charmers/"+id1, 17)) s.assertGet(c, id1+"/meta/terms", []string{"terms-1/1", "terms-2/5"}) 
id2 := "precise/mysql-1" s.addPublicCharmFromRepo(c, "mysql", newResolvedURL("~charmers/"+id2, 1)) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(id2 + "meta/terms"), Method: "GET", ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Code: params.ErrNotFound, Message: "not found", }, }) } func (s *APISuite) TestMetaTermsBundle(c *gc.C) { id := newResolvedURL("~charmers/bundle/wordpress-simple-10", 10) s.addPublicBundleFromRepo(c, "wordpress-simple", id, true) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(id.URL.Path() + "/meta/terms"), Method: "GET", ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Code: params.ErrMetadataNotFound, Message: "metadata not found", }, }) } func (s *APISuite) TestSeries(c *gc.C) { for k := range series.Series { if k == "bundle" { continue } id := k + "/wordpress-23" s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/"+id, 23)) s.assertGet(c, id+"/meta/id", map[string]interface{}{ "Id": "cs:" + k + "/wordpress-23", "Series": k, "Name": "wordpress", "Revision": 23, }) } } func (s *APISuite) TestExtraInfo(c *gc.C) { id := "precise/wordpress-23" s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/"+id, 23)) s.checkInfo(c, "extra-info", id) s.checkInfo(c, "common-info", id) } func (s *APISuite) checkInfo(c *gc.C, path string, id string) { // Add one value and check that it's there. s.assertPutAsAdmin(c, id+"/meta/"+path+"/foo", "fooval") s.assertGet(c, id+"/meta/"+path+"/foo", "fooval") s.assertGet(c, id+"/meta/"+path, map[string]string{ "foo": "fooval", }) // Add another value and check that both values are there. s.assertPutAsAdmin(c, id+"/meta/"+path+"/bar", "barval") s.assertGet(c, id+"/meta/"+path+"/bar", "barval") s.assertGet(c, id+"/meta/"+path, map[string]string{ "foo": "fooval", "bar": "barval", }) // Overwrite a value and check that it's changed. 
s.assertPutAsAdmin(c, id+"/meta/"+path+"/foo", "fooval2") s.assertGet(c, id+"/meta/"+path+"/foo", "fooval2") s.assertGet(c, id+"/meta/"+path+"", map[string]string{ "foo": "fooval2", "bar": "barval", }) // Write several values at once. s.assertPutAsAdmin(c, id+"/meta/any", params.MetaAnyResponse{ Meta: map[string]interface{}{ path: map[string]string{ "foo": "fooval3", "baz": "bazval", }, path + "/frob": []int{1, 4, 6}, }, }) s.assertGet(c, id+"/meta/"+path, map[string]interface{}{ "foo": "fooval3", "baz": "bazval", "bar": "barval", "frob": []int{1, 4, 6}, }) // Delete a single value. s.assertPutAsAdmin(c, id+"/meta/"+path+"/foo", nil) s.assertGet(c, id+"/meta/"+path, map[string]interface{}{ "baz": "bazval", "bar": "barval", "frob": []int{1, 4, 6}, }) // Delete a value and add some values at the same time. s.assertPutAsAdmin(c, id+"/meta/any", params.MetaAnyResponse{ Meta: map[string]interface{}{ path: map[string]interface{}{ "baz": nil, "bar": nil, "dazzle": "x", "fizzle": "y", }, }, }) s.assertGet(c, id+"/meta/"+path, map[string]interface{}{ "frob": []int{1, 4, 6}, "dazzle": "x", "fizzle": "y", }) } var extraInfoBadPutRequestsTests = []struct { about string key string body interface{} contentType string expectStatus int expectBody params.Error }{{ about: "key with extra element", key: "foo/bar", body: "hello", expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: "bad key for $1", }, }, { about: "key with a dot", key: "foo.bar", body: "hello", expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: "bad key for $1", }, }, { about: "key with a dollar", key: "foo$bar", body: "hello", expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: "bad key for $1", }, }, { about: "multi key with extra element", key: "", body: map[string]string{ "foo/bar": "value", }, expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: 
params.ErrBadRequest, Message: "bad key for $1", }, }, { about: "multi key with dot", key: "", body: map[string]string{ ".bar": "value", }, expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: "bad key for $1", }, }, { about: "multi key with dollar", key: "", body: map[string]string{ "$bar": "value", }, expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: "bad key for $1", }, }, { about: "multi key with bad map", key: "", body: "bad", expectStatus: http.StatusInternalServerError, expectBody: params.Error{ Message: `cannot unmarshal $1 body: json: cannot unmarshal string into Go value of type map[string]*json.RawMessage`, }, }} func (s *APISuite) TestExtraInfoBadPutRequests(c *gc.C) { s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) path := "precise/wordpress-23/meta/" for i, test := range extraInfoBadPutRequestsTests { c.Logf("test %d: %s", i, test.about) contentType := test.contentType if contentType == "" { contentType = "application/json" } extraBodyMessage := strings.Replace(test.expectBody.Message, "$1", "extra-info", -1) commonBodyMessage := strings.Replace(test.expectBody.Message, "$1", "common-info", -1) test.expectBody.Message = extraBodyMessage httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(path + "extra-info/" + test.key), Method: "PUT", Header: http.Header{ "Content-Type": {contentType}, }, Username: testUsername, Password: testPassword, Body: strings.NewReader(mustMarshalJSON(test.body)), ExpectStatus: test.expectStatus, ExpectBody: test.expectBody, }) test.expectBody.Message = commonBodyMessage httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(path + "common-info/" + test.key), Method: "PUT", Header: http.Header{ "Content-Type": {contentType}, }, Username: testUsername, Password: testPassword, Body: 
strings.NewReader(mustMarshalJSON(test.body)), ExpectStatus: test.expectStatus, ExpectBody: test.expectBody, }) } } func (s *APISuite) TestExtraInfoPutUnauthorized(c *gc.C) { s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("precise/wordpress-23/meta/extra-info"), Method: "PUT", Header: http.Header{ "Content-Type": {"application/json"}, }, Body: strings.NewReader(mustMarshalJSON(map[string]string{ "bar": "value", })), ExpectStatus: http.StatusProxyAuthRequired, ExpectBody: dischargeRequiredBody, }) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("precise/wordpress-23/meta/extra-info"), Method: "PUT", Header: http.Header{ "Content-Type": {"application/json"}, "Bakery-Protocol-Version": {"1"}, }, Body: strings.NewReader(mustMarshalJSON(map[string]string{ "bar": "value", })), ExpectStatus: http.StatusUnauthorized, ExpectHeader: http.Header{ "WWW-Authenticate": {"Macaroon"}, }, ExpectBody: dischargeRequiredBody, }) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("precise/wordpress-23/meta/common-info"), Method: "PUT", Header: http.Header{ "Content-Type": {"application/json"}, }, Body: strings.NewReader(mustMarshalJSON(map[string]string{ "bar": "value", })), ExpectStatus: http.StatusProxyAuthRequired, ExpectBody: dischargeRequiredBody, }) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("precise/wordpress-23/meta/common-info"), Method: "PUT", Header: http.Header{ "Content-Type": {"application/json"}, "Bakery-Protocol-Version": {"1"}, }, Body: strings.NewReader(mustMarshalJSON(map[string]string{ "bar": "value", })), ExpectStatus: http.StatusUnauthorized, ExpectHeader: http.Header{ "WWW-Authenticate": {"Macaroon"}, }, ExpectBody: dischargeRequiredBody, }) } func (s *APISuite) TestCommonInfo(c *gc.C) { 
s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-23", 23))
s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-24", 24))
s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/trusty/wordpress-1", 1))
// A common-info value written through any one revision...
s.assertPutAsAdmin(c, "wordpress/meta/common-info/key", "something")
s.assertGet(c, "wordpress/meta/common-info", map[string]string{
	"key": "something",
})
// ...is visible through every revision, because it is stored once on the
// shared base entity (checked directly via FindBaseEntity below).
for i, u := range []string{"precise/wordpress-23", "precise/wordpress-24", "trusty/wordpress-1"} {
	c.Logf("id %d: %q", i, u)
	s.assertGet(c, u+"/meta/common-info", map[string]string{
		"key": "something",
	})
	e, err := s.store.FindBaseEntity(charm.MustParseURL(u), nil)
	c.Assert(err, gc.IsNil)
	// Values are stored as raw JSON, hence the quoted bytes.
	c.Assert(e.CommonInfo, gc.DeepEquals, map[string][]byte{
		"key": []byte("\"something\""),
	})
}
}

// isNull reports whether v marshals to the JSON literal null.
// Panics on unmarshalable values; fine for test-only use.
func isNull(v interface{}) bool {
	data, err := json.Marshal(v)
	if err != nil {
		panic(err)
	}
	return string(data) == "null"
}

// TestMetaEndpointsAny requests meta/any with every relevant include flag
// for each test entity and checks the combined response matches what each
// endpoint's getter returns individually.
func (s *APISuite) TestMetaEndpointsAny(c *gc.C) {
	rurls := s.addTestEntities(c)
	// We check the meta endpoint for both promulgated and non-promulgated
	// versions of each URL.
	urls := make([]*router.ResolvedURL, 0, len(rurls)*2)
	for _, rurl := range rurls {
		urls = append(urls, rurl)
		if rurl.PromulgatedRevision != -1 {
			// Copy the URL and clear the promulgated revision to get the
			// non-promulgated form of the same entity.
			rurl1 := *rurl
			rurl1.PromulgatedRevision = -1
			urls = append(urls, &rurl1)
		}
	}
	for _, url := range urls {
		charmId := strings.TrimPrefix(url.String(), "cs:")
		var flags []string
		expectData := params.MetaAnyResponse{
			Id:   url.PreferredURL(),
			Meta: make(map[string]interface{}),
		}
		for _, ep := range metaEndpoints {
			if ep.isExcluded(url) {
				// endpoint not relevant.
				continue
			}
			flags = append(flags, "include="+ep.name)
			val, err := ep.get(s.store, url)
			// NOTE(review): ep.isExcluded(url) was already checked (and
			// continue'd on) above, so this condition can never be true —
			// the branch is dead and any non-nil err always reaches the
			// Assert below. Presumably one of the two checks was meant to
			// differ; confirm intent before simplifying.
			if err != nil && ep.isExcluded(url) {
				// endpoint not relevant.
				continue
			}
			c.Assert(err, gc.IsNil)
			if val != nil {
				expectData.Meta[ep.name] = val
			}
		}
		s.assertGet(c, charmId+"/meta/any?"+strings.Join(flags, "&"), expectData)
	}
}

// TestMetaAnyWithNoIncludesAndNoEntity checks meta/any behavior when no
// include flags are given: a missing single id is a 404, while the bulk
// form silently drops ids that don't resolve.
func (s *APISuite) TestMetaAnyWithNoIncludesAndNoEntity(c *gc.C) {
	wordpressURL, _ := s.addPublicCharmFromRepo(
		c, "wordpress",
		newResolvedURL("cs:~charmers/precise/wordpress-23", 23),
	)
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:      s.srv,
		URL:          storeURL("precise/wordpress-1/meta/any"),
		ExpectStatus: http.StatusNotFound,
		ExpectBody: params.Error{
			Code:    params.ErrNotFound,
			Message: `no matching charm or bundle for cs:precise/wordpress-1`,
		},
	})
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:      s.srv,
		URL:          storeURL("meta/any?id=precise/wordpress-23&id=precise/wordpress-1"),
		ExpectStatus: http.StatusOK,
		// Only the resolvable id appears in the bulk response.
		ExpectBody: map[string]interface{}{
			"precise/wordpress-23": params.MetaAnyResponse{
				Id: wordpressURL.PreferredURL(),
			},
		},
	})
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:      s.srv,
		URL:          storeURL("precise/wordpress-23/meta/any"),
		ExpectStatus: http.StatusOK,
		ExpectBody: params.MetaAnyResponse{
			Id: wordpressURL.PreferredURL(),
		},
	})
}

// In this test we rely on the charm.v2 testing repo package and
// dummy charm that has actions included.
func (s *APISuite) TestMetaCharmActions(c *gc.C) {
	url, dummy := s.addPublicCharmFromRepo(c, "dummy", newResolvedURL("cs:~charmers/precise/dummy-10", 10))
	s.assertGet(c, "precise/dummy-10/meta/charm-actions", dummy.Actions())
	s.assertGet(c, "precise/dummy-10/meta/any?include=charm-actions",
		params.MetaAnyResponse{
			Id: url.PreferredURL(),
			Meta: map[string]interface{}{
				"charm-actions": dummy.Actions(),
			},
		},
	)
}

func (s *APISuite) TestBulkMeta(c *gc.C) {
	// We choose an arbitrary set of ids and metadata here, just to smoke-test
	// whether the meta/any logic is hooked up correctly.
	// Detailed tests for this feature are in the router package.
_, wordpress := s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) _, mysql := s.addPublicCharmFromRepo(c, "mysql", newResolvedURL("cs:~charmers/precise/mysql-10", 10)) s.assertGet(c, "meta/charm-metadata?id=precise/wordpress-23&id=precise/mysql-10", map[string]*charm.Meta{ "precise/wordpress-23": wordpress.Meta(), "precise/mysql-10": mysql.Meta(), }, ) } func (s *APISuite) TestBulkMetaAny(c *gc.C) { // We choose an arbitrary set of metadata here, just to smoke-test // whether the meta/any logic is hooked up correctly. // Detailed tests for this feature are in the router package. wordpressURL, wordpress := s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) mysqlURL, mysql := s.addPublicCharmFromRepo(c, "mysql", newResolvedURL("cs:~charmers/precise/mysql-10", 10)) s.assertGet(c, "meta/any?include=charm-metadata&include=charm-config&id=precise/wordpress-23&id=precise/mysql-10", map[string]params.MetaAnyResponse{ "precise/wordpress-23": { Id: wordpressURL.PreferredURL(), Meta: map[string]interface{}{ "charm-config": wordpress.Config(), "charm-metadata": wordpress.Meta(), }, }, "precise/mysql-10": { Id: mysqlURL.PreferredURL(), Meta: map[string]interface{}{ "charm-config": mysql.Config(), "charm-metadata": mysql.Meta(), }, }, }, ) } var metaCharmTagsTests = []struct { about string tags []string categories []string expectTags []string }{{ about: "tags only", tags: []string{"foo", "bar"}, expectTags: []string{"foo", "bar"}, }, { about: "categories only", categories: []string{"foo", "bar"}, expectTags: []string{"foo", "bar"}, }, { about: "tags and categories", categories: []string{"foo", "bar"}, tags: []string{"tag1", "tag2"}, expectTags: []string{"tag1", "tag2"}, }, { about: "no tags or categories", }} func (s *APISuite) TestMetaCharmTags(c *gc.C) { url := newResolvedURL("~charmers/precise/wordpress-0", -1) for i, test := range metaCharmTagsTests { c.Logf("%d: %s", i, 
		test.about)
		url.URL.Revision = i
		s.addPublicCharm(c, storetesting.NewCharm(&charm.Meta{
			Tags:       test.tags,
			Categories: test.categories,
		}), url)
		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
			Handler:      s.srv,
			URL:          storeURL(url.URL.Path() + "/meta/tags"),
			ExpectStatus: http.StatusOK,
			ExpectBody:   params.TagsResponse{test.expectTags},
		})
	}
}

// TestPromulgatedMetaCharmTags is the promulgated-URL variant of
// TestMetaCharmTags: tags win over legacy categories in meta/tags.
func (s *APISuite) TestPromulgatedMetaCharmTags(c *gc.C) {
	url := newResolvedURL("~charmers/precise/wordpress-0", 0)
	for i, test := range metaCharmTagsTests {
		c.Logf("%d: %s", i, test.about)
		// Bump both revisions so each case uploads a distinct entity.
		url.URL.Revision = i
		url.PromulgatedRevision = i
		s.addPublicCharm(c, storetesting.NewCharm(&charm.Meta{
			Tags:       test.tags,
			Categories: test.categories,
		}), url)
		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
			Handler:      s.srv,
			URL:          storeURL(url.URL.Path() + "/meta/tags"),
			ExpectStatus: http.StatusOK,
			ExpectBody:   params.TagsResponse{test.expectTags},
		})
	}
}

// TestBundleTags checks meta/tags for a non-promulgated bundle.
func (s *APISuite) TestBundleTags(c *gc.C) {
	url := newResolvedURL("~charmers/bundle/wordpress-simple-2", -1)
	s.addPublicBundle(c, storetesting.NewBundle(&charm.BundleData{
		Tags: []string{"foo", "bar"},
		Services: map[string]*charm.ServiceSpec{
			"wordpress": {
				Charm: "wordpress",
			},
		},
	}), url, true)
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:      s.srv,
		URL:          storeURL(url.URL.Path() + "/meta/tags"),
		ExpectStatus: http.StatusOK,
		ExpectBody:   params.TagsResponse{[]string{"foo", "bar"}},
	})
}

// TestPromulgatedBundleTags checks meta/tags for a promulgated bundle.
func (s *APISuite) TestPromulgatedBundleTags(c *gc.C) {
	url := newResolvedURL("~charmers/bundle/wordpress-simple-2", 2)
	s.addPublicBundle(c, storetesting.NewBundle(&charm.BundleData{
		Tags: []string{"foo", "bar"},
		Services: map[string]*charm.ServiceSpec{
			"wordpress": {
				Charm: "wordpress",
			},
		},
	}), url, true)
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:      s.srv,
		URL:          storeURL(url.URL.Path() + "/meta/tags"),
		ExpectStatus: http.StatusOK,
		ExpectBody:   params.TagsResponse{[]string{"foo", "bar"}},
	})
}

// testMetaCharm wraps a charm.Charm, overriding only Meta, so tests can
// inject arbitrary metadata onto an existing charm.
type testMetaCharm struct {
	meta *charm.Meta
	charm.Charm
}

// Meta returns the injected metadata rather than the embedded charm's.
func (c *testMetaCharm) Meta() *charm.Meta {
	return c.meta
}

func (s *APISuite) TestIdsAreResolved(c *gc.C) {
	// This is just testing that ResolveURL is actually
	// passed to the router. Given how Router is
	// defined, and the ResolveURL tests, this should
	// be sufficient to "join the dots".
	_, wordpress := s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23))
	s.assertGet(c, "wordpress/meta/charm-metadata", wordpress.Meta())
}

// TestMetaCharmNotFound checks that every meta endpoint returns a 404
// with a resolution error for both fully-qualified and bare missing ids.
func (s *APISuite) TestMetaCharmNotFound(c *gc.C) {
	for i, ep := range metaEndpoints {
		c.Logf("test %d: %s", i, ep.name)
		expected := params.Error{
			Message: `no matching charm or bundle for cs:precise/wordpress-23`,
			Code:    params.ErrNotFound,
		}
		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
			Handler:      s.srv,
			URL:          storeURL("precise/wordpress-23/meta/" + ep.name),
			ExpectStatus: http.StatusNotFound,
			ExpectBody:   expected,
		})
		expected.Message = `no matching charm or bundle for cs:wordpress`
		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
			Handler:      s.srv,
			URL:          storeURL("wordpress/meta/" + ep.name),
			ExpectStatus: http.StatusNotFound,
			ExpectBody:   expected,
		})
	}
}

// resolveURLTests drives TestResolveURL: url is resolved against the set
// of entities uploaded there; expect is the resolved form, or notFound
// marks urls that must fail resolution.
var resolveURLTests = []struct {
	url      string
	expect   *router.ResolvedURL
	notFound bool
}{{
	url:    "wordpress",
	expect: newResolvedURL("cs:~charmers/trusty/wordpress-25", 25),
}, {
	url:    "precise/wordpress",
	expect: newResolvedURL("cs:~charmers/precise/wordpress-24", 24),
}, {
	url:    "utopic/bigdata",
	expect: newResolvedURL("cs:~charmers/utopic/bigdata-10", 10),
}, {
	url:    "~charmers/precise/wordpress",
	expect: newResolvedURL("cs:~charmers/precise/wordpress-24", -1),
}, {
	url:      "~charmers/precise/wordpress-99",
	notFound: true,
}, {
	url:    "~charmers/wordpress",
	expect: newResolvedURL("cs:~charmers/trusty/wordpress-25", -1),
}, {
	url:      "~charmers/wordpress-24",
	notFound: true,
}, {
	url:    "~bob/wordpress",
	expect: newResolvedURL("cs:~bob/trusty/wordpress-1", -1),
}, {
	url: "~bob/precise/wordpress",
	expect:
newResolvedURL("cs:~bob/precise/wordpress-2", -1), }, { url: "bigdata", expect: newResolvedURL("cs:~charmers/utopic/bigdata-10", 10), }, { url: "wordpress-24", notFound: true, }, { url: "bundlelovin", expect: newResolvedURL("cs:~charmers/bundle/bundlelovin-10", 10), }, { url: "wordpress-26", notFound: true, }, { url: "foo", notFound: true, }, { url: "trusty/bigdata", notFound: true, }, { url: "~bob/wily/django-47", notFound: true, }, { url: "~bob/django", notFound: true, }, { url: "wily/django", notFound: true, }, { url: "django", notFound: true, }, { url: "~bob/multi-series", expect: newResolvedURL("cs:~bob/multi-series-0", -1), }, { url: "~bob/utopic/multi-series", expect: newResolvedURL("cs:~bob/multi-series-0", -1), }} func (s *APISuite) TestResolveURL(c *gc.C) { s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-24", 24)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-24", 24)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-25", 25)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/utopic/wordpress-10", 10)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/saucy/bigdata-99", 99)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/utopic/bigdata-10", 10)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~bob/trusty/wordpress-1", -1)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~bob/precise/wordpress-2", -1)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~bob/precise/other-2", -1)) s.addPublicBundleFromRepo(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/bundlelovin-10", 10), true) s.addPublicBundleFromRepo(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/wordpress-simple-10", 10), true) s.addPublicCharmFromRepo(c, "multi-series", 
newResolvedURL("cs:~bob/multi-series-0", -1)) for i, test := range resolveURLTests { c.Logf("test %d: %s", i, test.url) url := charm.MustParseURL(test.url) rurl, err := v5.ResolveURL(entitycache.New(&v5.StoreWithChannel{ Store: s.store, Channel: params.UnpublishedChannel, }), url) if test.notFound { c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) c.Assert(err, gc.ErrorMatches, `no matching charm or bundle for .*`) c.Assert(rurl, gc.IsNil) continue } c.Assert(err, gc.IsNil) c.Assert(rurl, jc.DeepEquals, test.expect) } } var serveExpandIdTests = []struct { about string url string expect []params.ExpandedId err string }{{ about: "fully qualified URL", url: "~charmers/trusty/wordpress-47", expect: []params.ExpandedId{ {Id: "cs:~charmers/utopic/wordpress-42"}, {Id: "cs:~charmers/trusty/wordpress-47"}, {Id: "cs:~charmers/wordpress-5"}, }, }, { about: "promulgated URL", url: "trusty/wordpress-47", expect: []params.ExpandedId{ {Id: "cs:utopic/wordpress-42"}, {Id: "cs:trusty/wordpress-47"}, {Id: "cs:wordpress-49"}, }, }, { about: "non-promulgated charm", url: "~bob/precise/builder", expect: []params.ExpandedId{ {Id: "cs:~bob/precise/builder-5"}, }, }, { about: "partial URL", url: "haproxy", expect: []params.ExpandedId{ {Id: "cs:trusty/haproxy-1"}, {Id: "cs:precise/haproxy-1"}, }, }, { about: "revision with series matches bundles (and multi-series charms) only", url: "mongo-0", expect: []params.ExpandedId{ {Id: "cs:bundle/mongo-0"}, }, }, { about: "single result", url: "bundle/mongo-0", expect: []params.ExpandedId{ {Id: "cs:bundle/mongo-0"}, }, }, { about: "fully qualified URL with no entities found", url: "~charmers/precise/no-such-42", err: `no matching charm or bundle for cs:~charmers/precise/no-such-42`, }, { about: "partial URL with no entities found", url: "no-such", err: `no matching charm or bundle for cs:no-such`, }} func (s *APISuite) TestServeExpandId(c *gc.C) { // Add a bunch of entities in the database. 
// Note that expand-id only cares about entity identifiers, // so it is ok to reuse the same charm for all the entities. s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/utopic/wordpress-42", 42)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-47", 47)) err := s.store.AddCharmWithArchive(newResolvedURL("cs:~charmers/trusty/wordpress-48", 48), storetesting.NewCharm(nil)) c.Assert(err, gc.IsNil) err = s.store.Publish(newResolvedURL("cs:~charmers/trusty/wordpress-48", 48), params.DevelopmentChannel) c.Assert(err, gc.IsNil) s.addPublicCharmFromRepo(c, "multi-series", newResolvedURL("cs:~charmers/wordpress-5", 49)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/haproxy-1", 1)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/haproxy-1", 1)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~bob/precise/builder-5", -1)) s.addPublicBundleFromRepo(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/mongo-0", 0), true) s.addPublicBundleFromRepo(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/wordpress-simple-0", 0), true) for i, test := range serveExpandIdTests { c.Logf("test %d: %s", i, test.about) storeURL := storeURL(test.url + "/expand-id") var expectStatus int var expectBody interface{} if test.err == "" { expectStatus = http.StatusOK expectBody = test.expect } else { expectStatus = http.StatusNotFound expectBody = params.Error{ Code: params.ErrNotFound, Message: test.err, } } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL, ExpectStatus: expectStatus, ExpectBody: expectBody, }) } } var serveMetaRevisionInfoTests = []struct { about string url string asUser string expect params.RevisionInfoResponse err string }{{ about: "fully qualified url", url: "trusty/wordpress-9", expect: params.RevisionInfoResponse{ []*charm.URL{ charm.MustParseURL("cs:trusty/wordpress-43"), 
charm.MustParseURL("cs:trusty/wordpress-42"), charm.MustParseURL("cs:trusty/wordpress-41"), charm.MustParseURL("cs:trusty/wordpress-9"), }, }, }, { about: "partial url uses a default series", url: "wordpress", expect: params.RevisionInfoResponse{ []*charm.URL{ charm.MustParseURL("cs:trusty/wordpress-43"), charm.MustParseURL("cs:trusty/wordpress-42"), charm.MustParseURL("cs:trusty/wordpress-41"), charm.MustParseURL("cs:trusty/wordpress-9"), }, }, }, { about: "non-promulgated URL gives non-promulgated revisions (~charmers)", url: "~charmers/trusty/cinder", expect: params.RevisionInfoResponse{ []*charm.URL{ charm.MustParseURL("cs:~charmers/trusty/cinder-6"), charm.MustParseURL("cs:~charmers/trusty/cinder-5"), charm.MustParseURL("cs:~charmers/trusty/cinder-4"), charm.MustParseURL("cs:~charmers/trusty/cinder-3"), charm.MustParseURL("cs:~charmers/trusty/cinder-2"), charm.MustParseURL("cs:~charmers/trusty/cinder-1"), charm.MustParseURL("cs:~charmers/trusty/cinder-0"), }, }, }, { about: "non-promulgated URL gives non-promulgated revisions (~openstack-charmers)", url: "~openstack-charmers/trusty/cinder", expect: params.RevisionInfoResponse{ []*charm.URL{ charm.MustParseURL("cs:~openstack-charmers/trusty/cinder-1"), charm.MustParseURL("cs:~openstack-charmers/trusty/cinder-0"), }, }, }, { about: "promulgated URL gives promulgated revisions", url: "trusty/cinder", expect: params.RevisionInfoResponse{ []*charm.URL{ charm.MustParseURL("cs:trusty/cinder-5"), charm.MustParseURL("cs:trusty/cinder-4"), charm.MustParseURL("cs:trusty/cinder-3"), charm.MustParseURL("cs:trusty/cinder-2"), charm.MustParseURL("cs:trusty/cinder-1"), charm.MustParseURL("cs:trusty/cinder-0"), }, }, }, { about: "multi-series charm expands to all revisions of that charm", url: "multi-series", expect: params.RevisionInfoResponse{ []*charm.URL{ charm.MustParseURL("cs:multi-series-41"), charm.MustParseURL("cs:multi-series-40"), }, }, }, { about: "multi-series charm with series specified", url: 
"trusty/multi-series", expect: params.RevisionInfoResponse{ []*charm.URL{ charm.MustParseURL("cs:multi-series-41"), charm.MustParseURL("cs:multi-series-40"), }, }, }, { about: "multi-series charm with non-promulgated URL", url: "~charmers/multi-series", expect: params.RevisionInfoResponse{ []*charm.URL{ charm.MustParseURL("cs:~charmers/multi-series-2"), charm.MustParseURL("cs:~charmers/multi-series-1"), }, }, }, { about: "multi-series charm with non-promulgated URL and series specified", url: "~charmers/utopic/multi-series", expect: params.RevisionInfoResponse{ []*charm.URL{ charm.MustParseURL("cs:~charmers/multi-series-2"), charm.MustParseURL("cs:~charmers/multi-series-1"), }, }, }, { about: "mixed multi/single series charm, latest rev", url: "mixed", expect: params.RevisionInfoResponse{ []*charm.URL{ charm.MustParseURL("cs:mixed-43"), charm.MustParseURL("cs:mixed-42"), charm.MustParseURL("cs:trusty/mixed-41"), charm.MustParseURL("cs:trusty/mixed-40"), }, }, }, { about: "mixed multi/single series charm with series", url: "trusty/mixed-40", expect: params.RevisionInfoResponse{ []*charm.URL{ charm.MustParseURL("cs:mixed-43"), charm.MustParseURL("cs:mixed-42"), charm.MustParseURL("cs:trusty/mixed-41"), charm.MustParseURL("cs:trusty/mixed-40"), }, }, }, { about: "no entities found", url: "precise/no-such-33", err: `no matching charm or bundle for cs:precise/no-such-33`, }} func (s *APISuite) TestServeMetaRevisionInfo(c *gc.C) { s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mysql-41", 41)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mysql-42", 42)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-9", 9)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-41", 41)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-42", 42)) s.addPublicCharmFromRepo(c, "wordpress", 
newResolvedURL("cs:~charmers/trusty/wordpress-43", 43)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-42", 42)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-0", -1)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-1", -1)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-2", 0)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-3", 1)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~openstack-charmers/trusty/cinder-0", 2)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~openstack-charmers/trusty/cinder-1", 3)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-4", -1)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-5", 4)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-6", 5)) s.addPublicCharmFromRepo(c, "multi-series", newResolvedURL("cs:~charmers/multi-series-1", 40)) s.addPublicCharmFromRepo(c, "multi-series", newResolvedURL("cs:~charmers/multi-series-2", 41)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mixed-1", 40)) s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mixed-2", 41)) s.addPublicCharmFromRepo(c, "multi-series", newResolvedURL("cs:~charmers/mixed-3", 42)) s.addPublicCharmFromRepo(c, "multi-series", newResolvedURL("cs:~charmers/mixed-4", 43)) for i, test := range serveMetaRevisionInfoTests { c.Logf("test %d: %s", i, test.about) storeURL := storeURL(test.url + "/meta/revision-info") var expectStatus int var expectBody interface{} if test.err == "" { expectStatus = http.StatusOK expectBody = test.expect } else { expectStatus = http.StatusNotFound expectBody = params.Error{ Code: params.ErrNotFound, Message: test.err, } } do := bakeryDo(nil) if test.asUser != "" { do = 
s.bakeryDoAsUser(c, test.asUser) } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL, Do: do, ExpectStatus: expectStatus, ExpectBody: expectBody, }) } } var metaStatsTests = []struct { // about describes the test. about string // url is the entity id to use when making the meta/stats request. url string // downloads maps entity ids to a numeric key/value pair where the key is // the number of days in the past when the entity was downloaded and the // value is the number of downloads performed that day. downloads map[string]map[int]int // expectResponse is the expected response from the meta/stats endpoint. expectResponse params.StatsResponse }{{ about: "no downloads", url: "trusty/mysql-0", downloads: map[string]map[int]int{"trusty/mysql-0": {}}, }, { about: "single download", url: "utopic/django-42", downloads: map[string]map[int]int{ "utopic/django-42": {0: 1}, }, expectResponse: params.StatsResponse{ ArchiveDownloadCount: 1, ArchiveDownload: params.StatsCount{ Total: 1, Day: 1, Week: 1, Month: 1, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: 1, Day: 1, Week: 1, Month: 1, }, }, }, { about: "single download a long time ago", url: "utopic/django-42", downloads: map[string]map[int]int{ "utopic/django-42": {100: 1}, }, expectResponse: params.StatsResponse{ ArchiveDownloadCount: 1, ArchiveDownload: params.StatsCount{ Total: 1, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: 1, }, }, }, { about: "some downloads this month", url: "utopic/wordpress-47", downloads: map[string]map[int]int{ "utopic/wordpress-47": {20: 2, 25: 5}, }, expectResponse: params.StatsResponse{ ArchiveDownloadCount: 2 + 5, ArchiveDownload: params.StatsCount{ Total: 2 + 5, Month: 2 + 5, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: 2 + 5, Month: 2 + 5, }, }, }, { about: "multiple recent downloads", url: "utopic/django-42", downloads: map[string]map[int]int{ "utopic/django-42": {100: 1, 12: 3, 8: 5, 4: 10, 2: 1, 0: 3}, }, 
expectResponse: params.StatsResponse{ ArchiveDownloadCount: 1 + 3 + 5 + 10 + 1 + 3, ArchiveDownload: params.StatsCount{ Total: 1 + 3 + 5 + 10 + 1 + 3, Day: 3, Week: 10 + 1 + 3, Month: 3 + 5 + 10 + 1 + 3, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: 1 + 3 + 5 + 10 + 1 + 3, Day: 3, Week: 10 + 1 + 3, Month: 3 + 5 + 10 + 1 + 3, }, }, }, { about: "sparse downloads", url: "utopic/django-42", downloads: map[string]map[int]int{ "utopic/django-42": {200: 3, 27: 4, 3: 5}, }, expectResponse: params.StatsResponse{ ArchiveDownloadCount: 3 + 4 + 5, ArchiveDownload: params.StatsCount{ Total: 3 + 4 + 5, Week: 5, Month: 4 + 5, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: 3 + 4 + 5, Week: 5, Month: 4 + 5, }, }, }, { about: "bundle downloads", url: "bundle/django-simple-2", downloads: map[string]map[int]int{ "bundle/django-simple-2": {200: 3, 27: 4, 3: 5}, }, expectResponse: params.StatsResponse{ ArchiveDownloadCount: 3 + 4 + 5, ArchiveDownload: params.StatsCount{ Total: 3 + 4 + 5, Week: 5, Month: 4 + 5, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: 3 + 4 + 5, Week: 5, Month: 4 + 5, }, }, }, { about: "different charms", url: "trusty/rails-47", downloads: map[string]map[int]int{ "utopic/rails-47": {200: 3, 27: 4, 3: 5}, "trusty/rails-47": {20: 2, 6: 10}, "trusty/mysql-0": {200: 1, 14: 2, 1: 7}, }, expectResponse: params.StatsResponse{ ArchiveDownloadCount: 2 + 10, ArchiveDownload: params.StatsCount{ Total: 2 + 10, Week: 10, Month: 2 + 10, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: 2 + 10, Week: 10, Month: 2 + 10, }, }, }, { about: "different revisions of the same charm", url: "precise/rails-1", downloads: map[string]map[int]int{ "precise/rails-0": {300: 1, 200: 2}, "precise/rails-1": {100: 5, 10: 3, 2: 7}, "precise/rails-2": {6: 10, 0: 9}, }, expectResponse: params.StatsResponse{ ArchiveDownloadCount: 5 + 3 + 7, ArchiveDownload: params.StatsCount{ Total: 5 + 3 + 7, Week: 7, Month: 3 + 7, }, ArchiveDownloadAllRevisions: 
params.StatsCount{ Total: (1 + 2) + (5 + 3 + 7) + (10 + 9), Day: 0 + 0 + 9, Week: 0 + 7 + (10 + 9), Month: 0 + (3 + 7) + (10 + 9), }, }, }, { about: "downloads only in an old revision", url: "trusty/wordpress-2", downloads: map[string]map[int]int{ "precise/wordpress-2": {2: 2, 0: 1}, "trusty/wordpress-0": {100: 10}, "trusty/wordpress-2": {}, }, expectResponse: params.StatsResponse{ ArchiveDownloadAllRevisions: params.StatsCount{ Total: 10, }, }, }, { about: "downloads only in newer revision", url: "utopic/wordpress-0", downloads: map[string]map[int]int{ "utopic/wordpress-0": {}, "utopic/wordpress-1": {31: 7, 10: 1, 3: 2, 0: 1}, "utopic/wordpress-2": {6: 9, 0: 2}, }, expectResponse: params.StatsResponse{ ArchiveDownloadAllRevisions: params.StatsCount{ Total: (7 + 1 + 2 + 1) + (9 + 2), Day: 1 + 2, Week: (2 + 1) + (9 + 2), Month: (1 + 2 + 1) + (9 + 2), }, }, }, { about: "non promulgated charms", url: "~who/utopic/django-0", downloads: map[string]map[int]int{ "utopic/django-0": {100: 1, 10: 2, 1: 3, 0: 4}, "~who/utopic/django-0": {2: 5}, }, expectResponse: params.StatsResponse{ ArchiveDownloadCount: 5, ArchiveDownload: params.StatsCount{ Total: 5, Week: 5, Month: 5, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: 5, Week: 5, Month: 5, }, }, }} func (s *APISuite) TestMetaStats(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } // TODO (frankban): remove this call when removing the legacy counts logic. patchLegacyDownloadCountsEnabled(s.AddCleanup, false) today := time.Now() for i, test := range metaStatsTests { c.Logf("test %d: %s", i, test.about) for id, downloadsPerDay := range test.downloads { url := &router.ResolvedURL{ URL: *charm.MustParseURL(id), PromulgatedRevision: -1, } if url.URL.User == "" { url.URL.User = "charmers" url.PromulgatedRevision = url.URL.Revision } // Add the required entities to the database. 
if url.URL.Series == "bundle" { s.addPublicBundleFromRepo(c, "wordpress-simple", url, true) } else { s.addPublicCharmFromRepo(c, "wordpress", url) } // Simulate the entity was downloaded at the specified dates. for daysAgo, downloads := range downloadsPerDay { date := today.AddDate(0, 0, -daysAgo) key := []string{params.StatsArchiveDownload, url.URL.Series, url.URL.Name, url.URL.User, strconv.Itoa(url.URL.Revision)} for i := 0; i < downloads; i++ { err := s.store.IncCounterAtTime(key, date) c.Assert(err, gc.IsNil) } if url.PromulgatedRevision > -1 { key := []string{params.StatsArchiveDownloadPromulgated, url.URL.Series, url.URL.Name, "", strconv.Itoa(url.PromulgatedRevision)} for i := 0; i < downloads; i++ { err := s.store.IncCounterAtTime(key, date) c.Assert(err, gc.IsNil) } } } } // Ensure the meta/stats response reports the correct downloads count. s.assertGet(c, test.url+"/meta/stats", test.expectResponse) // Clean up the collections. _, err := s.store.DB.Entities().RemoveAll(nil) c.Assert(err, gc.IsNil) _, err = s.store.DB.StatCounters().RemoveAll(nil) c.Assert(err, gc.IsNil) } } var metaStatsWithLegacyDownloadCountsTests = []struct { about string count string expectValue int64 expectError string }{{ about: "no extra-info", }, { about: "zero downloads", count: "0", }, { about: "some downloads", count: "47", expectValue: 47, }, { about: "invalid value", count: "invalid", expectError: "cannot unmarshal extra-info value: invalid character 'i' looking for beginning of value", }} // Tests meta/stats with LegacyDownloadCountsEnabled set to true. // TODO (frankban): remove this test case when removing the legacy counts // logic. 
func (s *APISuite) TestMetaStatsWithLegacyDownloadCounts(c *gc.C) { patchLegacyDownloadCountsEnabled(s.AddCleanup, true) id, _ := s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/utopic/wordpress-42", 42)) url := storeURL("utopic/wordpress-42/meta/stats") for i, test := range metaStatsWithLegacyDownloadCountsTests { c.Logf("test %d: %s", i, test.about) // Update the entity extra info if required. if test.count != "" { extraInfo := map[string][]byte{ params.LegacyDownloadStats: []byte(test.count), } err := s.store.UpdateEntity(id, bson.D{{ "$set", bson.D{{"extrainfo", extraInfo}}, }}) c.Assert(err, gc.IsNil) } var expectBody interface{} var expectStatus int if test.expectError == "" { // Ensure the downloads count is correctly returned. expectBody = params.StatsResponse{ ArchiveDownloadCount: test.expectValue, ArchiveDownload: params.StatsCount{ Total: test.expectValue, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: test.expectValue, }, } expectStatus = http.StatusOK } else { // Ensure an error is returned. expectBody = params.Error{ Message: test.expectError, } expectStatus = http.StatusInternalServerError } // Perform the request. 
httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: url, ExpectStatus: expectStatus, ExpectBody: expectBody, }) } } type publishSpec struct { id *router.ResolvedURL time string acl []string } func (p publishSpec) published() params.Published { t, err := time.Parse("2006-01-02 15:04", p.time) if err != nil { panic(err) } return params.Published{&p.id.URL, t} } var publishedCharms = []publishSpec{{ id: newResolvedURL("cs:~charmers/precise/wordpress-1", 1), time: "5432-10-12 00:00", }, { id: newResolvedURL("cs:~charmers/precise/mysql-1", 1), time: "5432-10-12 13:00", }, { id: newResolvedURL("cs:~charmers/precise/wordpress-2", 2), time: "5432-10-12 23:59", }, { id: newResolvedURL("cs:~charmers/precise/mysql-2", 2), time: "5432-10-13 00:00", }, { id: newResolvedURL("cs:~charmers/precise/mysql-5", 5), time: "5432-10-13 10:00", }, { id: newResolvedURL("cs:~charmers/precise/wordpress-3", 3), time: "5432-10-14 01:00", }, { id: newResolvedURL("cs:~charmers/precise/django-0", -1), time: "5432-10-14 02:00", acl: []string{"charmers"}, }} var changesPublishedTests = []struct { args string // expect holds indexes into publishedCharms // of the expected indexes returned by charms/published expect []int }{{ args: "", expect: []int{5, 4, 3, 2, 1, 0}, }, { args: "?start=5432-10-13", expect: []int{5, 4, 3}, }, { args: "?stop=5432-10-13", expect: []int{4, 3, 2, 1, 0}, }, { args: "?start=5432-10-13&stop=5432-10-13", expect: []int{4, 3}, }, { args: "?start=5432-10-12&stop=5432-10-13", expect: []int{4, 3, 2, 1, 0}, }, { args: "?start=5432-10-13&stop=5432-10-12", expect: []int{}, }, { args: "?limit=3", expect: []int{5, 4, 3}, }, { args: "?start=5432-10-12&stop=5432-10-13&limit=2", expect: []int{4, 3}, }} func (s *APISuite) TestChangesPublished(c *gc.C) { s.publishCharmsAtKnownTimes(c, publishedCharms) for i, test := range changesPublishedTests { c.Logf("test %d: %q", i, test.args) expect := make([]params.Published, len(test.expect)) for j, index := range 
test.expect { expect[j] = publishedCharms[index].published() } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("changes/published") + test.args, ExpectBody: expect, }) } } func (s *APISuite) TestChangesPublishedAdmin(c *gc.C) { s.publishCharmsAtKnownTimes(c, publishedCharms) expect := make([]params.Published, len(publishedCharms)) for i := range expect { expect[i] = publishedCharms[len(publishedCharms)-(i+1)].published() } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Username: testUsername, Password: testPassword, URL: storeURL("changes/published"), ExpectBody: expect, }) } var changesPublishedErrorsTests = []struct { args string expect params.Error status int }{{ args: "?limit=0", expect: params.Error{ Code: params.ErrBadRequest, Message: "invalid 'limit' value", }, status: http.StatusBadRequest, }, { args: "?limit=-1", expect: params.Error{ Code: params.ErrBadRequest, Message: "invalid 'limit' value", }, status: http.StatusBadRequest, }, { args: "?limit=-9999", expect: params.Error{ Code: params.ErrBadRequest, Message: "invalid 'limit' value", }, status: http.StatusBadRequest, }, { args: "?start=baddate", expect: params.Error{ Code: params.ErrBadRequest, Message: `invalid 'start' value "baddate": parsing time "baddate" as "2006-01-02": cannot parse "baddate" as "2006"`, }, status: http.StatusBadRequest, }, { args: "?stop=baddate", expect: params.Error{ Code: params.ErrBadRequest, Message: `invalid 'stop' value "baddate": parsing time "baddate" as "2006-01-02": cannot parse "baddate" as "2006"`, }, status: http.StatusBadRequest, }} func (s *APISuite) TestChangesPublishedErrors(c *gc.C) { s.publishCharmsAtKnownTimes(c, publishedCharms) for i, test := range changesPublishedErrorsTests { c.Logf("test %d: %q", i, test.args) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("changes/published") + test.args, ExpectStatus: test.status, ExpectBody: test.expect, }) } 
} var publishErrorsTests = []struct { about string method string id string contentType string body string expectStatus int expectBody params.Error }{{ about: "get method not allowed", method: "GET", id: "~who/trusty/wordpress-0", expectStatus: http.StatusMethodNotAllowed, expectBody: params.Error{ Code: params.ErrMethodNotAllowed, Message: "GET not allowed", }, }, { about: "post method not allowed", method: "POST", id: "~who/trusty/wordpress-0", expectStatus: http.StatusMethodNotAllowed, expectBody: params.Error{ Code: params.ErrMethodNotAllowed, Message: "POST not allowed", }, }, { about: "unexpected content type", method: "PUT", id: "~who/trusty/wordpress-0", contentType: "text/invalid", expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: `cannot unmarshal publish request body: cannot unmarshal into field: unexpected content type text/invalid; want application/json; content: "{\"Channels\":[\"development\"]}"`, }, }, { about: "invalid body", method: "PUT", id: "~who/trusty/wordpress-0", body: "bad wolf", expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, Message: "cannot unmarshal publish request body: cannot unmarshal into field: cannot unmarshal request body: invalid character 'b' looking for beginning of value", }, }, { about: "entity to be published not found", method: "PUT", id: "~who/wily/django-42", expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: `no matching charm or bundle for cs:~who/wily/django-42`, }, }, { about: "no channels provided", method: "PUT", id: "~who/trusty/wordpress-0", body: mustMarshalJSON(params.PublishRequest{}), expectStatus: http.StatusBadRequest, expectBody: params.Error{ Message: `no channels provided`, Code: params.ErrBadRequest, }, }, { about: "invalid channel specified", method: "PUT", id: "~who/trusty/wordpress-0", body: mustMarshalJSON(params.PublishRequest{ Channels: []params.Channel{"bad"}, 
}), expectStatus: http.StatusBadRequest, expectBody: params.Error{ Message: `cannot publish to "bad"`, Code: params.ErrBadRequest, }, }, { about: "empty channel specified", method: "PUT", id: "~who/trusty/wordpress-0", body: mustMarshalJSON(params.PublishRequest{ Channels: []params.Channel{""}, }), expectStatus: http.StatusBadRequest, expectBody: params.Error{ Message: `cannot publish to ""`, Code: params.ErrBadRequest, }, }, { about: "unpublished channel specified", method: "PUT", id: "~who/trusty/wordpress-0", body: mustMarshalJSON(params.PublishRequest{ Channels: []params.Channel{params.UnpublishedChannel}, }), expectStatus: http.StatusBadRequest, expectBody: params.Error{ Message: `cannot publish to "unpublished"`, Code: params.ErrBadRequest, }, }} func (s *APISuite) TestPublishErrors(c *gc.C) { s.addPublicCharm(c, storetesting.NewCharm(nil), newResolvedURL("~who/trusty/wordpress-0", -1)) for i, test := range publishErrorsTests { c.Logf("test %d: %s", i, test.about) contentType := test.contentType if contentType == "" { contentType = "application/json" } body := test.body if body == "" { body = mustMarshalJSON(params.PublishRequest{ Channels: []params.Channel{params.DevelopmentChannel}, }) } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(test.id + "/publish"), Method: test.method, Header: http.Header{"Content-Type": {contentType}}, Username: testUsername, Password: testPassword, Body: strings.NewReader(body), ExpectStatus: test.expectStatus, ExpectBody: test.expectBody, }) } } var publishAuthorizationTests = []struct { about string // acls holds the ACLs that will be associated with the // entity we're publishing. Note: we'll always publish // as the same user ("bob"). acls map[params.Channel]mongodoc.ACL // channels holds the channels we'll try to publish to. channels []params.Channel // expectError is true if we expect the authorization // to fail. 
expectError bool }{{ about: "all perms allow bob; publish to single channel", acls: map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"bob"}, }, params.DevelopmentChannel: { Read: []string{"bob"}, Write: []string{"bob"}, }, params.StableChannel: { Read: []string{"bob"}, Write: []string{"bob"}, }, }, channels: []params.Channel{"development"}, }, { about: "all perms allow bob; publish to several channels", acls: map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"bob"}, Write: []string{"bob"}, }, params.DevelopmentChannel: { Read: []string{"bob"}, Write: []string{"bob"}, }, params.StableChannel: { Read: []string{"bob"}, Write: []string{"bob"}, }, }, channels: []params.Channel{"development", "stable"}, }, { about: "publish on an entity without perms on its current channel", acls: map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: {}, params.DevelopmentChannel: { Read: []string{"bob"}, Write: []string{"bob"}, }, params.StableChannel: { Read: []string{"bob"}, Write: []string{"bob"}, }, }, channels: []params.Channel{"development"}, }, { about: "publish on channels without access", acls: map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: { Read: []string{"everyone"}, Write: []string{"everyone"}, }, params.DevelopmentChannel: { Read: []string{"alice"}, Write: []string{"alice"}, }, params.StableChannel: { Read: []string{"everyone"}, Write: []string{"everyone"}, }, }, channels: []params.Channel{"development"}, expectError: true, }, { about: "publish on several channels without access to all", acls: map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: {}, params.DevelopmentChannel: { Read: []string{"bob"}, Write: []string{"bob"}, }, params.StableChannel: { Read: []string{"alice"}, Write: []string{"alice"}, }, }, channels: []params.Channel{"development", "stable"}, expectError: true, }} func (s *APISuite) TestPublishAuthorization(c *gc.C) { s.discharge = 
dischargeForUser("bob") for i, test := range publishAuthorizationTests { c.Logf("test %d: %v", i, test.about) id := newResolvedURL(fmt.Sprintf("cs:~who/precise/wordpress%d-0", i), -1) err := s.store.AddCharmWithArchive(id, storetesting.NewCharm(nil)) c.Assert(err, gc.IsNil) for ch, acl := range test.acls { err := s.store.SetPerms(&id.URL, string(ch)+".read", acl.Read...) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&id.URL, string(ch)+".write", acl.Write...) c.Assert(err, gc.IsNil) } if test.expectError { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Method: "PUT", URL: storeURL(id.URL.Path() + "/publish"), Do: bakeryDo(nil), JSONBody: params.PublishRequest{ Channels: test.channels, }, ExpectStatus: http.StatusUnauthorized, ExpectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "bob"`, }, }) continue } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Method: "PUT", URL: storeURL(id.URL.Path() + "/publish"), Do: bakeryDo(nil), JSONBody: params.PublishRequest{ Channels: test.channels, }, }) // Check that the entity really has been published to all the given channels. for _, ch := range test.channels { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(fmt.Sprintf("%s/meta/id-revision?channel=%s", mongodoc.BaseURL(&id.URL).Path(), ch)), Do: bakeryDo(nil), ExpectBody: params.IdRevisionResponse{ Revision: 0, }, }) } } } func (s *APISuite) TestPublishSuccess(c *gc.C) { s.discharge = dischargeForUser("bob") // Publish an entity to all channels (don't use publish endpoint // 'cos that's what we're trying to test). id0 := newResolvedURL("cs:~bob/precise/wordpress-0", -1) err := s.store.AddCharmWithArchive(id0, storetesting.NewCharm(nil)) c.Assert(err, gc.IsNil) err = s.store.Publish(id0, params.DevelopmentChannel, params.StableChannel) c.Assert(err, gc.IsNil) // Add an unpublished entity. 
err = s.store.AddCharmWithArchive(newResolvedURL("cs:~bob/precise/wordpress-1", -1), storetesting.NewCharm(nil))
c.Assert(err, gc.IsNil)
// Publish it to the development channel.
httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
	Handler: s.srv,
	Method:  "PUT",
	URL:     storeURL("~bob/precise/wordpress-1/publish"),
	Do:      bakeryDo(nil),
	JSONBody: params.PublishRequest{
		Channels: []params.Channel{params.DevelopmentChannel},
	},
})
// assertResolvesTo checks that resolving the base wordpress entity on
// the given channel yields the given revision.
assertResolvesTo := func(ch params.Channel, rev int) {
	chanParam := ""
	if ch != params.NoChannel {
		chanParam = "?channel=" + string(ch)
	}
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler: s.srv,
		// Plain concatenation: the previous fmt.Sprintf call here had no
		// format verbs and is flagged by go vet's printf check.
		URL: storeURL("~bob/precise/wordpress/meta/id-revision" + chanParam),
		Do:  bakeryDo(nil),
		ExpectBody: params.IdRevisionResponse{
			Revision: rev,
		},
	})
}
assertResolvesTo(params.UnpublishedChannel, 1)
assertResolvesTo(params.DevelopmentChannel, 1)
assertResolvesTo(params.StableChannel, 0)
assertResolvesTo(params.NoChannel, 0)
}

// publishCharmsAtKnownTimes populates the store with
// the given charms, each with its declared publish time
// recorded as the entity's upload time, and sets any
// unpublished/stable read ACLs declared in the spec.
func (s *APISuite) publishCharmsAtKnownTimes(c *gc.C, charms []publishSpec) {
	// Range over the charms parameter rather than the package-level
	// publishedCharms variable, which the previous code iterated by
	// mistake. All current callers pass publishedCharms, so their
	// behavior is unchanged.
	for _, ch := range charms {
		id, _ := s.addPublicCharmFromRepo(c, "wordpress", ch.id)
		t := ch.published().PublishTime
		err := s.store.UpdateEntity(id, bson.D{{"$set", bson.D{{"uploadtime", t}}}})
		c.Assert(err, gc.IsNil)
		if len(ch.acl) > 0 {
			err := s.store.SetPerms(&id.URL, "unpublished.read", ch.acl...)
			c.Assert(err, gc.IsNil)
			err = s.store.SetPerms(&id.URL, "stable.read", ch.acl...)
			c.Assert(err, gc.IsNil)
		}
	}
}

// debugPprofTests lists debug/pprof endpoints together with a regexp
// that each response body must match.
var debugPprofTests = []struct {
	path  string
	match string
}{{
	path:  "debug/pprof/",
	match: `(?s).*profiles:.*heap.*`,
}, {
	path:  "debug/pprof/goroutine?debug=2",
	match: "(?s)goroutine [0-9]+.*",
}, {
	path:  "debug/pprof/cmdline",
	match: ".+charmstore.+",
}}

// TestDebugPprof checks that the pprof endpoints are served
// successfully to basic-auth-authenticated requests.
func (s *APISuite) TestDebugPprof(c *gc.C) {
	for i, test := range debugPprofTests {
		c.Logf("test %d: %s", i, test.path)
		rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
			Handler: s.srv,
			Header:  basicAuthHeader(testUsername, testPassword),
			URL:     storeURL(test.path),
		})
		c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.String()))
		c.Assert(rec.Body.String(), gc.Matches, test.match)
	}
}

// TestDebugPprofFailsWithoutAuth checks that unauthenticated requests
// to the pprof endpoints are rejected with a discharge-required error.
func (s *APISuite) TestDebugPprofFailsWithoutAuth(c *gc.C) {
	for i, test := range debugPprofTests {
		c.Logf("test %d: %s", i, test.path)
		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
			Handler:      s.srv,
			URL:          storeURL(test.path),
			ExpectStatus: http.StatusProxyAuthRequired,
			ExpectBody:   dischargeRequiredBody,
		})
	}
}

func (s *APISuite) TestHash256Laziness(c *gc.C) {
	// TODO frankban: remove this test after updating entities in the
	// production db with their SHA256 hash value. Entities are updated by
	// running the cshash256 command.
	id, _ := s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~who/precise/wordpress-0", -1))
	// Retrieve the SHA256 hash.
entity, err := s.store.FindEntity(id, charmstore.FieldSelector("blobhash256")) c.Assert(err, gc.IsNil) c.Assert(entity.BlobHash256, gc.Not(gc.Equals), "") } var urlChannelResolvingEntities = []struct { id *router.ResolvedURL channel params.Channel }{{ id: newResolvedURL("~charmers/precise/wordpress-0", 0), channel: params.StableChannel, }, { id: newResolvedURL("~charmers/precise/wordpress-1", 1), channel: params.DevelopmentChannel, }, { id: newResolvedURL("~charmers/precise/wordpress-2", 2), channel: params.UnpublishedChannel, }, { id: newResolvedURL("~charmers/trusty/mysql-0", 0), channel: params.UnpublishedChannel, }} var urlChannelResolvingTests = []struct { url string channel params.Channel expectURL string expectStatus int expectError params.Error }{{ url: "wordpress", expectURL: "cs:precise/wordpress-0", }, { url: "wordpress", channel: params.StableChannel, expectURL: "cs:precise/wordpress-0", }, { url: "wordpress", channel: params.DevelopmentChannel, expectURL: "cs:precise/wordpress-1", }, { url: "wordpress", channel: params.UnpublishedChannel, expectURL: "cs:precise/wordpress-2", }, { url: "~charmers/precise/wordpress", channel: params.StableChannel, expectURL: "cs:~charmers/precise/wordpress-0", }, { url: "~charmers/precise/wordpress-2", channel: params.StableChannel, expectStatus: http.StatusNotFound, expectError: params.Error{ Message: `cs:~charmers/precise/wordpress-2 not found in stable channel`, Code: params.ErrNotFound, }, }, { url: "mysql", expectStatus: http.StatusNotFound, expectError: params.Error{ Message: `no matching charm or bundle for cs:mysql`, Code: params.ErrNotFound, }, }, { url: "mysql", channel: "unknown", expectStatus: http.StatusBadRequest, expectError: params.Error{ Message: `invalid channel "unknown" specified in request`, Code: params.ErrBadRequest, }, }} func (s *APISuite) TestURLChannelResolving(c *gc.C) { s.discharge = dischargeForUser("charmers") for _, add := range urlChannelResolvingEntities { err := 
s.store.AddCharmWithArchive(add.id, storetesting.NewCharm(nil))
c.Assert(err, gc.IsNil)
// Entities not destined for the unpublished channel must be
// explicitly published there before they can be resolved.
if add.channel != params.UnpublishedChannel {
	err = s.store.Publish(add.id, add.channel)
	c.Assert(err, gc.IsNil)
}
}
for i, test := range urlChannelResolvingTests {
	path := test.url + "/meta/any"
	if test.channel != "" {
		path += "?channel=" + string(test.channel)
	}
	c.Logf("test %d: %v", i, test.url)
	if test.expectError.Message != "" {
		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
			Handler:      s.srv,
			Do:           bakeryDo(nil),
			URL:          storeURL(path),
			ExpectStatus: test.expectStatus,
			ExpectBody:   test.expectError,
		})
	} else {
		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
			Handler: s.srv,
			Do:      bakeryDo(nil),
			URL:     storeURL(path),
			ExpectBody: params.MetaAnyResponse{
				Id: charm.MustParseURL(test.expectURL),
			},
		})
	}
}
}

// basicAuthHeader returns an http.Header holding a basic-auth
// Authorization entry for the given credentials.
func basicAuthHeader(username, password string) http.Header {
	// It's a pity we have to jump through this hoop.
	req := &http.Request{
		Header: make(http.Header),
	}
	req.SetBasicAuth(username, password)
	return req.Header
}

// entityFieldGetter returns a metaEndpointExpectedValueGetter that
// reads the named field from the stored entity document via
// reflection. It panics if the entity has no such field.
func entityFieldGetter(fieldName string) metaEndpointExpectedValueGetter {
	return entityGetter(func(entity *mongodoc.Entity) interface{} {
		field := reflect.ValueOf(entity).Elem().FieldByName(fieldName)
		if !field.IsValid() {
			panic(errgo.Newf("entity has no field %q", fieldName))
		}
		return field.Interface()
	})
}

// entityGetter returns a metaEndpointExpectedValueGetter that looks up
// the entity document for the given URL and applies get to it.
func entityGetter(get func(*mongodoc.Entity) interface{}) metaEndpointExpectedValueGetter {
	return func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) {
		doc, err := store.FindEntity(url, nil)
		if err != nil {
			return nil, errgo.Mask(err)
		}
		return get(doc), nil
	}
}

// zipGetter returns a metaEndpointExpectedValueGetter that opens the
// entity's archive blob, reads it fully into memory as a zip.Reader
// and applies get to it.
func zipGetter(get func(*zip.Reader) interface{}) metaEndpointExpectedValueGetter {
	return func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) {
		doc, err := store.FindEntity(url, charmstore.FieldSelector("blobname"))
		if err != nil {
			return nil, errgo.Mask(err)
		}
		blob, size, err := store.BlobStore.Open(doc.BlobName)
		if err != nil {
			return nil, errgo.Mask(err)
		}
		defer blob.Close()
		content, err := ioutil.ReadAll(blob)
		if err != nil {
			return nil, errgo.Mask(err)
		}
		r, err := zip.NewReader(bytes.NewReader(content), size)
		if err != nil {
			return nil, errgo.Mask(err)
		}
		return get(r), nil
	}
}

// entitySizeChecker asserts that the data is an archive-size response
// holding a non-zero size.
func entitySizeChecker(c *gc.C, data interface{}) {
	response := data.(*params.ArchiveSizeResponse)
	c.Assert(response.Size, gc.Not(gc.Equals), int64(0))
}

// addLog inserts the given log document directly into the store's
// logs collection.
func (s *APISuite) addLog(c *gc.C, log *mongodoc.Log) {
	err := s.store.DB.Logs().Insert(log)
	c.Assert(err, gc.Equals, nil)
}

// mustMarshalJSON marshals val to a JSON string, panicking on failure.
func mustMarshalJSON(val interface{}) string {
	data, err := json.Marshal(val)
	if err != nil {
		panic(fmt.Errorf("cannot marshal %#v: %v", val, err))
	}
	return string(data)
}

// TestMacaroon checks that a macaroon obtained from the /macaroon
// endpoint can be discharged, that the is-authenticated-user caveat is
// checked during discharge, and that the identity declared by the
// discharger ("who" here) is then used for authorization on the log
// endpoint.
func (s *APISuite) TestMacaroon(c *gc.C) {
	var checkedCaveats []string
	var mu sync.Mutex
	var dischargeError error
	s.discharge = func(cond string, arg string) ([]checkers.Caveat, error) {
		mu.Lock()
		defer mu.Unlock()
		checkedCaveats = append(checkedCaveats, cond+" "+arg)
		return []checkers.Caveat{checkers.DeclaredCaveat("username", "who")}, dischargeError
	}
	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
		Handler: s.srv,
		URL:     storeURL("macaroon"),
		Method:  "GET",
	})
	c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.String()))
	var m macaroon.Macaroon
	err := json.Unmarshal(rec.Body.Bytes(), &m)
	c.Assert(err, gc.IsNil)
	c.Assert(m.Location(), gc.Equals, "charmstore")
	client := httpbakery.NewClient()
	ms, err := client.DischargeAll(&m)
	c.Assert(err, gc.IsNil)
	sort.Strings(checkedCaveats)
	c.Assert(checkedCaveats, jc.DeepEquals, []string{
		"is-authenticated-user ",
	})
	macaroonCookie, err := httpbakery.NewCookie(ms)
	c.Assert(err, gc.IsNil)
	// The declared user "who" has no access to the log endpoint.
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:      s.srv,
		URL:          storeURL("log"),
		Do:           bakeryDo(nil),
		Cookies:      []*http.Cookie{macaroonCookie},
		ExpectStatus: http.StatusUnauthorized,
		ExpectBody: params.Error{
			Code:    params.ErrUnauthorized,
			Message: `unauthorized: access denied for user "who"`,
		},
	})
httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.noMacaroonSrv, URL: storeURL("log"), ExpectStatus: http.StatusUnauthorized, ExpectBody: params.Error{ Message: "authentication failed: missing HTTP auth header", Code: params.ErrUnauthorized, }, }) } func (s *APISuite) TestWhoAmIFailWithNoMacaroon(c *gc.C) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.noMacaroonSrv, URL: storeURL("whoami"), Do: bakeryDo(nil), ExpectStatus: http.StatusUnauthorized, ExpectBody: params.Error{ Code: params.ErrUnauthorized, Message: "authentication failed: missing HTTP auth header", }, }) } func (s *APISuite) TestWhoAmIReturnsNameAndGroups(c *gc.C) { s.discharge = dischargeForUser("who") s.idM.groups = map[string][]string{ "who": {"foo", "bar"}, } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("whoami"), Do: bakeryDo(nil), ExpectStatus: http.StatusOK, ExpectBody: params.WhoAmIResponse{ User: "who", Groups: []string{"foo", "bar"}, }, }) } var promulgateTests = []struct { about string entities []*mongodoc.Entity baseEntities []*mongodoc.BaseEntity id string useHTTPDo bool method string caveats []checkers.Caveat groups map[string][]string body io.Reader username string password string expectStatus int expectBody interface{} expectEntities []*mongodoc.Entity expectBaseEntities []*mongodoc.BaseEntity expectPromulgate bool expectUser string }{{ about: "unpromulgate base entity", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, id: "~charmers/wordpress", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), username: testUsername, password: testPassword, expectStatus: http.StatusOK, expectEntities: []*mongodoc.Entity{ 
storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, expectUser: "admin", }, { about: "promulgate base entity", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, id: "~charmers/wordpress", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), username: testUsername, password: testPassword, expectStatus: http.StatusOK, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithACLs(params.StableChannel, mongodoc.ACL{ Write: []string{v5.PromulgatorsGroup}, }).WithPromulgated(true).Build(), }, expectPromulgate: true, expectUser: "admin", }, { about: "unpromulgate base entity not found", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, id: "~charmers/mysql", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), username: testUsername, password: testPassword, expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: `no matching charm or bundle for cs:~charmers/mysql`, }, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, }, { about: "promulgate base entity not found", entities: 
[]*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, id: "~charmers/mysql", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), username: testUsername, password: testPassword, expectStatus: http.StatusNotFound, expectBody: params.Error{ Code: params.ErrNotFound, Message: `no matching charm or bundle for cs:~charmers/mysql`, }, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, }, { about: "bad method", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, id: "~charmers/wordpress", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), username: testUsername, password: testPassword, method: "POST", expectStatus: http.StatusMethodNotAllowed, expectBody: params.Error{ Code: params.ErrMethodNotAllowed, Message: "POST not allowed", }, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, }, { about: "bad JSON", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, id: "~charmers/wordpress", body: bytes.NewReader([]byte("tru")), username: testUsername, password: testPassword, expectStatus: http.StatusBadRequest, 
expectBody: params.Error{ Code: params.ErrBadRequest, Message: "bad request: invalid character ' ' in literal true (expecting 'e')", }, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, }, { about: "unpromulgate base entity with macaroon", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, id: "~charmers/wordpress", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), caveats: []checkers.Caveat{ checkers.DeclaredCaveat(v5.UsernameAttr, v5.PromulgatorsGroup), }, expectStatus: http.StatusOK, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, expectUser: v5.PromulgatorsGroup, }, { about: "promulgate base entity with macaroon", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, id: "~charmers/wordpress", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), caveats: []checkers.Caveat{ checkers.DeclaredCaveat(v5.UsernameAttr, v5.PromulgatorsGroup), }, expectStatus: http.StatusOK, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithACLs(params.StableChannel, mongodoc.ACL{ Write: 
[]string{v5.PromulgatorsGroup}, }).WithPromulgated(true).Build(), }, expectPromulgate: true, expectUser: v5.PromulgatorsGroup, }, { about: "promulgate base entity with group macaroon", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, id: "~charmers/wordpress", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), caveats: []checkers.Caveat{ checkers.DeclaredCaveat(v5.UsernameAttr, "bob"), }, groups: map[string][]string{ "bob": {v5.PromulgatorsGroup, "yellow"}, }, expectStatus: http.StatusOK, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithACLs(params.StableChannel, mongodoc.ACL{ Write: []string{v5.PromulgatorsGroup}, }).WithPromulgated(true).Build(), }, expectPromulgate: true, expectUser: "bob", }, { about: "no authorisation", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, useHTTPDo: true, id: "~charmers/wordpress", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), expectStatus: http.StatusProxyAuthRequired, expectBody: dischargeRequiredBody, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), }, }, { about: "promulgate base entity with unauthorized user macaroon", entities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), }, 
baseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, id: "~charmers/wordpress", body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), caveats: []checkers.Caveat{ checkers.DeclaredCaveat(v5.UsernameAttr, "bob"), }, groups: map[string][]string{ "bob": {"yellow"}, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Message: `unauthorized: access denied for user "bob"`, Code: params.ErrUnauthorized, }, expectEntities: []*mongodoc.Entity{ storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), }, expectBaseEntities: []*mongodoc.BaseEntity{ storetesting.NewBaseEntity("~charmers/wordpress").Build(), }, }} func (s *APISuite) TestPromulgate(c *gc.C) { for i, test := range promulgateTests { c.Logf("%d. %s\n", i, test.about) _, err := s.store.DB.Entities().RemoveAll(nil) c.Assert(err, gc.IsNil) _, err = s.store.DB.BaseEntities().RemoveAll(nil) c.Assert(err, gc.IsNil) for _, e := range test.entities { err := s.store.DB.Entities().Insert(e) c.Assert(err, gc.IsNil) } for _, e := range test.baseEntities { err := s.store.DB.BaseEntities().Insert(e) c.Assert(err, gc.IsNil) } if test.method == "" { test.method = "PUT" } var calledEntities []audit.Entry s.PatchValue(v5.TestAddAuditCallback, func(e audit.Entry) { calledEntities = append(calledEntities, e) }) client := httpbakery.NewHTTPClient() s.discharge = func(_, _ string) ([]checkers.Caveat, error) { return test.caveats, nil } s.idM.groups = test.groups p := httptesting.JSONCallParams{ Handler: s.srv, // TODO avoid using channel=unpublished here URL: storeURL(test.id + "/promulgate?channel=unpublished"), Method: test.method, Body: test.body, Header: http.Header{"Content-Type": {"application/json"}}, Username: test.username, Password: test.password, ExpectStatus: test.expectStatus, ExpectBody: test.expectBody, } if !test.useHTTPDo { p.Do = bakeryDo(client) } httptesting.AssertJSONCall(c, p) n, err := s.store.DB.Entities().Count() 
	c.Assert(err, gc.IsNil)
	c.Assert(n, gc.Equals, len(test.expectEntities))
	// Verify that exactly the expected entities and base entities
	// remain in the database after the promulgate call.
	for _, e := range test.expectEntities {
		storetesting.AssertEntity(c, s.store.DB.Entities(), e)
	}
	n, err = s.store.DB.BaseEntities().Count()
	c.Assert(err, gc.IsNil)
	c.Assert(n, gc.Equals, len(test.expectBaseEntities))
	for _, e := range test.expectBaseEntities {
		storetesting.AssertBaseEntity(c, s.store.DB.BaseEntities(), e)
	}
	if test.expectStatus == http.StatusOK {
		// A successful (un)promulgate must emit exactly one audit
		// entry for the affected charm URL.
		ref := charm.MustParseURL(test.id)
		ref.Series = "trusty"
		ref.Revision = 0
		e := audit.Entry{
			User:   test.expectUser,
			Op:     audit.OpUnpromulgate,
			Entity: ref,
		}
		if test.expectPromulgate {
			e.Op = audit.OpPromulgate
		}
		c.Assert(calledEntities, jc.DeepEquals, []audit.Entry{e})
	} else {
		// Failed requests must not be audited.
		c.Assert(len(calledEntities), gc.Equals, 0)
	}
	calledEntities = nil
	}
}

// TestEndpointRequiringBaseEntityWithPromulgatedId checks that
// base-entity endpoints still resolve when addressed via a
// promulgated URL after the entity has been unpromulgated.
func (s *APISuite) TestEndpointRequiringBaseEntityWithPromulgatedId(c *gc.C) {
	// Add a promulgated charm.
	url := newResolvedURL("~charmers/precise/wordpress-23", 23)
	s.addPublicCharmFromRepo(c, "wordpress", url)
	// Unpromulgate the base entity
	err := s.store.SetPromulgated(url, false)
	c.Assert(err, gc.IsNil)
	// Check that we can still enquire about the promulgation status
	// of the entity when using its promulgated URL.
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler: s.srv,
		URL:     storeURL("precise/wordpress-23/meta/promulgated"),
		ExpectBody: params.PromulgatedResponse{
			Promulgated: false,
		},
	})
}

// TestTooManyConcurrentRequests checks that the server responds with
// ErrServiceUnavailable once its mongo session limit is exhausted.
func (s *APISuite) TestTooManyConcurrentRequests(c *gc.C) {
	// We don't have any control over the number of concurrent
	// connections allowed by s.srv, so we make our own
	// server here with custom config.
	config := charmstore.ServerParams{
		MaxMgoSessions: 1,
	}
	db := s.Session.DB("charmstore")
	srv, err := charmstore.NewServer(db, nil, config, map[string]charmstore.NewAPIHandlerFunc{"v5": v5.NewAPIHandler})
	c.Assert(err, gc.IsNil)
	defer srv.Close()
	// Get a store from the pool so that we'll be
	// at the concurrent request limit.
store := srv.Pool().Store() defer store.Close() httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: srv, Do: bakeryDo(nil), URL: storeURL("debug/status"), ExpectStatus: http.StatusServiceUnavailable, ExpectBody: params.Error{ Message: "service unavailable: too many mongo sessions in use", Code: params.ErrServiceUnavailable, }, }) } // dischargeRequiredBody returns a httptesting.BodyAsserter that checks // that the response body contains a discharge required error holding a macaroon // with a third-party caveat addressed to expectedEntityLocation. var dischargeRequiredBody httptesting.BodyAsserter = func(c *gc.C, body json.RawMessage) { var response httpbakery.Error err := json.Unmarshal(body, &response) c.Assert(err, gc.IsNil) c.Assert(response.Code, gc.Equals, httpbakery.ErrDischargeRequired) c.Assert(response.Message, gc.Equals, "verification failed: no macaroon cookies in request") c.Assert(response.Info.Macaroon, gc.NotNil) for _, cav := range response.Info.Macaroon.Caveats() { if cav.Location != "" { return } } c.Fatalf("no third party caveat found in response macaroon; caveats %#v", response.Info.Macaroon.Caveats()) } func (s *APISuite) TestSetAuthCookie(c *gc.C) { m, err := macaroon.New([]byte("key"), "id", "location") c.Assert(err, jc.ErrorIsNil) ms := macaroon.Slice{m} rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("set-auth-cookie"), Method: "PUT", Header: http.Header{"Origin": []string{"https://1.2.3.4"}}, JSONBody: params.SetAuthCookie{ Macaroons: ms, }, }) // The request is successful. c.Assert(rec.Code, gc.Equals, http.StatusOK) // The response includes the CORS header for the specific request. c.Assert(rec.Header().Get("Access-Control-Allow-Origin"), gc.Equals, "https://1.2.3.4") // The response includes the macaroons cookie. 
resp := http.Response{Header: rec.Header()} cookies := resp.Cookies() c.Assert(len(cookies), gc.Equals, 1) expected, err := httpbakery.NewCookie(ms) expected.Path = "/" c.Assert(err, jc.ErrorIsNil) c.Assert(cookies[0].Value, gc.Equals, expected.Value) } func (s *APISuite) TestSetAuthCookieBodyError(c *gc.C) { m, err := macaroon.New([]byte("key"), "id", "location") c.Assert(err, jc.ErrorIsNil) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("set-auth-cookie"), Method: "PUT", JSONBody: macaroon.Slice{m}, ExpectStatus: http.StatusInternalServerError, ExpectBody: params.Error{ Message: "cannot unmarshal macaroons: json: cannot unmarshal array into Go value of type params.SetAuthCookie", }, }) } func (s *APISuite) TestSetAuthCookieMethodError(c *gc.C) { m, err := macaroon.New([]byte("key"), "id", "location") c.Assert(err, jc.ErrorIsNil) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("set-auth-cookie"), Method: "POST", JSONBody: macaroon.Slice{m}, ExpectStatus: http.StatusMethodNotAllowed, ExpectBody: params.Error{ Code: params.ErrMethodNotAllowed, Message: "POST not allowed", }, }) } func (s *APISuite) TestLogout(c *gc.C) { m, err := macaroon.New([]byte("key"), "id", "location") c.Assert(err, jc.ErrorIsNil) ms := macaroon.Slice{m} rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("logout"), Method: "GET", Cookies: []*http.Cookie{{ Name: "macaroon-1234567890", Value: "test value", }, { Name: "test cookie", Value: "test value also", }}, JSONBody: params.SetAuthCookie{ Macaroons: ms, }, }) // The response includes the macaroons cookie. 
resp := http.Response{Header: rec.Header()} cookies := resp.Cookies() c.Assert(len(cookies), gc.Equals, 1) c.Assert(cookies[0], jc.DeepEquals, &http.Cookie{ Name: "macaroon-1234567890", Value: "", Path: "/", MaxAge: -1, Raw: "macaroon-1234567890=; Path=/; Max-Age=0", }) } // entityACLs returns the ACLs that apply to the entity with the given URL. func entityACLs(store *charmstore.Store, url *router.ResolvedURL) (mongodoc.ACL, error) { e, err := store.FindEntity(url, nil) if err != nil { return mongodoc.ACL{}, err } be, err := store.FindBaseEntity(&url.URL, nil) if err != nil { return mongodoc.ACL{}, err } ch := params.UnpublishedChannel if e.Stable { ch = params.StableChannel } else if e.Development { ch = params.DevelopmentChannel } return be.ChannelACLs[ch], nil } �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/log_test.go������������������������0000664�0001750�0001750�00000035204�12672604603�025715� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "bytes" "encoding/json" "net/http" "time" jc "github.com/juju/testing/checkers" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" ) type logSuite struct { commonSuite } var _ = gc.Suite(&logSuite{}) func (s *logSuite) SetUpSuite(c *gc.C) { s.enableIdentity = true s.commonSuite.SetUpSuite(c) } var logResponses = map[string]*params.LogResponse{ "info1": { Data: rawMessage("info data 1"), Level: params.InfoLevel, Type: params.IngestionType, URLs: nil, }, "error1": { Data: rawMessage("error data 1"), Level: params.ErrorLevel, Type: params.IngestionType, URLs: nil, }, "info2": { Data: rawMessage("info data 2"), Level: params.InfoLevel, Type: params.IngestionType, URLs: []*charm.URL{ charm.MustParseURL("precise/django"), charm.MustParseURL("django"), charm.MustParseURL("rails"), }, }, "warning1": { Data: rawMessage("warning data 1"), Level: params.WarningLevel, Type: params.IngestionType, URLs: nil, }, "error2": { Data: rawMessage("error data 2"), Level: params.ErrorLevel, Type: params.IngestionType, URLs: []*charm.URL{ charm.MustParseURL("hadoop"), }, }, "info3": { Data: rawMessage("info data 3"), Level: params.InfoLevel, Type: params.IngestionType, URLs: []*charm.URL{ charm.MustParseURL("trusty/django"), charm.MustParseURL("django"), charm.MustParseURL("utopic/hadoop"), charm.MustParseURL("hadoop"), }, }, "error3": { Data: rawMessage("error data 3"), Level: params.ErrorLevel, Type: params.IngestionType, URLs: []*charm.URL{ charm.MustParseURL("utopic/hadoop"), charm.MustParseURL("hadoop"), charm.MustParseURL("precise/django"), charm.MustParseURL("django"), }, }, "stats": { Data: rawMessage("statistics info data"), Level: params.InfoLevel, Type: params.LegacyStatisticsType, URLs: nil, }, } var getLogsTests = []struct { about string 
querystring string expectBody []*params.LogResponse }{{ about: "retrieve logs", expectBody: []*params.LogResponse{ logResponses["stats"], logResponses["error3"], logResponses["info3"], logResponses["error2"], logResponses["warning1"], logResponses["info2"], logResponses["error1"], logResponses["info1"], }, }, { about: "use limit", querystring: "?limit=2", expectBody: []*params.LogResponse{ logResponses["stats"], logResponses["error3"], }, }, { about: "use offset", querystring: "?skip=3", expectBody: []*params.LogResponse{ logResponses["error2"], logResponses["warning1"], logResponses["info2"], logResponses["error1"], logResponses["info1"], }, }, { about: "zero offset", querystring: "?skip=0", expectBody: []*params.LogResponse{ logResponses["stats"], logResponses["error3"], logResponses["info3"], logResponses["error2"], logResponses["warning1"], logResponses["info2"], logResponses["error1"], logResponses["info1"], }, }, { about: "use both limit and offset", querystring: "?limit=3&skip=1", expectBody: []*params.LogResponse{ logResponses["error3"], logResponses["info3"], logResponses["error2"], }, }, { about: "filter by level", querystring: "?level=info", expectBody: []*params.LogResponse{ logResponses["stats"], logResponses["info3"], logResponses["info2"], logResponses["info1"], }, }, { about: "filter by type", querystring: "?type=ingestion", expectBody: []*params.LogResponse{ logResponses["error3"], logResponses["info3"], logResponses["error2"], logResponses["warning1"], logResponses["info2"], logResponses["error1"], logResponses["info1"], }, }, { about: "filter by level with a limit", querystring: "?level=error&limit=2", expectBody: []*params.LogResponse{ logResponses["error3"], logResponses["error2"], }, }, { about: "filter by id", querystring: "?id=precise/django", expectBody: []*params.LogResponse{ logResponses["error3"], logResponses["info2"], }, }, { about: "multiple query", querystring: "?id=utopic/hadoop&limit=1&level=error", expectBody: 
[]*params.LogResponse{ logResponses["error3"], }, }, { about: "empty response offset", querystring: "?id=utopic/hadoop&skip=10", }, { about: "empty response id not found", querystring: "?id=utopic/mysql", }, { about: "empty response level", querystring: "?id=trusty/rails&level=error", }, { about: "filter by type - legacyStatistics", querystring: "?type=legacyStatistics", expectBody: []*params.LogResponse{ logResponses["stats"], }, }} var paramsLogLevels = map[params.LogLevel]mongodoc.LogLevel{ params.InfoLevel: mongodoc.InfoLevel, params.WarningLevel: mongodoc.WarningLevel, params.ErrorLevel: mongodoc.ErrorLevel, } // paramsLogTypes maps API params log types to internal mongodoc ones. var paramsLogTypes = map[params.LogType]mongodoc.LogType{ params.IngestionType: mongodoc.IngestionType, params.LegacyStatisticsType: mongodoc.LegacyStatisticsType, } func (s *logSuite) TestGetLogs(c *gc.C) { // Add logs to the database. beforeAdding := time.Now().Add(-time.Second) for _, key := range []string{"info1", "error1", "info2", "warning1", "error2", "info3", "error3", "stats"} { resp := logResponses[key] err := s.store.AddLog(&resp.Data, paramsLogLevels[resp.Level], paramsLogTypes[resp.Type], resp.URLs) c.Assert(err, gc.IsNil) } afterAdding := time.Now().Add(time.Second) // Run the tests. for i, test := range getLogsTests { c.Logf("test %d: %s", i, test.about) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("log" + test.querystring), Username: testUsername, Password: testPassword, }) // Ensure the response is what we expect. c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "application/json") // Decode the response. var logs []*params.LogResponse decoder := json.NewDecoder(rec.Body) err := decoder.Decode(&logs) c.Assert(err, gc.IsNil) // Check and then reset the response time so that the whole body // can be more easily compared later. 
for _, log := range logs { c.Assert(log.Time, jc.TimeBetween(beforeAdding, afterAdding)) log.Time = time.Time{} } // Ensure the response includes the expected logs. c.Assert(logs, jc.DeepEquals, test.expectBody) } } func rawMessage(msg string) json.RawMessage { message, err := json.Marshal(msg) if err != nil { panic(err) } return json.RawMessage(message) } var getLogsErrorsTests = []struct { about string querystring string expectStatus int expectMessage string expectCode params.ErrorCode }{{ about: "invalid limit (negative number)", querystring: "?limit=-100", expectStatus: http.StatusBadRequest, expectMessage: "invalid limit value: value must be >= 1", expectCode: params.ErrBadRequest, }, { about: "invalid limit (zero value)", querystring: "?limit=0", expectStatus: http.StatusBadRequest, expectMessage: "invalid limit value: value must be >= 1", expectCode: params.ErrBadRequest, }, { about: "invalid limit (not a number)", querystring: "?limit=foo", expectStatus: http.StatusBadRequest, expectMessage: "invalid limit value: value must be a number", expectCode: params.ErrBadRequest, }, { about: "invalid offset (negative number)", querystring: "?skip=-100", expectStatus: http.StatusBadRequest, expectMessage: "invalid skip value: value must be >= 0", expectCode: params.ErrBadRequest, }, { about: "invalid offset (not a number)", querystring: "?skip=bar", expectStatus: http.StatusBadRequest, expectMessage: "invalid skip value: value must be a number", expectCode: params.ErrBadRequest, }, { about: "invalid id", querystring: "?id=no-such:reference", expectStatus: http.StatusBadRequest, expectMessage: `invalid id value: charm or bundle URL has invalid schema: "no-such:reference"`, expectCode: params.ErrBadRequest, }, { about: "invalid log level", querystring: "?level=bar", expectStatus: http.StatusBadRequest, expectMessage: "invalid log level value", expectCode: params.ErrBadRequest, }, { about: "invalid log type", querystring: "?type=no-such", expectStatus: 
http.StatusBadRequest, expectMessage: "invalid log type value", expectCode: params.ErrBadRequest, }} func (s *logSuite) TestGetLogsErrors(c *gc.C) { for i, test := range getLogsErrorsTests { c.Logf("test %d: %s", i, test.about) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("log" + test.querystring), Username: testUsername, Password: testPassword, ExpectStatus: test.expectStatus, ExpectBody: params.Error{ Message: test.expectMessage, Code: test.expectCode, }, }) } } func (s *logSuite) TestGetLogsErrorInvalidLog(c *gc.C) { // Add a non-parsable log message to the db directly. err := s.store.DB.Logs().Insert(mongodoc.Log{ Data: []byte("!"), Level: mongodoc.InfoLevel, Type: mongodoc.IngestionType, Time: time.Now(), }) c.Assert(err, gc.IsNil) // The log is just ignored. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("log"), Username: testUsername, Password: testPassword, ExpectStatus: http.StatusOK, ExpectBody: []params.LogResponse{}, }) } func (s *logSuite) TestPostLogs(c *gc.C) { // Prepare the request body. body := makeByteLogs(rawMessage("info data"), params.InfoLevel, params.IngestionType, []*charm.URL{ charm.MustParseURL("trusty/django"), charm.MustParseURL("utopic/rails"), }) // Send the request. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("log"), Method: "POST", Username: testUsername, Password: testPassword, Header: http.Header{ "Content-Type": {"application/json"}, }, Body: bytes.NewReader(body), ExpectStatus: http.StatusOK, }) // Ensure the log message has been added to the database. 
	// The single posted log entry must now be stored verbatim, with the
	// URL list expanded to include the base (series-less) URLs.
	var doc mongodoc.Log
	err := s.store.DB.Logs().Find(nil).One(&doc)
	c.Assert(err, gc.IsNil)
	c.Assert(string(doc.Data), gc.Equals, `"info data"`)
	c.Assert(doc.Level, gc.Equals, mongodoc.InfoLevel)
	c.Assert(doc.Type, gc.Equals, mongodoc.IngestionType)
	c.Assert(doc.URLs, jc.DeepEquals, []*charm.URL{
		charm.MustParseURL("trusty/django"),
		charm.MustParseURL("django"),
		charm.MustParseURL("utopic/rails"),
		charm.MustParseURL("rails"),
	})
}

// TestPostLogsMultipleEntries checks that a single POST carrying
// several log entries stores all of them.
func (s *logSuite) TestPostLogsMultipleEntries(c *gc.C) {
	// Prepare the request body.
	infoData := rawMessage("info data")
	warningData := rawMessage("warning data")
	logs := []params.Log{{
		Data:  &infoData,
		Level: params.InfoLevel,
		Type:  params.IngestionType,
	}, {
		Data:  &warningData,
		Level: params.WarningLevel,
		Type:  params.IngestionType,
	}}
	body, err := json.Marshal(logs)
	c.Assert(err, gc.IsNil)
	// Send the request.
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:  s.srv,
		URL:      storeURL("log"),
		Method:   "POST",
		Username: testUsername,
		Password: testPassword,
		Header: http.Header{
			"Content-Type": {"application/json"},
		},
		Body:         bytes.NewReader(body),
		ExpectStatus: http.StatusOK,
	})
	// Ensure the log messages have been added to the database.
var docs []mongodoc.Log err = s.store.DB.Logs().Find(nil).Sort("id").All(&docs) c.Assert(err, gc.IsNil) c.Assert(docs, gc.HasLen, 2) c.Assert(string(docs[0].Data), gc.Equals, string(infoData)) c.Assert(docs[0].Level, gc.Equals, mongodoc.InfoLevel) c.Assert(string(docs[1].Data), gc.Equals, string(warningData)) c.Assert(docs[1].Level, gc.Equals, mongodoc.WarningLevel) } var postLogsErrorsTests = []struct { about string contentType string body []byte expectStatus int expectMessage string expectCode params.ErrorCode }{{ about: "invalid content type", contentType: "application/zip", expectStatus: http.StatusBadRequest, expectMessage: `unexpected Content-Type "application/zip"; expected 'application/json'`, expectCode: params.ErrBadRequest, }, { about: "invalid body", body: []byte("!"), expectStatus: http.StatusBadRequest, expectMessage: "cannot unmarshal body: invalid character '!' looking for beginning of value", expectCode: params.ErrBadRequest, }, { about: "invalid log level", body: makeByteLogs(rawMessage("message"), params.LogLevel(42), params.IngestionType, nil), expectStatus: http.StatusBadRequest, expectMessage: "invalid log level", expectCode: params.ErrBadRequest, }, { about: "invalid log type", body: makeByteLogs(rawMessage("message"), params.WarningLevel, params.LogType(42), nil), expectStatus: http.StatusBadRequest, expectMessage: "invalid log type", expectCode: params.ErrBadRequest, }} func (s *logSuite) TestPostLogsErrors(c *gc.C) { url := storeURL("log") for i, test := range postLogsErrorsTests { c.Logf("test %d: %s", i, test.about) if test.contentType == "" { test.contentType = "application/json" } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: url, Method: "POST", Header: http.Header{ "Content-Type": {test.contentType}, }, Body: bytes.NewReader(test.body), Username: testUsername, Password: testPassword, ExpectStatus: test.expectStatus, ExpectBody: params.Error{ Message: test.expectMessage, Code: test.expectCode, }, }) } 
} func (s *logSuite) TestGetLogsUnauthorizedError(c *gc.C) { s.AssertEndpointAuth(c, httptesting.JSONCallParams{ URL: storeURL("log"), ExpectStatus: http.StatusOK, ExpectBody: []params.LogResponse{}, }) } func (s *logSuite) TestPostLogsUnauthorizedError(c *gc.C) { // Add a non-parsable log message to the db. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.noMacaroonSrv, URL: storeURL("log"), Method: "POST", Header: http.Header{ "Content-Type": {"application/json"}, }, ExpectStatus: http.StatusUnauthorized, ExpectBody: params.Error{ Message: "authentication failed: missing HTTP auth header", Code: params.ErrUnauthorized, }, }) } func makeByteLogs(data json.RawMessage, logLevel params.LogLevel, logType params.LogType, urls []*charm.URL) []byte { logs := []params.Log{{ Data: &data, Level: logLevel, Type: logType, URLs: urls, }} b, err := json.Marshal(logs) if err != nil { panic(err) } return b } ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/api.go�����������������������������0000664�0001750�0001750�00000146372�12672604603�024657� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "archive/zip" "bytes" "encoding/json" "io/ioutil" "net/http" "net/url" "strconv" "strings" "time" "github.com/juju/httprequest" "github.com/juju/idmclient" "github.com/juju/loggo" "github.com/juju/mempool" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/juju/charmstore.v5-unstable/audit" "gopkg.in/juju/charmstore.v5-unstable/internal/agent" "gopkg.in/juju/charmstore.v5-unstable/internal/cache" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/entitycache" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" "gopkg.in/juju/charmstore.v5-unstable/internal/router" ) var logger = loggo.GetLogger("charmstore.internal.v5") // reqHandlerPool holds a cache of ReqHandlers to save // on allocation time. When a handler is done with, // it is put back into the pool. var reqHandlerPool = mempool.Pool{ New: func() interface{} { return newReqHandler() }, } type Handler struct { // Pool holds the store pool that the handler was created // with. Pool *charmstore.Pool config charmstore.ServerParams locator bakery.PublicKeyLocator identityClient *idmclient.Client rootPath string // searchCache is a cache of search results keyed on the query // parameters of the search. It should only be used for searches // from unauthenticated users. searchCache *cache.Cache } // ReqHandler holds the context for a single HTTP request. // It uses an independent mgo session from the handler // used by other requests. type ReqHandler struct { // Router holds the router that the ReqHandler will use // to route HTTP requests. This is usually set by // Handler.NewReqHandler to the result of RouterHandlers. 
	Router *router.Router

	// Handler holds the Handler that the ReqHandler
	// is derived from.
	Handler *Handler

	// Store holds the charmstore Store instance
	// for the request, associated with the channel specified
	// in the request.
	Store *StoreWithChannel

	// auth holds the results of any authorization that
	// has been done on this request.
	auth authorization

	// cache holds the per-request entity cache.
	Cache *entitycache.Cache
}

const (
	DelegatableMacaroonExpiry = time.Minute
	reqHandlerCacheSize       = 50
)

// New returns a new instance of the v5 API handler using the given
// pool and server parameters, serving under rootPath.
func New(pool *charmstore.Pool, config charmstore.ServerParams, rootPath string) *Handler {
	h := &Handler{
		Pool:        pool,
		config:      config,
		rootPath:    rootPath,
		searchCache: cache.New(config.SearchCacheMaxAge),
		locator:     config.PublicKeyLocator,
		identityClient: idmclient.New(idmclient.NewParams{
			BaseURL: config.IdentityAPIURL,
			Client:  agent.NewClient(config.AgentUsername, config.AgentKey),
		}),
	}
	return h
}

// Close closes the Handler. It currently holds no resources that
// require explicit release.
func (h *Handler) Close() {
}

var (
	// RequiredEntityFields holds the entity fields that are always
	// fetched into the per-request entity cache.
	RequiredEntityFields = charmstore.FieldSelector(
		"baseurl",
		"user",
		"name",
		"revision",
		"series",
		"promulgated-revision",
		"promulgated-url",
		"development",
		"stable",
	)
	// RequiredBaseEntityFields holds the base-entity fields that are
	// always fetched into the per-request entity cache.
	RequiredBaseEntityFields = charmstore.FieldSelector(
		"user",
		"name",
		"channelacls",
		"channelentities",
		"promulgated",
	)
)

// StoreWithChannel associates a Store with a channel that will be used
// to resolve any channel-ambiguous requests.
type StoreWithChannel struct {
	*charmstore.Store
	Channel params.Channel
}

// FindBestEntity finds the entity for url, resolving any channel
// ambiguity with the store's associated channel.
func (s *StoreWithChannel) FindBestEntity(url *charm.URL, fields map[string]int) (*mongodoc.Entity, error) {
	return s.Store.FindBestEntity(url, s.Channel, fields)
}

// FindBaseEntity finds the base entity for url.
func (s *StoreWithChannel) FindBaseEntity(url *charm.URL, fields map[string]int) (*mongodoc.BaseEntity, error) {
	return s.Store.FindBaseEntity(url, fields)
}

// ValidChannels holds the set of all allowed channels
// that can be passed as a "?channel=" parameter.
var ValidChannels = map[params.Channel]bool{
	params.UnpublishedChannel: true,
	params.DevelopmentChannel: true,
	params.StableChannel:      true,
}

// NewReqHandler returns an instance of a *ReqHandler
// suitable for handling the given HTTP request. After use, the ReqHandler.Close
// method should be called to close it.
//
// If no handlers are available, it returns an error with
// a charmstore.ErrTooManySessions cause.
func (h *Handler) NewReqHandler(req *http.Request) (*ReqHandler, error) {
	// Parse the form eagerly and report failures: a malformed query
	// string or body is a client error, so return it as a bad request
	// rather than silently continuing with empty form values.
	if err := req.ParseForm(); err != nil {
		return nil, badRequestf(err, "cannot parse form")
	}
	// Validate all the values for channel, even though
	// most endpoints will only ever use the first one.
	// PUT to an archive is the notable exception.
	for _, ch := range req.Form["channel"] {
		if !ValidChannels[params.Channel(ch)] {
			return nil, badRequestf(nil, "invalid channel %q specified in request", ch)
		}
	}
	store, err := h.Pool.RequestStore()
	if err != nil {
		if errgo.Cause(err) == charmstore.ErrTooManySessions {
			return nil, errgo.WithCausef(err, params.ErrServiceUnavailable, "")
		}
		return nil, errgo.Mask(err)
	}
	// Reuse a pooled handler to avoid per-request allocation.
	rh := reqHandlerPool.Get().(*ReqHandler)
	rh.Handler = h
	rh.Store = &StoreWithChannel{
		Store:   store,
		Channel: params.Channel(req.Form.Get("channel")),
	}
	rh.Cache = entitycache.New(rh.Store)
	rh.Cache.AddEntityFields(RequiredEntityFields)
	rh.Cache.AddBaseEntityFields(RequiredBaseEntityFields)
	return rh, nil
}

// RouterHandlers returns router handlers that will route requests to
// the given ReqHandler. This is provided so that different API versions
// can override selected parts of the handlers to serve their own API
// while still using ReqHandler to serve the majority of the API.
func RouterHandlers(h *ReqHandler) *router.Handlers {
	resolveId := h.ResolvedIdHandler
	authId := h.AuthIdHandler
	return &router.Handlers{
		// Global endpoints are not associated with any entity id.
		Global: map[string]http.Handler{
			"changes/published":    router.HandleJSON(h.serveChangesPublished),
			"debug":                http.HandlerFunc(h.serveDebug),
			"debug/pprof/":         newPprofHandler(h),
			"debug/status":         router.HandleJSON(h.serveDebugStatus),
			"list":                 router.HandleJSON(h.serveList),
			"log":                  router.HandleErrors(h.serveLog),
			"logout":               http.HandlerFunc(logout),
			"search":               router.HandleJSON(h.serveSearch),
			"search/interesting":   http.HandlerFunc(h.serveSearchInteresting),
			"set-auth-cookie":      router.HandleErrors(h.serveSetAuthCookie),
			"stats/":               router.NotFoundHandler(),
			"stats/counter/":       router.HandleJSON(h.serveStatsCounter),
			"stats/update":         router.HandleErrors(h.serveStatsUpdate),
			"macaroon":             router.HandleJSON(h.serveMacaroon),
			"delegatable-macaroon": router.HandleJSON(h.serveDelegatableMacaroon),
			"whoami":               router.HandleJSON(h.serveWhoAmI),
		},
		// Id endpoints operate on a single (possibly resolved and
		// authorized) entity id.
		Id: map[string]router.IdHandler{
			"archive":     h.serveArchive,
			"archive/":    resolveId(authId(h.serveArchiveFile), "blobname", "blobhash"),
			"diagram.svg": resolveId(authId(h.serveDiagram), "bundledata"),
			"expand-id":   resolveId(authId(h.serveExpandId)),
			"icon.svg":    resolveId(authId(h.serveIcon), "contents", "blobname"),
			"publish":     resolveId(h.servePublish),
			"promulgate":  resolveId(h.serveAdminPromulgate),
			"readme":      resolveId(authId(h.serveReadMe), "contents", "blobname"),
			"resources":   resolveId(authId(h.serveResources)),
		},
		// Meta endpoints serve metadata; the trailing string arguments
		// name the mongodoc fields each handler needs, so the entity
		// cache can prefetch them.
		Meta: map[string]router.BulkIncludeHandler{
			"archive-size":         h.EntityHandler(h.metaArchiveSize, "size"),
			"archive-upload-time":  h.EntityHandler(h.metaArchiveUploadTime, "uploadtime"),
			"bundle-machine-count": h.EntityHandler(h.metaBundleMachineCount, "bundlemachinecount"),
			"bundle-metadata":      h.EntityHandler(h.metaBundleMetadata, "bundledata"),
			"bundles-containing":   h.EntityHandler(h.metaBundlesContaining),
			"bundle-unit-count":    h.EntityHandler(h.metaBundleUnitCount, "bundleunitcount"),
			"published":            h.EntityHandler(h.metaPublished, "development", "stable"),
			"charm-actions":        h.EntityHandler(h.metaCharmActions, "charmactions"),
			"charm-config":         h.EntityHandler(h.metaCharmConfig, "charmconfig"),
			"charm-metadata":       h.EntityHandler(h.metaCharmMetadata, "charmmeta"),
			"charm-related":        h.EntityHandler(h.metaCharmRelated, "charmprovidedinterfaces", "charmrequiredinterfaces"),
			"common-info": h.puttableBaseEntityHandler(
				h.metaCommonInfo,
				h.putMetaCommonInfo,
				"commoninfo",
			),
			"common-info/": h.puttableBaseEntityHandler(
				h.metaCommonInfoWithKey,
				h.putMetaCommonInfoWithKey,
				"commoninfo",
			),
			"extra-info": h.puttableEntityHandler(
				h.metaExtraInfo,
				h.putMetaExtraInfo,
				"extrainfo",
			),
			"extra-info/": h.puttableEntityHandler(
				h.metaExtraInfoWithKey,
				h.putMetaExtraInfoWithKey,
				"extrainfo",
			),
			"hash":             h.EntityHandler(h.metaHash, "blobhash"),
			"hash256":          h.EntityHandler(h.metaHash256, "blobhash256"),
			"id":               h.EntityHandler(h.metaId, "_id"),
			"id-name":          h.EntityHandler(h.metaIdName, "_id"),
			"id-user":          h.EntityHandler(h.metaIdUser, "_id"),
			"id-revision":      h.EntityHandler(h.metaIdRevision, "_id"),
			"id-series":        h.EntityHandler(h.metaIdSeries, "_id"),
			"manifest":         h.EntityHandler(h.metaManifest, "blobname"),
			"perm":             h.puttableBaseEntityHandler(h.metaPerm, h.putMetaPerm, "channelacls"),
			"perm/":            h.puttableBaseEntityHandler(h.metaPermWithKey, h.putMetaPermWithKey, "channelacls"),
			"promulgated":      h.baseEntityHandler(h.metaPromulgated, "promulgated"),
			"resources":        h.EntityHandler(h.metaResources, "charmmeta"),
			"revision-info":    router.SingleIncludeHandler(h.metaRevisionInfo),
			"stats":            h.EntityHandler(h.metaStats),
			"supported-series": h.EntityHandler(h.metaSupportedSeries, "supportedseries"),
			"tags":             h.EntityHandler(h.metaTags, "charmmeta", "bundledata"),
			"terms":            h.EntityHandler(h.metaTerms, "charmmeta"),
			// endpoints not yet implemented:
			// "color": router.SingleIncludeHandler(h.metaColor),
		},
	}
}

// newReqHandler returns a new instance of the v5 API handler.
// The returned value has nil handler and store fields.
func newReqHandler() *ReqHandler {
	var h ReqHandler
	h.Router = router.New(RouterHandlers(&h), &h)
	return &h
}

// ServeHTTP implements http.Handler by first retrieving a
// request-specific instance of ReqHandler and
// calling ServeHTTP on that.
func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	rh, err := h.NewReqHandler(req)
	if err != nil {
		router.WriteError(w, err)
		return
	}
	// Close returns the request handler to the pool.
	defer rh.Close()
	rh.ServeHTTP(w, req)
}

// ServeHTTP implements http.Handler by calling h.Router.ServeHTTP.
func (h *ReqHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	h.Router.ServeHTTP(w, req)
}

// NewAPIHandler returns a new Handler as an http Handler.
// It is defined for the convenience of callers that require a
// charmstore.NewAPIHandlerFunc.
func NewAPIHandler(pool *charmstore.Pool, config charmstore.ServerParams, rootPath string) charmstore.HTTPCloseHandler {
	return New(pool, config, rootPath)
}

// Close closes the ReqHandler. This should always be called when the
// ReqHandler is done with.
func (h *ReqHandler) Close() {
	h.Store.Close()
	h.Cache.Close()
	h.Reset()
	reqHandlerPool.Put(h)
}

// Reset resets the request-specific fields of the ReqHandler
// so that it's suitable for putting back into a pool for reuse.
func (h *ReqHandler) Reset() {
	h.Store = nil
	h.Handler = nil
	h.Cache = nil
	h.auth = authorization{}
}

// ResolveURL implements router.Context.ResolveURL.
func (h *ReqHandler) ResolveURL(url *charm.URL) (*router.ResolvedURL, error) {
	return resolveURL(h.Cache, url)
}

// ResolveURLs implements router.Context.ResolveURLs.
// ResolveURLs implements router.Context.ResolveURLs.
//
// A URL that cannot be found is left as a nil entry in the returned
// slice rather than producing an error, so callers must check for
// nil elements. Any other resolution error aborts the whole call.
func (h *ReqHandler) ResolveURLs(urls []*charm.URL) ([]*router.ResolvedURL, error) {
	// Prime the cache with all the URLs up front so that the
	// individual resolveURL calls below can be satisfied from it.
	h.Cache.StartFetch(urls)
	rurls := make([]*router.ResolvedURL, len(urls))
	for i, url := range urls {
		var err error
		rurls[i], err = resolveURL(h.Cache, url)
		if err != nil && errgo.Cause(err) != params.ErrNotFound {
			return nil, err
		}
	}
	return rurls, nil
}

// WillIncludeMetadata implements router.Context.WillIncludeMetadata.
//
// It tells the entity cache which fields the handlers for the given
// metadata includes will need, so that those fields can be fetched
// preemptively in a single query rather than one query per include.
func (h *ReqHandler) WillIncludeMetadata(includes []string) {
	for _, inc := range includes {
		// Find what handler will be used for the include
		// and prime the cache so that it will preemptively fetch
		// any fields involved.
		fi, ok := h.Router.MetaHandler(inc).(*router.FieldIncludeHandler)
		if !ok || len(fi.P.Fields) == 0 {
			continue
		}
		fields := make(map[string]int)
		for _, f := range fi.P.Fields {
			fields[f] = 1
		}
		// The handler's grouping key tells us whether the fields
		// belong to the entity or the base-entity document.
		switch fi.P.Key {
		case entityHandlerKey{}:
			h.Cache.AddEntityFields(fields)
		case baseEntityHandlerKey{}:
			h.Cache.AddBaseEntityFields(fields)
		}
	}
}

// resolveURL implements URL resolving for the ReqHandler.
// It's defined as a separate function so it can be more
// easily unit-tested.
func resolveURL(cache *entitycache.Cache, url *charm.URL) (*router.ResolvedURL, error) {
	// We've added promulgated-url as a required field, so
	// we'll always get it from the Entity result.
	entity, err := cache.Entity(url, nil)
	if err != nil {
		return nil, errgo.Mask(err, errgo.Is(params.ErrNotFound))
	}
	rurl := &router.ResolvedURL{
		URL:                 *entity.URL,
		PromulgatedRevision: -1,
	}
	// A URL without a user is the promulgated form, so report the
	// promulgated revision in that case.
	if url.User == "" {
		rurl.PromulgatedRevision = entity.PromulgatedRevision
	}
	// Ensure the base URL is in the cache too, so that
	// its canonical URL is in the cache, so that when
	// we come to look up the base URL from the resolved
	// URL, it will hit the cached base entity.
	// We don't actually care if it succeeds or fails, so we ignore
	// the result.
	cache.BaseEntity(entity.BaseURL, nil)
	return rurl, nil
}

// EntityHandlerFunc is the type of a function that serves a metadata
// GET request for a single entity (charm or bundle revision).
type EntityHandlerFunc func(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error)

// baseEntityHandlerFunc is the base-entity counterpart of
// EntityHandlerFunc.
type baseEntityHandlerFunc func(entity *mongodoc.BaseEntity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error)

// EntityHandler returns a Handler that calls f with a *mongodoc.Entity that
// contains at least the given fields. It allows only GET requests.
func (h *ReqHandler) EntityHandler(f EntityHandlerFunc, fields ...string) router.BulkIncludeHandler {
	return h.puttableEntityHandler(f, nil, fields...)
}

// entityHandlerKey is the bulk-include grouping key used for
// entity-based metadata handlers (see WillIncludeMetadata).
type entityHandlerKey struct{}

// puttableEntityHandler returns a bulk include handler that serves
// GET requests with get and PUT requests with handlePut. A nil
// handlePut disallows PUT.
func (h *ReqHandler) puttableEntityHandler(get EntityHandlerFunc, handlePut router.FieldPutFunc, fields ...string) router.BulkIncludeHandler {
	handleGet := func(doc interface{}, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
		edoc := doc.(*mongodoc.Entity)
		val, err := get(edoc, id, path, flags, req)
		return val, errgo.Mask(err, errgo.Any)
	}
	return router.NewFieldIncludeHandler(router.FieldIncludeHandlerParams{
		Key:          entityHandlerKey{},
		Query:        h.entityQuery,
		Fields:       fields,
		HandleGet:    handleGet,
		HandlePut:    handlePut,
		Update:       h.updateEntity,
		UpdateSearch: h.updateSearch,
	})
}

// baseEntityHandler returns a Handler that calls f with a *mongodoc.BaseEntity that
// contains at least the given fields. It allows only GET requests.
func (h *ReqHandler) baseEntityHandler(f baseEntityHandlerFunc, fields ...string) router.BulkIncludeHandler {
	return h.puttableBaseEntityHandler(f, nil, fields...)
}

// baseEntityHandlerKey is the bulk-include grouping key used for
// base-entity-based metadata handlers (see WillIncludeMetadata).
type baseEntityHandlerKey struct{}

// puttableBaseEntityHandler is the base-entity counterpart of
// puttableEntityHandler.
func (h *ReqHandler) puttableBaseEntityHandler(get baseEntityHandlerFunc, handlePut router.FieldPutFunc, fields ...string) router.BulkIncludeHandler {
	handleGet := func(doc interface{}, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
		edoc := doc.(*mongodoc.BaseEntity)
		val, err := get(edoc, id, path, flags, req)
		return val, errgo.Mask(err, errgo.Any)
	}
	return router.NewFieldIncludeHandler(router.FieldIncludeHandlerParams{
		Key:          baseEntityHandlerKey{},
		Query:        h.baseEntityQuery,
		Fields:       fields,
		HandleGet:    handleGet,
		HandlePut:    handlePut,
		Update:       h.updateBaseEntity,
		UpdateSearch: h.updateSearchBase,
	})
}

// processEntries adds each of the given entries to the audit log.
func (h *ReqHandler) processEntries(entries []audit.Entry) {
	for _, e := range entries {
		h.addAudit(e)
	}
}

// updateBaseEntity applies the given field updates to the base
// entity with the given id and then records the given audit entries.
func (h *ReqHandler) updateBaseEntity(id *router.ResolvedURL, fields map[string]interface{}, entries []audit.Entry) error {
	if err := h.Store.UpdateBaseEntity(id, entityUpdateOp(fields)); err != nil {
		return errgo.Notef(err, "cannot update base entity %q", id)
	}
	h.processEntries(entries)
	return nil
}

// updateEntity applies the given field updates to the entity with
// the given id, updates the corresponding search document fields,
// and then records the given audit entries.
func (h *ReqHandler) updateEntity(id *router.ResolvedURL, fields map[string]interface{}, entries []audit.Entry) error {
	err := h.Store.UpdateEntity(id, entityUpdateOp(fields))
	if err != nil {
		return errgo.Notef(err, "cannot update %q", &id.URL)
	}
	err = h.Store.UpdateSearchFields(id, fields)
	if err != nil {
		return errgo.Notef(err, "cannot update %q", &id.URL)
	}
	h.processEntries(entries)
	return nil
}

// entityUpdateOp returns a mongo update operation that
// sets the given fields. Any nil fields will be unset.
func entityUpdateOp(fields map[string]interface{}) bson.D { setFields := make(bson.D, 0, len(fields)) var unsetFields bson.D for name, val := range fields { if val != nil { setFields = append(setFields, bson.DocElem{name, val}) } else { unsetFields = append(unsetFields, bson.DocElem{name, val}) } } op := make(bson.D, 0, 2) if len(setFields) > 0 { op = append(op, bson.DocElem{"$set", setFields}) } if len(unsetFields) > 0 { op = append(op, bson.DocElem{"$unset", unsetFields}) } return op } func (h *ReqHandler) updateSearch(id *router.ResolvedURL, fields map[string]interface{}) error { return h.Store.UpdateSearch(id) } // updateSearchBase updates the search records for all entities with // the same base URL as the given id. func (h *ReqHandler) updateSearchBase(id *router.ResolvedURL, fields map[string]interface{}) error { baseURL := id.URL baseURL.Series = "" baseURL.Revision = -1 if err := h.Store.UpdateSearchBaseURL(&baseURL); err != nil { return errgo.Mask(err) } return nil } func (h *ReqHandler) baseEntityQuery(id *router.ResolvedURL, fields map[string]int, req *http.Request) (interface{}, error) { val, err := h.Cache.BaseEntity(&id.URL, fields) if errgo.Cause(err) == params.ErrNotFound { return nil, errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %s", id) } if err != nil { return nil, errgo.Mask(err) } return val, nil } func (h *ReqHandler) entityQuery(id *router.ResolvedURL, selector map[string]int, req *http.Request) (interface{}, error) { val, err := h.Cache.Entity(&id.URL, selector) if errgo.Cause(err) == params.ErrNotFound { logger.Infof("entity %#v not found: %#v", id, err) return nil, errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %s", id) } if err != nil { return nil, errgo.Mask(err) } return val, nil } var errNotImplemented = errgo.Newf("method not implemented") // GET /debug // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-debug func (h *ReqHandler) serveDebug(w http.ResponseWriter, 
req *http.Request) { router.WriteError(w, errNotImplemented) } // GET id/expand-id // https://docs.google.com/a/canonical.com/document/d/1TgRA7jW_mmXoKH3JiwBbtPvQu7WiM6XMrz1wSrhTMXw/edit#bookmark=id.4xdnvxphb2si func (h *ReqHandler) serveExpandId(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { baseURL := id.PreferredURL() baseURL.Revision = -1 baseURL.Series = "" // baseURL now represents the base URL of the given id; // it will be a promulgated URL iff the original URL was // specified without a user, which will cause EntitiesQuery // to return entities that match appropriately. // Retrieve all the entities with the same base URL. q := h.Store.EntitiesQuery(baseURL).Select(bson.D{{"_id", 1}, {"promulgated-url", 1}}) if id.PromulgatedRevision != -1 { q = q.Sort("-series", "-promulgated-revision") } else { q = q.Sort("-series", "-revision") } var docs []*mongodoc.Entity err := q.All(&docs) if err != nil && errgo.Cause(err) != mgo.ErrNotFound { return errgo.Mask(err) } // Collect all the expanded identifiers for each entity. response := make([]params.ExpandedId, 0, len(docs)) for _, doc := range docs { if err := h.AuthorizeEntity(charmstore.EntityResolvedURL(doc), req); err != nil { continue } url := doc.PreferredURL(id.PromulgatedRevision != -1) response = append(response, params.ExpandedId{Id: url.String()}) } // Write the response in JSON format. return httprequest.WriteJSON(w, http.StatusOK, response) } func badRequestf(underlying error, f string, a ...interface{}) error { err := errgo.WithCausef(underlying, params.ErrBadRequest, f, a...) 
err.(*errgo.Err).SetLocation(1) return err } // GET id/meta/charm-metadata // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-metadata func (h *ReqHandler) metaCharmMetadata(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return entity.CharmMeta, nil } // GET id/meta/bundle-metadata // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundle-metadata func (h *ReqHandler) metaBundleMetadata(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return entity.BundleData, nil } // GET id/meta/bundle-unit-count // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundle-unit-count func (h *ReqHandler) metaBundleUnitCount(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return bundleCount(entity.BundleUnitCount), nil } // GET id/meta/bundle-machine-count // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundle-machine-count func (h *ReqHandler) metaBundleMachineCount(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return bundleCount(entity.BundleMachineCount), nil } func bundleCount(x *int) interface{} { if x == nil { return nil } return params.BundleCount{ Count: *x, } } // GET id/meta/manifest // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetamanifest func (h *ReqHandler) metaManifest(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { r, size, err := h.Store.BlobStore.Open(entity.BlobName) if err != nil { return nil, errgo.Notef(err, "cannot open archive data for %s", id) } defer r.Close() zipReader, err := zip.NewReader(charmstore.ReaderAtSeeker(r), size) if err != nil { return nil, errgo.Notef(err, "cannot read archive data 
for %s", id) } // Collect the files. manifest := make([]params.ManifestFile, 0, len(zipReader.File)) for _, file := range zipReader.File { fileInfo := file.FileInfo() if fileInfo.IsDir() { continue } manifest = append(manifest, params.ManifestFile{ Name: file.Name, Size: fileInfo.Size(), }) } return manifest, nil } // GET id/meta/charm-actions // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-actions func (h *ReqHandler) metaCharmActions(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return entity.CharmActions, nil } // GET id/meta/charm-config // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-config func (h *ReqHandler) metaCharmConfig(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return entity.CharmConfig, nil } // GET id/meta/terms // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaterms func (h *ReqHandler) metaTerms(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { if entity.URL.Series == "bundle" { return nil, nil } if entity.CharmMeta == nil || len(entity.CharmMeta.Terms) == 0 { return []string{}, nil } return entity.CharmMeta.Terms, nil } // GET id/meta/color func (h *ReqHandler) metaColor(id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return nil, errNotImplemented } // GET id/meta/archive-size // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaarchive-size func (h *ReqHandler) metaArchiveSize(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return ¶ms.ArchiveSizeResponse{ Size: entity.Size, }, nil } // GET id/meta/hash // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetahash func (h *ReqHandler) metaHash(entity 
*mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return ¶ms.HashResponse{ Sum: entity.BlobHash, }, nil } // GET id/meta/hash256 // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetahash256 func (h *ReqHandler) metaHash256(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return ¶ms.HashResponse{ Sum: entity.BlobHash256, }, nil } // GET id/meta/tags // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetatags func (h *ReqHandler) metaTags(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { var tags []string switch { case id.URL.Series == "bundle": tags = entity.BundleData.Tags case len(entity.CharmMeta.Tags) > 0: // TODO only return whitelisted tags. tags = entity.CharmMeta.Tags default: tags = entity.CharmMeta.Categories } return params.TagsResponse{ Tags: tags, }, nil } // GET id/meta/stats/ // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetastats func (h *ReqHandler) metaStats(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { // Retrieve the aggregated downloads count for the specific revision. refresh, err := router.ParseBool(flags.Get("refresh")) if err != nil { return charmstore.SearchParams{}, badRequestf(err, "invalid refresh parameter") } counts, countsAllRevisions, err := h.Store.ArchiveDownloadCounts(id.PreferredURL(), refresh) if err != nil { return nil, errgo.Mask(err) } // Return the response. 
return ¶ms.StatsResponse{ ArchiveDownloadCount: counts.Total, ArchiveDownload: params.StatsCount{ Total: counts.Total, Day: counts.LastDay, Week: counts.LastWeek, Month: counts.LastMonth, }, ArchiveDownloadAllRevisions: params.StatsCount{ Total: countsAllRevisions.Total, Day: countsAllRevisions.LastDay, Week: countsAllRevisions.LastWeek, Month: countsAllRevisions.LastMonth, }, }, nil } // GET id/meta/revision-info // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetarevision-info func (h *ReqHandler) metaRevisionInfo(id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { searchURL := id.PreferredURL() searchURL.Revision = -1 q := h.Store.EntitiesQuery(searchURL) if id.PromulgatedRevision != -1 { q = q.Sort("-promulgated-revision") } else { q = q.Sort("-revision") } var response params.RevisionInfoResponse iter := h.Cache.Iter(q, nil) for iter.Next() { e := iter.Entity() rurl := charmstore.EntityResolvedURL(e) if err := h.AuthorizeEntity(rurl, req); err != nil { // We're not authorized to see the entity, so leave it out. // Note that the only time this will happen is when // the original URL is promulgated and has a development channel, // the charm has changed owners, and the old owner and // the new one have different dev ACLs. It's easiest // and most reliable just to check everything though. 
continue } if id.PromulgatedRevision != -1 { response.Revisions = append(response.Revisions, rurl.PromulgatedURL()) } else { response.Revisions = append(response.Revisions, &rurl.URL) } } if err := iter.Err(); err != nil { return nil, errgo.Notef(err, "iteration failed") } return &response, nil } // GET id/meta/id-user // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-user func (h *ReqHandler) metaIdUser(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return params.IdUserResponse{ User: id.PreferredURL().User, }, nil } // GET id/meta/id-series // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-series func (h *ReqHandler) metaIdSeries(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return params.IdSeriesResponse{ Series: id.PreferredURL().Series, }, nil } // GET id/meta/id // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid func (h *ReqHandler) metaId(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { u := id.PreferredURL() return params.IdResponse{ Id: u, User: u.User, Series: u.Series, Name: u.Name, Revision: u.Revision, }, nil } // GET id/meta/id-name // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-name func (h *ReqHandler) metaIdName(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return params.IdNameResponse{ Name: id.URL.Name, }, nil } // GET id/meta/id-revision // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-revision func (h *ReqHandler) metaIdRevision(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return params.IdRevisionResponse{ Revision: id.PreferredURL().Revision, }, nil } // GET 
id/meta/supported-series // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetasupported-series func (h *ReqHandler) metaSupportedSeries(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { if entity.URL.Series == "bundle" { return nil, nil } return ¶ms.SupportedSeriesResponse{ SupportedSeries: entity.SupportedSeries, }, nil } // GET id/meta/extra-info // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaextra-info func (h *ReqHandler) metaExtraInfo(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { // The extra-info is stored in mongo as simple byte // slices, so convert the values to json.RawMessages // so that the client will see the original JSON. m := make(map[string]*json.RawMessage) for key, val := range entity.ExtraInfo { jmsg := json.RawMessage(val) m[key] = &jmsg } return m, nil } // GET id/meta/extra-info/key // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaextra-infokey func (h *ReqHandler) metaExtraInfoWithKey(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { path = strings.TrimPrefix(path, "/") var data json.RawMessage = entity.ExtraInfo[path] if len(data) == 0 { return nil, nil } return &data, nil } // PUT id/meta/extra-info // https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmetaextra-info func (h *ReqHandler) putMetaExtraInfo(id *router.ResolvedURL, path string, val *json.RawMessage, updater *router.FieldUpdater, req *http.Request) error { var fields map[string]*json.RawMessage if err := json.Unmarshal(*val, &fields); err != nil { return errgo.Notef(err, "cannot unmarshal extra-info body") } // Check all the fields are OK before adding any fields to be updated. 
for key := range fields { if err := checkExtraInfoKey(key, "extra-info"); err != nil { return err } } for key, val := range fields { if val == nil { updater.UpdateField("extrainfo."+key, nil, nil) } else { updater.UpdateField("extrainfo."+key, *val, nil) } } return nil } var nullBytes = []byte("null") // PUT id/meta/extra-info/key // https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmetaextra-infokey func (h *ReqHandler) putMetaExtraInfoWithKey(id *router.ResolvedURL, path string, val *json.RawMessage, updater *router.FieldUpdater, req *http.Request) error { key := strings.TrimPrefix(path, "/") if err := checkExtraInfoKey(key, "extra-info"); err != nil { return err } // If the user puts null, we treat that as if they want to // delete the field. if val == nil || bytes.Equal(*val, nullBytes) { updater.UpdateField("extrainfo."+key, nil, nil) } else { updater.UpdateField("extrainfo."+key, *val, nil) } return nil } // GET id/meta/common-info // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacommon-info func (h *ReqHandler) metaCommonInfo(entity *mongodoc.BaseEntity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { // The common-info is stored in mongo as simple byte // slices, so convert the values to json.RawMessages // so that the client will see the original JSON. 
m := make(map[string]*json.RawMessage) for key, val := range entity.CommonInfo { jmsg := json.RawMessage(val) m[key] = &jmsg } return m, nil } // GET id/meta/common-info/key // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacommon-infokey func (h *ReqHandler) metaCommonInfoWithKey(entity *mongodoc.BaseEntity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { path = strings.TrimPrefix(path, "/") var data json.RawMessage = entity.CommonInfo[path] if len(data) == 0 { return nil, nil } return &data, nil } // PUT id/meta/common-info // https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmetacommon-info func (h *ReqHandler) putMetaCommonInfo(id *router.ResolvedURL, path string, val *json.RawMessage, updater *router.FieldUpdater, req *http.Request) error { var fields map[string]*json.RawMessage if err := json.Unmarshal(*val, &fields); err != nil { return errgo.Notef(err, "cannot unmarshal common-info body") } // Check all the fields are OK before adding any fields to be updated. for key := range fields { if err := checkExtraInfoKey(key, "common-info"); err != nil { return err } } for key, val := range fields { if val == nil { updater.UpdateField("commoninfo."+key, nil, nil) } else { updater.UpdateField("commoninfo."+key, *val, nil) } } return nil } // PUT id/meta/common-info/key // https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmetacommon-infokey func (h *ReqHandler) putMetaCommonInfoWithKey(id *router.ResolvedURL, path string, val *json.RawMessage, updater *router.FieldUpdater, req *http.Request) error { key := strings.TrimPrefix(path, "/") if err := checkExtraInfoKey(key, "common-info"); err != nil { return err } // If the user puts null, we treat that as if they want to // delete the field. 
if val == nil || bytes.Equal(*val, nullBytes) { updater.UpdateField("commoninfo."+key, nil, nil) } else { updater.UpdateField("commoninfo."+key, *val, nil) } return nil } func checkExtraInfoKey(key string, field string) error { if strings.ContainsAny(key, "./$") { return errgo.WithCausef(nil, params.ErrBadRequest, "bad key for "+field) } return nil } // GET id/meta/perm // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaperm func (h *ReqHandler) metaPerm(entity *mongodoc.BaseEntity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { ch, err := h.entityChannel(id) if err != nil { return nil, errgo.Mask(err) } acls := entity.ChannelACLs[ch] return params.PermResponse{ Read: acls.Read, Write: acls.Write, }, nil } // PUT id/meta/perm // https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmeta func (h *ReqHandler) putMetaPerm(id *router.ResolvedURL, path string, val *json.RawMessage, updater *router.FieldUpdater, req *http.Request) error { var perms params.PermRequest if err := json.Unmarshal(*val, &perms); err != nil { return errgo.Mask(err) } ch, err := h.entityChannel(id) if err != nil { return errgo.Mask(err) } // TODO use only one UpdateField operation? 
updater.UpdateField(string("channelacls."+ch+".read"), perms.Read, &audit.Entry{ Op: audit.OpSetPerm, Entity: &id.URL, ACL: &audit.ACL{ Read: perms.Read, }, }) updater.UpdateField(string("channelacls."+ch+".write"), perms.Write, &audit.Entry{ Op: audit.OpSetPerm, Entity: &id.URL, ACL: &audit.ACL{ Write: perms.Write, }, }) updater.UpdateSearch() return nil } // GET id/meta/promulgated // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetapromulgated func (h *ReqHandler) metaPromulgated(entity *mongodoc.BaseEntity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return params.PromulgatedResponse{ Promulgated: bool(entity.Promulgated), }, nil } // GET id/meta/perm/key // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetapermkey func (h *ReqHandler) metaPermWithKey(entity *mongodoc.BaseEntity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { ch, err := h.entityChannel(id) if err != nil { return nil, errgo.Mask(err) } acls := entity.ChannelACLs[ch] switch path { case "/read": return acls.Read, nil case "/write": return acls.Write, nil } return nil, errgo.WithCausef(nil, params.ErrNotFound, "unknown permission") } // PUT id/meta/perm/key // https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmetapermkey func (h *ReqHandler) putMetaPermWithKey(id *router.ResolvedURL, path string, val *json.RawMessage, updater *router.FieldUpdater, req *http.Request) error { ch, err := h.entityChannel(id) if err != nil { return errgo.Mask(err) } var perms []string if err := json.Unmarshal(*val, &perms); err != nil { return errgo.Mask(err) } switch path { case "/read": updater.UpdateField(string("channelacls."+ch+".read"), perms, &audit.Entry{ Op: audit.OpSetPerm, Entity: &id.URL, ACL: &audit.ACL{ Read: perms, }, }) updater.UpdateSearch() return nil case "/write": updater.UpdateField(string("channelacls."+ch+".write"), perms, &audit.Entry{ Op: 
audit.OpSetPerm, Entity: &id.URL, ACL: &audit.ACL{ Write: perms, }, }) return nil } return errgo.WithCausef(nil, params.ErrNotFound, "unknown permission") } // GET id/meta/published // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetapublished func (h *ReqHandler) metaPublished(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { baseEntity, err := h.Cache.BaseEntity(entity.URL, charmstore.FieldSelector("channelentities")) if err != nil { return nil, errgo.Mask(err) } info := make([]params.PublishedInfo, 0, 2) if entity.Development { info = append(info, params.PublishedInfo{ Channel: params.DevelopmentChannel, }) } if entity.Stable { info = append(info, params.PublishedInfo{ Channel: params.StableChannel, }) } for i, pinfo := range info { // The entity is current for a channel if any series within // a channel refers to the entity. for _, url := range baseEntity.ChannelEntities[pinfo.Channel] { if *url == *entity.URL { info[i].Current = true } } } return ¶ms.PublishedResponse{ Info: info, }, nil } // GET id/meta/archive-upload-time // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaarchive-upload-time func (h *ReqHandler) metaArchiveUploadTime(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { return ¶ms.ArchiveUploadTimeResponse{ UploadTime: entity.UploadTime.UTC(), }, nil } // GET changes/published[?limit=$count][&from=$fromdate][&to=$todate] // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-changespublished func (h *ReqHandler) serveChangesPublished(_ http.Header, r *http.Request) (interface{}, error) { start, stop, err := parseDateRange(r.Form) if err != nil { return nil, errgo.Mask(err, errgo.Is(params.ErrBadRequest)) } limit := -1 if limitStr := r.Form.Get("limit"); limitStr != "" { limit, err = strconv.Atoi(limitStr) if err != nil || limit <= 0 { return nil, badRequestf(nil, 
"invalid 'limit' value") } } var tquery bson.D if !start.IsZero() { tquery = make(bson.D, 0, 2) tquery = append(tquery, bson.DocElem{ Name: "$gte", Value: start, }) } if !stop.IsZero() { tquery = append(tquery, bson.DocElem{ Name: "$lte", Value: stop, }) } var findQuery bson.D if len(tquery) > 0 { findQuery = bson.D{{"uploadtime", tquery}} } query := h.Store.DB.Entities(). Find(findQuery). Sort("-uploadtime") iter := h.Cache.Iter(query, charmstore.FieldSelector("uploadtime")) results := []params.Published{} var count int for iter.Next() { entity := iter.Entity() // Ignore entities that aren't readable by the current user. if err := h.AuthorizeEntity(charmstore.EntityResolvedURL(entity), r); err != nil { continue } results = append(results, params.Published{ Id: entity.URL, PublishTime: entity.UploadTime.UTC(), }) count++ if limit > 0 && limit <= count { iter.Close() break } } if err := iter.Err(); err != nil { return nil, errgo.Mask(err) } return results, nil } // GET /macaroon // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-macaroon func (h *ReqHandler) serveMacaroon(_ http.Header, _ *http.Request) (interface{}, error) { // will return a macaroon that will enable access to everything except archives // of charms that require agreement to terms and conditions. return h.newMacaroon([]checkers.Caveat{checkers.DenyCaveat(OpAccessCharmWithTerms)}...) } // GET /delegatable-macaroon // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-delegatable-macaroon func (h *ReqHandler) serveDelegatableMacaroon(_ http.Header, req *http.Request) (interface{}, error) { values, err := url.ParseQuery(req.URL.RawQuery) if err != nil { return nil, errgo.Mask(err) } entityIds := values["id"] // No entity ids, so we provide a macaroon that's good for any entity that the // user can access, as long as that entity doesn't have terms and conditions. 
if len(entityIds) == 0 { auth, err := h.authorize(req, []string{params.Everyone}, true, nil) if err != nil { return nil, errgo.Mask(err, errgo.Any) } if auth.Username == "" { return nil, errgo.WithCausef(nil, params.ErrForbidden, "delegatable macaroon is not obtainable using admin credentials") } // TODO propagate expiry time from macaroons in request. m, err := h.Store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ checkers.DeclaredCaveat(UsernameAttr, auth.Username), checkers.TimeBeforeCaveat(time.Now().Add(DelegatableMacaroonExpiry)), checkers.DenyCaveat(OpAccessCharmWithTerms), }) if err != nil { return nil, errgo.Mask(err) } return m, nil } resolvedURLs := make([]*router.ResolvedURL, len(entityIds)) for i, id := range entityIds { charmRef, err := charm.ParseURL(id) if err != nil { return nil, errgo.WithCausef(err, params.ErrBadRequest, `bad "id" parameter`) } resolvedURL, err := h.ResolveURL(charmRef) if err != nil { return nil, errgo.Mask(err) } resolvedURLs[i] = resolvedURL } // Note that we require authorization even though we allow // anyone to obtain a delegatable macaroon. This means // that we will be able to add the declared caveats to // the returned macaroon. auth, err := h.AuthorizeEntityAndTerms(req, resolvedURLs) if err != nil { return nil, errgo.Mask(err, errgo.Any) } if auth.Username == "" { return nil, errgo.WithCausef(nil, params.ErrForbidden, "delegatable macaroon is not obtainable using admin credentials") } resolvedURLstrings := make([]string, len(resolvedURLs)) for i, resolvedURL := range resolvedURLs { resolvedURLstrings[i] = resolvedURL.URL.String() } // TODO propagate expiry time from macaroons in request. 
m, err := h.Store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ checkers.DeclaredCaveat(UsernameAttr, auth.Username), checkers.TimeBeforeCaveat(time.Now().Add(DelegatableMacaroonExpiry)), checkers.Caveat{Condition: "is-entity " + strings.Join(resolvedURLstrings, " ")}, }) if err != nil { return nil, errgo.Mask(err) } return m, nil } // GET /whoami // See https://github.com/juju/charmstore/blob/v4/docs/API.md#whoami func (h *ReqHandler) serveWhoAmI(_ http.Header, req *http.Request) (interface{}, error) { auth, err := h.authorize(req, []string{params.Everyone}, true, nil) if err != nil { return nil, errgo.Mask(err, errgo.Any) } if auth.Admin { return nil, errgo.WithCausef(nil, params.ErrForbidden, "admin credentials used") } groups, err := h.GroupsForUser(auth.Username) if err != nil { return nil, errgo.Mask(err, errgo.Any) } return params.WhoAmIResponse{ User: auth.Username, Groups: groups, }, nil } // PUT id/promulgate // See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idpromulgate func (h *ReqHandler) serveAdminPromulgate(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { if _, err := h.authorize(req, []string{PromulgatorsGroup}, false, id); err != nil { return errgo.Mask(err, errgo.Any) } if req.Method != "PUT" { return errgo.WithCausef(nil, params.ErrMethodNotAllowed, "%s not allowed", req.Method) } data, err := ioutil.ReadAll(req.Body) if err != nil { return errgo.Mask(err) } var promulgate params.PromulgateRequest if err := json.Unmarshal(data, &promulgate); err != nil { return errgo.WithCausef(err, params.ErrBadRequest, "") } if err := h.Store.SetPromulgated(id, promulgate.Promulgated); err != nil { return errgo.Mask(err, errgo.Any) } if promulgate.Promulgated { // Set write permissions to promulgators only, so that // the user cannot just publish newer promulgated // versions of the charm or bundle. Promulgators are // responsible of reviewing and publishing subsequent // revisions of this entity. 
if err := h.updateBaseEntity(id, map[string]interface{}{ "channelacls.stable.write": []string{PromulgatorsGroup}, }, nil); err != nil { return errgo.Notef(err, "cannot set permissions for %q", id) } } // Build an audit entry for this promulgation. e := audit.Entry{ Entity: &id.URL, } if promulgate.Promulgated { e.Op = audit.OpPromulgate } else { e.Op = audit.OpUnpromulgate } h.addAudit(e) return nil } // validPublishChannels holds the set of channels that can // be the target of a publish request. var validPublishChannels = map[params.Channel]bool{ params.DevelopmentChannel: true, params.StableChannel: true, } // PUT id/publish // See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idpublish func (h *ReqHandler) servePublish(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { // Perform basic validation of the request. if req.Method != "PUT" { return errgo.WithCausef(nil, params.ErrMethodNotAllowed, "%s not allowed", req.Method) } // Retrieve the requested action from the request body. var publish struct { params.PublishRequest `httprequest:",body"` } if err := httprequest.Unmarshal(httprequest.Params{Request: req}, &publish); err != nil { return badRequestf(err, "cannot unmarshal publish request body") } chans := publish.Channels if len(chans) == 0 { return badRequestf(nil, "no channels provided") } for _, c := range chans { if !validPublishChannels[c] { return badRequestf(nil, "cannot publish to %q", c) } } // Retrieve the base entity so that we can check permissions. baseEntity, err := h.Cache.BaseEntity(&id.URL, charmstore.FieldSelector("channelacls")) if err != nil { return errgo.Mask(err, errgo.Is(params.ErrNotFound)) } // Authorize the operation. Users must have write permissions on the ACLs // on the channel being published to. for _, c := range chans { if _, err := h.authorize(req, baseEntity.ChannelACLs[c].Write, true, id); err != nil { return errgo.Mask(err, errgo.Any) } } // TODO(ericsnow) Actually handle the resources. 
if len(publish.Resources) > 0 { return errNotImplemented } if err := h.Store.Publish(id, chans...); err != nil { return errgo.NoteMask(err, "cannot publish charm or bundle", errgo.Is(params.ErrNotFound)) } // TODO add publish audit return nil } // serveSetAuthCookie sets the provided macaroon slice as a cookie on the // client. func (h *ReqHandler) serveSetAuthCookie(w http.ResponseWriter, req *http.Request) error { // Allow cross-domain requests for the origin of this specific request so // that cookies can be set even if the request is xhr. w.Header().Set("Access-Control-Allow-Origin", req.Header.Get("Origin")) if req.Method != "PUT" { return errgo.WithCausef(nil, params.ErrMethodNotAllowed, "%s not allowed", req.Method) } var p params.SetAuthCookie decoder := json.NewDecoder(req.Body) if err := decoder.Decode(&p); err != nil { return errgo.Notef(err, "cannot unmarshal macaroons") } cookie, err := httpbakery.NewCookie(p.Macaroons) if err != nil { return errgo.Notef(err, "cannot create macaroons cookie") } cookie.Path = "/" cookie.Name = "macaroon-ui" http.SetCookie(w, cookie) return nil } // ResolvedIdHandler represents a HTTP handler that is invoked // on a resolved entity id. type ResolvedIdHandler func(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error // AuthIdHandler returns a ResolvedIdHandler that uses h.Router.Context.AuthorizeEntity to // check that the client is authorized to perform the HTTP request method before // invoking f. // // Note that it only accesses h.Router.Context when the returned // handler is called. 
func (h *ReqHandler) AuthIdHandler(f ResolvedIdHandler) ResolvedIdHandler { return func(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { if err := h.Router.Context.AuthorizeEntity(id, req); err != nil { return errgo.Mask(err, errgo.Any) } if err := f(id, w, req); err != nil { return errgo.Mask(err, errgo.Any) } return nil } } // ResolvedIdHandler returns an id handler that uses h.Router.Context.ResolveURL // to resolves any entity ids before calling f with the resolved id. // // Any specified fields will be added to the fields required by the cache, so // they will be pre-fetched by ResolveURL. // // Note that it only accesses h.Router.Context when the returned // handler is called. func (h *ReqHandler) ResolvedIdHandler(f ResolvedIdHandler, cacheFields ...string) router.IdHandler { fields := charmstore.FieldSelector(cacheFields...) return func(id *charm.URL, w http.ResponseWriter, req *http.Request) error { h.Cache.AddEntityFields(fields) rid, err := h.Router.Context.ResolveURL(id) if err != nil { return errgo.Mask(err, errgo.Is(params.ErrNotFound)) } return f(rid, w, req) } } var testAddAuditCallback func(e audit.Entry) // addAudit delegates an audit entry to the store to record an audit log after // it has set correctly the user doing the action. func (h *ReqHandler) addAudit(e audit.Entry) { if h.auth.Username == "" && !h.auth.Admin { panic("No auth set in ReqHandler") } e.User = h.auth.Username if h.auth.Admin && e.User == "" { e.User = "admin" } h.Store.AddAudit(e) if testAddAuditCallback != nil { testAddAuditCallback(e) } } // logout handles the GET /v5/logout endpoint that is used to log out of // charmstore. 
func logout(w http.ResponseWriter, r *http.Request) { for _, c := range r.Cookies() { if !strings.HasPrefix(c.Name, "macaroon-") { continue } c.Value = "" c.MaxAge = -1 c.Path = "/" http.SetCookie(w, c) } } ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/stats.go���������������������������0000664�0001750�0001750�00000011144�12672604603�025230� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2012 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "encoding/json" "net/http" "net/url" "strings" "time" "gopkg.in/errgo.v1" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" ) const dateFormat = "2006-01-02" // parseDateRange parses a date range as specified in an http // request. The returned times will be zero if not specified. func parseDateRange(form url.Values) (start, stop time.Time, err error) { if v := form.Get("start"); v != "" { var err error start, err = time.Parse(dateFormat, v) if err != nil { return time.Time{}, time.Time{}, badRequestf(err, "invalid 'start' value %q", v) } } if v := form.Get("stop"); v != "" { var err error stop, err = time.Parse(dateFormat, v) if err != nil { return time.Time{}, time.Time{}, badRequestf(err, "invalid 'stop' value %q", v) } // Cover all timestamps within the stop day. 
stop = stop.Add(24*time.Hour - 1*time.Second) } return } // GET stats/counter/key[:key]...?[by=unit]&start=date][&stop=date][&list=1] // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-statscounter func (h *ReqHandler) serveStatsCounter(_ http.Header, r *http.Request) (interface{}, error) { base := strings.TrimPrefix(r.URL.Path, "/") if strings.Index(base, "/") > 0 { return nil, errgo.WithCausef(nil, params.ErrNotFound, "invalid key") } if base == "" { return nil, params.ErrForbidden } var by charmstore.CounterRequestBy switch v := r.Form.Get("by"); v { case "": by = charmstore.ByAll case "day": by = charmstore.ByDay case "week": by = charmstore.ByWeek default: return nil, badRequestf(nil, "invalid 'by' value %q", v) } req := charmstore.CounterRequest{ Key: strings.Split(base, ":"), List: r.Form.Get("list") == "1", By: by, } var err error req.Start, req.Stop, err = parseDateRange(r.Form) if err != nil { return nil, errgo.Mask(err, errgo.Is(params.ErrBadRequest)) } if req.Key[len(req.Key)-1] == "*" { req.Prefix = true req.Key = req.Key[:len(req.Key)-1] if len(req.Key) == 0 { return nil, errgo.WithCausef(nil, params.ErrForbidden, "unknown key") } } entries, err := h.Store.Counters(&req) if err != nil { return nil, errgo.Notef(err, "cannot query counters") } var buf []byte var items []params.Statistic for i := range entries { entry := &entries[i] buf = buf[:0] if req.List { for j := range entry.Key { buf = append(buf, entry.Key[j]...) 
buf = append(buf, ':') } if entry.Prefix { buf = append(buf, '*') } else { buf = buf[:len(buf)-1] } } stat := params.Statistic{ Key: string(buf), Count: entry.Count, } if !entry.Time.IsZero() { stat.Date = entry.Time.Format("2006-01-02") } items = append(items, stat) } return items, nil } // PUT stats/update // https://github.com/juju/charmstore/blob/v4/docs/API.md#put-statsupdate func (h *ReqHandler) serveStatsUpdate(w http.ResponseWriter, r *http.Request) error { if _, err := h.authorize(r, []string{"statsupdate@cs"}, true, nil); err != nil { return err } if r.Method != "PUT" { return errgo.WithCausef(nil, params.ErrMethodNotAllowed, "%s not allowed", r.Method) } var req params.StatsUpdateRequest if ct := r.Header.Get("Content-Type"); ct != "application/json" { return errgo.WithCausef(nil, params.ErrBadRequest, "unexpected Content-Type %q; expected %q", ct, "application/json") } dec := json.NewDecoder(r.Body) if err := dec.Decode(&req); err != nil { return errgo.Notef(err, "cannot unmarshal body") } errors := make([]error, 0) for _, entry := range req.Entries { rid, err := h.Router.Context.ResolveURL(entry.CharmReference) if err != nil { errors = append(errors, errgo.Notef(err, "cannot find entity for url %s", entry.CharmReference)) continue } logger.Infof("Increase download stats for id: %s at time: %s", rid, entry.Timestamp) if err := h.Store.IncrementDownloadCountsAtTime(rid, entry.Timestamp); err != nil { errors = append(errors, err) continue } } if len(errors) != 0 { logger.Infof("Errors detected during /stats/update processing: %v", errors) if len(errors) > 1 { return errgo.Newf("%s (and %d more errors)", errors[0], len(errors)-1) } return errors[0] } return nil } // StatsEnabled reports whether statistics should be gathered for // the given HTTP request. func StatsEnabled(req *http.Request) bool { // It's fine to parse the form more than once, and it avoids // bugs from not parsing it. 
req.ParseForm() return req.Form.Get("stats") != "0" } ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/relations.go�����������������������0000664�0001750�0001750�00000023757�12672604603�026107� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "net/http" "net/url" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/mgo.v2/bson" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/entitycache" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" "gopkg.in/juju/charmstore.v5-unstable/internal/router" ) // GET id/meta/charm-related[?include=meta[&include=meta…]] // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-related func (h *ReqHandler) metaCharmRelated(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { if id.URL.Series == "bundle" { return nil, nil } // If the charm does not define any relation we can just return without // hitting the db. 
if len(entity.CharmProvidedInterfaces)+len(entity.CharmRequiredInterfaces) == 0 { return ¶ms.RelatedResponse{}, nil } q := h.Store.MatchingInterfacesQuery(entity.CharmProvidedInterfaces, entity.CharmRequiredInterfaces) fields := bson.D{ {"_id", 1}, {"supportedseries", 1}, {"charmrequiredinterfaces", 1}, {"charmprovidedinterfaces", 1}, {"promulgated-url", 1}, {"promulgated-revision", 1}, } var entities []*mongodoc.Entity if err := q.Select(fields).Sort("_id").All(&entities); err != nil { return nil, errgo.Notef(err, "cannot retrieve the related charms") } // If no entities are found there is no need for further processing the // results. if len(entities) == 0 { return ¶ms.RelatedResponse{}, nil } // Build the results, by grouping entities based on their relations' roles // and interfaces. includes := flags["include"] requires, err := h.getRelatedCharmsResponse(entity.CharmProvidedInterfaces, entities, func(e *mongodoc.Entity) []string { return e.CharmRequiredInterfaces }, includes, req) if err != nil { return nil, errgo.Notef(err, "cannot retrieve the charm requires") } provides, err := h.getRelatedCharmsResponse(entity.CharmRequiredInterfaces, entities, func(e *mongodoc.Entity) []string { return e.CharmProvidedInterfaces }, includes, req) if err != nil { return nil, errgo.Notef(err, "cannot retrieve the charm provides") } // Return the response. return ¶ms.RelatedResponse{ Requires: requires, Provides: provides, }, nil } // allEntities returns all the entities from the given iterator. It may // return some entities and an error if some were read before the // iterator completed. func allEntities(iter *entitycache.Iter) ([]*mongodoc.Entity, error) { var entities []*mongodoc.Entity for iter.Next() { entities = append(entities, iter.Entity()) } return entities, iter.Err() } type entityRelatedInterfacesGetter func(*mongodoc.Entity) []string // getRelatedCharmsResponse returns a response mapping interfaces to related // charms. 
For instance: // map[string][]params.MetaAnyResponse{ // "http": []params.MetaAnyResponse{ // {Id: "cs:utopic/django-42", Meta: ...}, // {Id: "cs:trusty/wordpress-47", Meta: ...}, // }, // "memcache": []params.MetaAnyResponse{ // {Id: "cs:utopic/memcached-0", Meta: ...}, // }, // } func (h *ReqHandler) getRelatedCharmsResponse( ifaces []string, entities []*mongodoc.Entity, getInterfaces entityRelatedInterfacesGetter, includes []string, req *http.Request, ) (map[string][]params.EntityResult, error) { results := make(map[string][]params.EntityResult, len(ifaces)) for _, iface := range ifaces { responses, err := h.getRelatedIfaceResponses(iface, entities, getInterfaces, includes, req) if err != nil { return nil, err } if len(responses) > 0 { results[iface] = responses } } return results, nil } func (h *ReqHandler) getRelatedIfaceResponses( iface string, entities []*mongodoc.Entity, getInterfaces entityRelatedInterfacesGetter, includes []string, req *http.Request, ) ([]params.EntityResult, error) { // Build a list of responses including only entities which are related // to the given interface. usesInterface := func(e *mongodoc.Entity) bool { for _, entityIface := range getInterfaces(e) { if entityIface == iface { return true } } return false } resp, err := h.getMetadataForEntities(entities, includes, req, usesInterface) if err != nil { return nil, errgo.Mask(err) } return resp, nil } // GET id/meta/bundles-containing[?include=meta[&include=meta…]][&any-series=1][&any-revision=1][&all-results=1] // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundles-containing func (h *ReqHandler) metaBundlesContaining(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { if id.URL.Series == "bundle" { return nil, nil } // Validate the URL query values. 
anySeries, err := router.ParseBool(flags.Get("any-series")) if err != nil { return nil, badRequestf(err, "invalid value for any-series") } anyRevision, err := router.ParseBool(flags.Get("any-revision")) if err != nil { return nil, badRequestf(err, "invalid value for any-revision") } allResults, err := router.ParseBool(flags.Get("all-results")) if err != nil { return nil, badRequestf(err, "invalid value for all-results") } // Mutate the reference so that it represents a base URL if required. prefURL := id.PreferredURL() searchId := *prefURL if anySeries || anyRevision { searchId.Revision = -1 searchId.Series = "" } // Retrieve the bundles containing the resulting charm id. q := h.Store.DB.Entities().Find(bson.D{{"bundlecharms", &searchId}}) iter := h.Cache.Iter(q, charmstore.FieldSelector("bundlecharms", "promulgated-url")) entities, err := allEntities(iter) if err != nil { return nil, errgo.Notef(err, "cannot retrieve the related bundles") } // Further filter the entities if required, by only including latest // bundle revisions and/or excluding specific charm series or revisions. // Filter entities so it contains only entities that actually // match the desired search criteria. filterEntities(&entities, func(e *mongodoc.Entity) bool { if anySeries == anyRevision { // If neither anySeries or anyRevision are true, then // the search will be exact and therefore e must be // matched. // If both anySeries and anyRevision are true, then // the base entity that we are searching for is exactly // what we want to search for, therefore e must be matched. return true } for _, charmId := range e.BundleCharms { if charmId.Name == prefURL.Name && charmId.User == prefURL.User && (anySeries || charmId.Series == prefURL.Series) && (anyRevision || charmId.Revision == prefURL.Revision) { return true } } return false }) var latest map[charm.URL]int if !allResults { // Include only the latest revision of any bundle. 
// This is made somewhat tricky by the fact that // each bundle can have two URLs, its canonical // URL (with user) and its promulgated URL. // // We want to maximise the URL revision regardless of // whether the URL is promulgated or not, so we // we build a map holding the latest revision for both // promulgated and non-promulgated revisions // and then include entities that have the latest // revision for either. latest = make(map[charm.URL]int) // updateLatest updates the latest revision for u // without its revision if it's greater than the existing // entry. updateLatest := func(u *charm.URL) { u1 := *u u1.Revision = -1 if rev, ok := latest[u1]; !ok || rev < u.Revision { latest[u1] = u.Revision } } for _, e := range entities { updateLatest(e.URL) if e.PromulgatedURL != nil { updateLatest(e.PromulgatedURL) } } filterEntities(&entities, func(e *mongodoc.Entity) bool { if e.PromulgatedURL != nil { u := *e.PromulgatedURL u.Revision = -1 if latest[u] == e.PromulgatedURL.Revision { return true } } u := *e.URL u.Revision = -1 return latest[u] == e.URL.Revision }) } resp, err := h.getMetadataForEntities(entities, flags["include"], req, nil) if err != nil { return nil, errgo.Mask(err) } return resp, nil } func (h *ReqHandler) getMetadataForEntities(entities []*mongodoc.Entity, includes []string, req *http.Request, includeEntity func(*mongodoc.Entity) bool) ([]params.EntityResult, error) { for _, inc := range includes { if h.Router.MetaHandler(inc) == nil { return nil, errgo.Newf("unrecognized metadata name %q", inc) } } response := make([]params.EntityResult, 0, len(entities)) for _, e := range entities { if includeEntity != nil && !includeEntity(e) { continue } meta, err := h.getMetadataForEntity(e, includes, req) if err == errMetadataUnauthorized { continue } if err != nil { // Unfortunately it is possible to get errors here due to // internal inconsistency, so rather than throwing away // all the search results, we just log the error and move on. 
logger.Errorf("cannot retrieve metadata for %v: %v", e.PreferredURL(true), err) continue } response = append(response, params.EntityResult{ Id: e.PreferredURL(true), Meta: meta, }) } return response, nil } var errMetadataUnauthorized = errgo.Newf("metadata unauthorized") func (h *ReqHandler) getMetadataForEntity(e *mongodoc.Entity, includes []string, req *http.Request) (map[string]interface{}, error) { rurl := charmstore.EntityResolvedURL(e) // Ignore entities that aren't readable by the current user. if err := h.AuthorizeEntity(rurl, req); err != nil { return nil, errMetadataUnauthorized } return h.Router.GetMetadata(rurl, includes, req) } // filterEntities deletes all entities from *entities for which // the given predicate returns false. func filterEntities(entities *[]*mongodoc.Entity, predicate func(*mongodoc.Entity) bool) { entities1 := *entities j := 0 for _, e := range entities1 { if predicate(e) { entities1[j] = e j++ } } *entities = entities1[0:j] } �����������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/defaulticon.go���������������������0000664�0001750�0001750�00000022174�12672604603�026374� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" // DefaultIcon holds the default charm icon SVG content. 
const DefaultIcon = defaultIcon const defaultIcon = ` image/svg+xml ` ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/package_test.go��������������������0000664�0001750�0001750�00000000462�12672604603�026525� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "testing" jujutesting "github.com/juju/testing" ) func TestPackage(t *testing.T) { jujutesting.MgoTestPackage(t, nil) } ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/list_test.go�����������������������0000664�0001750�0001750�00000037763�12672604603�026123� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "bytes" "encoding/json" "net/http" "sort" "strings" "github.com/juju/loggo" jc "github.com/juju/testing/checkers" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" ) type ListSuite struct { commonSuite } var _ = gc.Suite(&ListSuite{}) var exportListTestCharms = map[string]*router.ResolvedURL{ "wordpress": newResolvedURL("cs:~charmers/precise/wordpress-23", 23), "mysql": newResolvedURL("cs:~openstack-charmers/trusty/mysql-7", 7), "varnish": newResolvedURL("cs:~foo/trusty/varnish-1", -1), "riak": newResolvedURL("cs:~charmers/trusty/riak-67", 67), } var exportListTestBundles = map[string]*router.ResolvedURL{ "wordpress-simple": newResolvedURL("cs:~charmers/bundle/wordpress-simple-4", 4), } func (s *ListSuite) SetUpSuite(c *gc.C) { s.enableIdentity = true s.commonSuite.SetUpSuite(c) } func (s *ListSuite) SetUpTest(c *gc.C) { s.commonSuite.SetUpTest(c) s.addCharmsToStore(c) // hide the riak charm err := s.store.SetPerms(charm.MustParseURL("cs:~charmers/riak"), "stable.read", "charmers", "test-user") c.Assert(err, gc.IsNil) } func (s *ListSuite) addCharmsToStore(c *gc.C) { for name, id := range exportListTestCharms { s.addPublicCharm(c, getListCharm(name), id) } for name, id := range exportListTestBundles { s.addPublicBundle(c, getListBundle(name), id, false) } } func getListCharm(name string) *storetesting.Charm { ca := storetesting.Charms.CharmDir(name) meta := ca.Meta() meta.Categories = append(strings.Split(name, "-"), "bar") return storetesting.NewCharm(meta) } func getListBundle(name string) *storetesting.Bundle { ba := storetesting.Charms.BundleDir(name) data 
:= ba.Data() data.Tags = append(strings.Split(name, "-"), "baz") return storetesting.NewBundle(data) } func (s *ListSuite) TestSuccessfulList(c *gc.C) { tests := []struct { about string query string results []*router.ResolvedURL }{{ about: "bare list", query: "", results: []*router.ResolvedURL{ exportTestBundles["wordpress-simple"], exportTestCharms["wordpress"], exportTestCharms["varnish"], exportTestCharms["mysql"], }, }, { about: "name filter list", query: "name=mysql", results: []*router.ResolvedURL{ exportTestCharms["mysql"], }, }, { about: "owner filter list", query: "owner=foo", results: []*router.ResolvedURL{ exportTestCharms["varnish"], }, }, { about: "series filter list", query: "series=trusty", results: []*router.ResolvedURL{ exportTestCharms["varnish"], exportTestCharms["mysql"], }, }, { about: "type filter list", query: "type=bundle", results: []*router.ResolvedURL{ exportTestBundles["wordpress-simple"], }, }, { about: "promulgated", query: "promulgated=1", results: []*router.ResolvedURL{ exportTestBundles["wordpress-simple"], exportTestCharms["wordpress"], exportTestCharms["mysql"], }, }, { about: "not promulgated", query: "promulgated=0", results: []*router.ResolvedURL{ exportTestCharms["varnish"], }, }, { about: "promulgated with owner", query: "promulgated=1&owner=openstack-charmers", results: []*router.ResolvedURL{ exportTestCharms["mysql"], }, }} for i, test := range tests { c.Logf("test %d. %s", i, test.about) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list?" 
+ test.query), }) var sr params.ListResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, len(test.results)) c.Logf("results: %s", rec.Body.Bytes()) for i := range test.results { c.Assert(sr.Results[i].Id.String(), gc.Equals, test.results[i].PreferredURL().String(), gc.Commentf("element %d")) } } } func (s *ListSuite) TestMetadataFields(c *gc.C) { tests := []struct { about string query string meta map[string]interface{} }{{ about: "archive-size", query: "name=mysql&include=archive-size", meta: map[string]interface{}{ "archive-size": params.ArchiveSizeResponse{getListCharm("mysql").Size()}, }, }, { about: "bundle-metadata", query: "name=wordpress-simple&type=bundle&include=bundle-metadata", meta: map[string]interface{}{ "bundle-metadata": getListBundle("wordpress-simple").Data(), }, }, { about: "bundle-machine-count", query: "name=wordpress-simple&type=bundle&include=bundle-machine-count", meta: map[string]interface{}{ "bundle-machine-count": params.BundleCount{2}, }, }, { about: "bundle-unit-count", query: "name=wordpress-simple&type=bundle&include=bundle-unit-count", meta: map[string]interface{}{ "bundle-unit-count": params.BundleCount{2}, }, }, { about: "charm-actions", query: "name=wordpress&type=charm&include=charm-actions", meta: map[string]interface{}{ "charm-actions": getListCharm("wordpress").Actions(), }, }, { about: "charm-config", query: "name=wordpress&type=charm&include=charm-config", meta: map[string]interface{}{ "charm-config": getListCharm("wordpress").Config(), }, }, { about: "charm-related", query: "name=wordpress&type=charm&include=charm-related", meta: map[string]interface{}{ "charm-related": params.RelatedResponse{ Provides: map[string][]params.EntityResult{ "mysql": { { Id: exportTestCharms["mysql"].PreferredURL(), }, }, "varnish": { { Id: exportTestCharms["varnish"].PreferredURL(), }, }, }, }, }, }, { about: "multiple values", query: 
"name=wordpress&type=charm&include=charm-related&include=charm-config", meta: map[string]interface{}{ "charm-related": params.RelatedResponse{ Provides: map[string][]params.EntityResult{ "mysql": { { Id: exportTestCharms["mysql"].PreferredURL(), }, }, "varnish": { { Id: exportTestCharms["varnish"].PreferredURL(), }, }, }, }, "charm-config": getListCharm("wordpress").Config(), }, }} for i, test := range tests { c.Logf("test %d. %s", i, test.about) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list?" + test.query), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) var sr struct { Results []struct { Meta json.RawMessage } } err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, 1) c.Assert(string(sr.Results[0].Meta), jc.JSONEquals, test.meta) } } func (s *ListSuite) TestListIncludeError(c *gc.C) { // Perform a list for all charms, including the // manifest, which will try to retrieve all charm // blobs. rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list?type=charm&include=manifest"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) var resp params.ListResponse err := json.Unmarshal(rec.Body.Bytes(), &resp) // cs:riak will not be found because it is not visible to "everyone". c.Assert(resp.Results, gc.HasLen, len(exportTestCharms)-1) // Now remove one of the blobs. The list should still // work, but only return a single result. entity, err := s.store.FindEntity(newResolvedURL("~charmers/precise/wordpress-23", 23), nil) c.Assert(err, gc.IsNil) err = s.store.BlobStore.Remove(entity.BlobName) c.Assert(err, gc.IsNil) // Now list again - we should get one result less // (and the error will be logged). // Register a logger that so that we can check the logging output. // It will be automatically removed later because IsolatedMgoESSuite // uses LoggingSuite. 
var tw loggo.TestWriter err = loggo.RegisterWriter("test-log", &tw, loggo.DEBUG) c.Assert(err, gc.IsNil) rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list?type=charm&include=manifest"), }) c.Assert(rec.Code, gc.Equals, http.StatusOK) resp = params.ListResponse{} err = json.Unmarshal(rec.Body.Bytes(), &resp) // cs:riak will not be found because it is not visible to "everyone". // cs:wordpress will not be found because it has no manifest. c.Assert(resp.Results, gc.HasLen, len(exportTestCharms)-2) c.Assert(tw.Log(), jc.LogMatches, []string{"cannot retrieve metadata for cs:precise/wordpress-23: cannot open archive data for cs:precise/wordpress-23: .*"}) } func (s *ListSuite) TestSortingList(c *gc.C) { tests := []struct { about string query string results []*router.ResolvedURL }{{ about: "name ascending", query: "sort=name", results: []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "name descending", query: "sort=-name", results: []*router.ResolvedURL{ exportTestBundles["wordpress-simple"], exportTestCharms["wordpress"], exportTestCharms["varnish"], exportTestCharms["mysql"], }, }, { about: "series ascending", query: "sort=series,name", results: []*router.ResolvedURL{ exportTestBundles["wordpress-simple"], exportTestCharms["wordpress"], exportTestCharms["mysql"], exportTestCharms["varnish"], }, }, { about: "series descending", query: "sort=-series&sort=name", results: []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }, { about: "owner ascending", query: "sort=owner,name", results: []*router.ResolvedURL{ exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], exportTestCharms["varnish"], exportTestCharms["mysql"], }, }, { about: "owner descending", query: "sort=-owner&sort=name", results: 
[]*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["varnish"], exportTestCharms["wordpress"], exportTestBundles["wordpress-simple"], }, }} for i, test := range tests { c.Logf("test %d. %s", i, test.about) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list?" + test.query), }) var sr params.ListResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, len(test.results), gc.Commentf("expected %#v", test.results)) c.Logf("results: %s", rec.Body.Bytes()) for i := range test.results { c.Assert(sr.Results[i].Id.String(), gc.Equals, test.results[i].PreferredURL().String(), gc.Commentf("element %d")) } } } func (s *ListSuite) TestSortUnsupportedListField(c *gc.C) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list?sort=text"), }) var e params.Error err := json.Unmarshal(rec.Body.Bytes(), &e) c.Assert(err, gc.IsNil) c.Assert(e.Code, gc.Equals, params.ErrBadRequest) c.Assert(e.Message, gc.Equals, "invalid sort field: unrecognized sort parameter \"text\"") } func (s *ListSuite) TestGetLatestRevisionOnly(c *gc.C) { id := newResolvedURL("cs:~charmers/precise/wordpress-24", 24) s.addPublicCharm(c, getListCharm("wordpress"), id) testresults := []*router.ResolvedURL{ exportTestBundles["wordpress-simple"], id, exportTestCharms["varnish"], exportTestCharms["mysql"], } rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list"), }) var sr params.ListResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, 4, gc.Commentf("expected %#v", testresults)) c.Logf("results: %s", rec.Body.Bytes()) for i := range testresults { c.Assert(sr.Results[i].Id.String(), gc.Equals, testresults[i].PreferredURL().String(), gc.Commentf("element %d")) } testresults = []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["varnish"], id, 
exportTestBundles["wordpress-simple"], } rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list?sort=name"), }) err = json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) c.Assert(sr.Results, gc.HasLen, 4, gc.Commentf("expected %#v", testresults)) c.Logf("results: %s", rec.Body.Bytes()) for i := range testresults { c.Assert(sr.Results[i].Id.String(), gc.Equals, testresults[i].PreferredURL().String(), gc.Commentf("element %d")) } } func (s *ListSuite) assertPut(c *gc.C, url string, val interface{}) { body, err := json.Marshal(val) c.Assert(err, gc.IsNil) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(url), Method: "PUT", Header: http.Header{ "Content-Type": {"application/json"}, }, Username: testUsername, Password: testPassword, Body: bytes.NewReader(body), }) c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("headers: %v, body: %s", rec.HeaderMap, rec.Body.String())) c.Assert(rec.Body.String(), gc.HasLen, 0) } func (s *ListSuite) TestListWithAdminCredentials(c *gc.C) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list"), Username: testUsername, Password: testPassword, }) c.Assert(rec.Code, gc.Equals, http.StatusOK) expected := []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["wordpress"], exportTestCharms["riak"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], } var sr params.ListResponse err := json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) assertListResultSet(c, sr, expected) } func (s *ListSuite) TestListWithUserMacaroon(c *gc.C) { m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ checkers.DeclaredCaveat("username", "test-user"), }) c.Assert(err, gc.IsNil) macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m}) c.Assert(err, gc.IsNil) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list"), Cookies: 
[]*http.Cookie{macaroonCookie}, }) c.Assert(rec.Code, gc.Equals, http.StatusOK) expected := []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["wordpress"], exportTestCharms["riak"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], } var sr params.ListResponse err = json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) assertListResultSet(c, sr, expected) } func (s *ListSuite) TestSearchWithBadAdminCredentialsAndACookie(c *gc.C) { m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ checkers.DeclaredCaveat("username", "test-user"), }) c.Assert(err, gc.IsNil) macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m}) c.Assert(err, gc.IsNil) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("list"), Cookies: []*http.Cookie{macaroonCookie}, Username: testUsername, Password: "bad-password", }) c.Assert(rec.Code, gc.Equals, http.StatusOK) expected := []*router.ResolvedURL{ exportTestCharms["mysql"], exportTestCharms["wordpress"], exportTestCharms["varnish"], exportTestBundles["wordpress-simple"], } var sr params.ListResponse err = json.Unmarshal(rec.Body.Bytes(), &sr) c.Assert(err, gc.IsNil) assertListResultSet(c, sr, expected) } func assertListResultSet(c *gc.C, sr params.ListResponse, expected []*router.ResolvedURL) { sort.Sort(listResultById(sr.Results)) sort.Sort(resolvedURLByPreferredURL(expected)) c.Assert(sr.Results, gc.HasLen, len(expected), gc.Commentf("expected %#v", expected)) for i := range expected { c.Assert(sr.Results[i].Id.String(), gc.Equals, expected[i].PreferredURL().String(), gc.Commentf("element %d")) } } type listResultById []params.EntityResult func (s listResultById) Len() int { return len(s) } func (s listResultById) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s listResultById) Less(i, j int) bool { return s[i].Id.String() < s[j].Id.String() } 
�������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/auth.go����������������������������0000664�0001750�0001750�00000036576�12672604603�025053� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "encoding/base64" "net/http" "strings" "time" idmparams "github.com/juju/idmclient/params" "gopkg.in/errgo.v1" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" "gopkg.in/juju/charmstore.v5-unstable/internal/router" ) const ( PromulgatorsGroup = "charmers" // OpAccessCharmWithTerms indicates an operation of accessing the archive of // a charm that requires agreement to certain terms and conditions. OpAccessCharmWithTerms = "op-get-with-terms" // OpOther indicates all other operations. // This operation should not be added as part of a macaroon caveat. OpOther = "op-other" defaultMacaroonExpiry = 24 * time.Hour ) // authorize checks that the current user is authorized based on the provided // ACL and optional entity. 
If an authenticated user is required, authorize tries to retrieve the // current user in the following ways: // - by checking that the request's headers HTTP basic auth credentials match // the superuser credentials stored in the API handler; // - by checking that there is a valid macaroon in the request's cookies. // A params.ErrUnauthorized error is returned if superuser credentials fail; // otherwise a macaroon is minted and a httpbakery discharge-required // error is returned holding the macaroon. // // This method also sets h.auth to the returned authorization info. func (h *ReqHandler) authorize(req *http.Request, acl []string, alwaysAuth bool, entityId *router.ResolvedURL) (authorization, error) { logger.Infof( "authorize, auth location %q, acl %q, path: %q, method: %q, entity: %#v", h.Handler.config.IdentityLocation, acl, req.URL.Path, req.Method, entityId) if !alwaysAuth { // No need to authenticate if the ACL is open to everyone. for _, name := range acl { if name == params.Everyone { return authorization{}, nil } } } entities := []*router.ResolvedURL{} if entityId != nil { entities = append(entities, entityId) } auth, verr := h.CheckRequest(req, entities, OpOther) if verr == nil { if err := h.checkACLMembership(auth, acl); err != nil { return authorization{}, errgo.WithCausef(err, params.ErrUnauthorized, "") } h.auth = auth return auth, nil } if _, ok := errgo.Cause(verr).(*bakery.VerificationError); !ok { return authorization{}, errgo.Mask(verr, errgo.Is(params.ErrUnauthorized)) } // Macaroon verification failed: mint a new macaroon. // We need to deny access for opAccessCharmWithTerms operations because they // may require more specific checks that terms and conditions have been // satisfied. 
m, err := h.newMacaroon(checkers.DenyCaveat(OpAccessCharmWithTerms)) if err != nil { return authorization{}, errgo.Notef(err, "cannot mint macaroon") } return authorization{}, h.newDischargeRequiredError(m, verr, req) } // AuthorizeEntityAndTerms is similar to the authorize method, but // in addition it also checks if the entity meta data specifies // and terms and conditions that the user needs to agree to. If so, // it will require the user to agree to those terms and conditions // by adding a third party caveat addressed to the terms service // requiring the user to have agreements to specified terms. func (h *ReqHandler) AuthorizeEntityAndTerms(req *http.Request, entityIds []*router.ResolvedURL) (authorization, error) { logger.Infof( "authorize entity and terms, auth location %q, terms location %q, path: %q, method: %q, entities: %#v", h.Handler.config.IdentityLocation, h.Handler.config.TermsLocation, req.URL.Path, req.Method, entityIds) if len(entityIds) == 0 { return authorization{}, errgo.WithCausef(nil, params.ErrUnauthorized, "entity id not specified") } public, acls, requiredTerms, err := h.entityAuthInfo(entityIds) if err != nil { return authorization{}, errgo.Mask(err) } // if all entities are open to everyone and non of the entities defines any Terms, then we return nil if public { return authorization{}, nil } if len(requiredTerms) > 0 && h.Handler.config.TermsLocation == "" { return authorization{}, errgo.WithCausef(nil, params.ErrUnauthorized, "charmstore not configured to serve charms with terms and conditions") } operation := OpOther if len(requiredTerms) > 0 { operation = OpAccessCharmWithTerms } auth, verr := h.CheckRequest(req, entityIds, operation) if verr == nil { for _, acl := range acls { if err := h.checkACLMembership(auth, acl); err != nil { return authorization{}, errgo.WithCausef(err, params.ErrUnauthorized, "") } } h.auth = auth return auth, nil } if _, ok := errgo.Cause(verr).(*bakery.VerificationError); !ok { return authorization{}, 
errgo.Mask(verr, errgo.Is(params.ErrUnauthorized)) } caveats := []checkers.Caveat{} if len(requiredTerms) > 0 { terms := []string{} for term, _ := range requiredTerms { terms = append(terms, term) } resolvedURLstrings := make([]string, len(entityIds)) for i, id := range entityIds { resolvedURLstrings[i] = id.String() } caveats = append(caveats, checkers.Caveat{Condition: "is-entity " + strings.Join(resolvedURLstrings, " ")}, checkers.Caveat{h.Handler.config.TermsLocation, "has-agreed " + strings.Join(terms, " ")}, ) } // Macaroon verification failed: mint a new macaroon. m, err := h.newMacaroon(caveats...) if err != nil { return authorization{}, errgo.Notef(err, "cannot mint macaroon") } return authorization{}, h.newDischargeRequiredError(m, verr, req) } func (h *ReqHandler) newDischargeRequiredError(m *macaroon.Macaroon, verr error, req *http.Request) error { // Request that this macaroon be supplied for all requests // to the whole handler. We use a relative path because // the charm store is conventionally under an external // root path, with other services also under the same // externally visible host name, and we don't want our // cookies to be presnted to those services. cookiePath := "/" if p, err := router.RelativeURLPath(req.RequestURI, "/"); err != nil { // Should never happen, as RequestURI should always be absolute. logger.Infof("cannot make relative URL from %v", req.RequestURI) } else { cookiePath = p } dischargeErr := httpbakery.NewDischargeRequiredErrorForRequest(m, cookiePath, verr, req) dischargeErr.(*httpbakery.Error).Info.CookieNameSuffix = "authn" return dischargeErr } // entityAuthInfo returns authorization on the entities with the given ids. // If public is true, no authorization is required, otherwise acls holds // an entry for each id with the corresponding ACL for each entity, // and requiredTerms holds entries for all required terms. 
func (h *ReqHandler) entityAuthInfo(entityIds []*router.ResolvedURL) (public bool, acls [][]string, requiredTerms map[string]bool, err error) { acls = make([][]string, len(entityIds)) requiredTerms = make(map[string]bool) public = true for i, entityId := range entityIds { entity, err := h.Cache.Entity(&entityId.URL, charmstore.FieldSelector("charmmeta")) if err != nil { return false, nil, nil, errgo.Mask(err, errgo.Is(params.ErrNotFound)) } acl, err := h.entityACLs(entityId) if err != nil { return false, nil, nil, errgo.Mask(err, errgo.Is(params.ErrNotFound)) } acls[i] = acl.Read if entity.CharmMeta == nil || len(entity.CharmMeta.Terms) == 0 { // No need to authenticate if the ACL is open to everyone. publicCharm := false for _, name := range acls[i] { if name == params.Everyone { publicCharm = true break } } public = public && publicCharm } else { public = false for _, term := range entity.CharmMeta.Terms { requiredTerms[term] = true } } } return public, acls, requiredTerms, nil } // CheckRequest checks for any authorization tokens in the request and // returns any found as an authorization. If no suitable credentials are // found, or an error occurs, then a zero valued authorization is // returned. It also checks any first party caveats. If the entityId is // provided, it will be used to check any "is-entity" first party caveat. // In addition it adds a checker that checks if operation specified by // the operation parameters is allowed. 
func (h *ReqHandler) CheckRequest(req *http.Request, entityIds []*router.ResolvedURL, operation string) (authorization, error) { user, passwd, err := parseCredentials(req) if err == nil { if user != h.Handler.config.AuthUsername || passwd != h.Handler.config.AuthPassword { return authorization{}, errgo.WithCausef(nil, params.ErrUnauthorized, "invalid user name or password") } return authorization{Admin: true}, nil } bk := h.Store.Bakery if errgo.Cause(err) != errNoCreds || bk == nil || h.Handler.config.IdentityLocation == "" { return authorization{}, errgo.WithCausef(err, params.ErrUnauthorized, "authentication failed") } attrMap, err := httpbakery.CheckRequest(bk, req, nil, checkers.New( checkers.CheckerFunc{ Condition_: "is-entity", Check_: func(_, args string) error { return areAllowedEntities(entityIds, args) }, }, checkers.OperationChecker(operation), )) if err != nil { return authorization{}, errgo.Mask(err, errgo.Any) } return authorization{ Admin: false, Username: attrMap[UsernameAttr], }, nil } // areAllowedEntities checks if all entityIds are in the allowedEntities list (space // separated). func areAllowedEntities(entityIds []*router.ResolvedURL, allowedEntities string) error { allowedEntitiesMap := make(map[string]bool) for _, curl := range strings.Fields(allowedEntities) { allowedEntitiesMap[curl] = true } if len(entityIds) == 0 { return errgo.Newf("operation does not involve any of the allowed entities %v", allowedEntities) } for _, entityId := range entityIds { if allowedEntitiesMap[entityId.URL.String()] { continue } purl := entityId.PromulgatedURL() if purl != nil { if allowedEntitiesMap[purl.String()] { continue } } return errgo.Newf("operation on entity %v not allowed", entityId) } return nil } // AuthorizeEntity checks that the given HTTP request // can access the entity with the given id. 
func (h *ReqHandler) AuthorizeEntity(id *router.ResolvedURL, req *http.Request) error {
	acls, err := h.entityACLs(id)
	if err != nil {
		return errgo.Mask(err, errgo.Is(params.ErrNotFound))
	}
	// Read methods are checked against the read ACL, mutating methods
	// against the write ACL; see authorizeWithPerms.
	return h.authorizeWithPerms(req, acls.Read, acls.Write, id)
}

// entityChannel returns the channel to use when determining ACLs for
// the entity with the given id. If the store itself is associated with
// a channel, that channel is always used; otherwise the channel is
// derived from the entity's published state, with stable taking
// precedence over development, and an entity published to neither
// treated as unpublished.
func (h *ReqHandler) entityChannel(id *router.ResolvedURL) (params.Channel, error) {
	if h.Store.Channel != params.NoChannel {
		return h.Store.Channel, nil
	}
	entity, err := h.Cache.Entity(&id.URL, charmstore.FieldSelector("development", "stable"))
	if err != nil {
		if errgo.Cause(err) == params.ErrNotFound {
			return params.NoChannel, errgo.WithCausef(nil, params.ErrNotFound, "entity %q not found", id)
		}
		return params.NoChannel, errgo.Notef(err, "cannot retrieve entity %q for authorization", id)
	}
	var ch params.Channel
	switch {
	case entity.Stable:
		ch = params.StableChannel
	case entity.Development:
		ch = params.DevelopmentChannel
	default:
		ch = params.UnpublishedChannel
	}
	return ch, nil
}

// entityACLs calculates the ACLs for the specified entity. If the entity
// has been published to the stable channel then the StableChannel ACLs will be
// used; if the entity has been published to development, but not stable,
// then the DevelopmentChannel ACLs will be used; otherwise
// the unpublished ACLs are used.
func (h *ReqHandler) entityACLs(id *router.ResolvedURL) (mongodoc.ACL, error) { ch, err := h.entityChannel(id) if err != nil { return mongodoc.ACL{}, errgo.Mask(err, errgo.Is(params.ErrNotFound)) } baseEntity, err := h.Cache.BaseEntity(&id.URL, charmstore.FieldSelector("channelacls")) if err != nil { return mongodoc.ACL{}, errgo.Notef(err, "cannot retrieve base entity %q for authorization", id) } return baseEntity.ChannelACLs[ch], nil } func (h *ReqHandler) authorizeWithPerms(req *http.Request, read, write []string, entityId *router.ResolvedURL) error { alwaysAuth := false var acl []string switch req.Method { case "DELETE", "PATCH", "POST", "PUT": acl = write // We always require authentication when making changes // to the charm store so that auditing will work, even if the // entity is public. alwaysAuth = true default: acl = read } _, err := h.authorize(req, acl, alwaysAuth, entityId) return err } const UsernameAttr = "username" // authorization conatains authorization information extracted from an HTTP request. // The zero value for a authorization contains no privileges. type authorization struct { Admin bool Username string } // Groups for user fetches the list of groups to which the user belongs. func (h *ReqHandler) GroupsForUser(username string) ([]string, error) { if h.Handler.config.IdentityAPIURL == "" { logger.Debugf("IdentityAPIURL not configured, not retrieving groups for %s", username) return nil, nil } // TODO cache groups for a user groups, err := h.Handler.identityClient.UserGroups(&idmparams.UserGroupsRequest{Username: idmparams.Username(username)}) if err != nil { return nil, errgo.Notef(err, "cannot get groups for %s", username) } return groups, nil } func (h *ReqHandler) checkACLMembership(auth authorization, acl []string) error { if auth.Admin { return nil } if auth.Username == "" { return errgo.New("no username declared") } // First check if access is granted without querying for groups. 
for _, name := range acl { if name == auth.Username || name == params.Everyone { return nil } } groups, err := h.GroupsForUser(auth.Username) if err != nil { logger.Errorf("cannot get groups for %q: %v", auth.Username, err) return errgo.Newf("access denied for user %q", auth.Username) } for _, name := range acl { for _, g := range groups { if g == name { return nil } } } return errgo.Newf("access denied for user %q", auth.Username) } func (h *ReqHandler) newMacaroon(caveats ...checkers.Caveat) (*macaroon.Macaroon, error) { caveats = append(caveats, checkers.NeedDeclaredCaveat( checkers.Caveat{ Location: h.Handler.config.IdentityLocation, Condition: "is-authenticated-user", }, UsernameAttr, ), checkers.TimeBeforeCaveat(time.Now().Add(defaultMacaroonExpiry)), ) // TODO generate different caveats depending on the requested operation // and whether there's a charm id or not. // Mint an appropriate macaroon and send it back to the client. return h.Store.Bakery.NewMacaroon("", nil, caveats) } var errNoCreds = errgo.New("missing HTTP auth header") // parseCredentials parses the given request and returns the HTTP basic auth // credentials included in its header. func parseCredentials(req *http.Request) (username, password string, err error) { auth := req.Header.Get("Authorization") if auth == "" { return "", "", errNoCreds } parts := strings.Fields(auth) if len(parts) != 2 || parts[0] != "Basic" { return "", "", errgo.New("invalid HTTP auth header") } // Challenge is a base64-encoded "tag:pass" string. // See RFC 2617, Section 2. 
challenge, err := base64.StdEncoding.DecodeString(parts[1]) if err != nil { return "", "", errgo.New("invalid HTTP auth encoding") } tokens := strings.SplitN(string(challenge), ":", 2) if len(tokens) != 2 { return "", "", errgo.New("invalid HTTP auth contents") } return tokens[0], tokens[1], nil } ����������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/common_test.go���������������������0000664�0001750�0001750�00000034346�12672604603�026432� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "bytes" "encoding/json" "io" "net/http" "net/http/httptest" "time" "github.com/juju/loggo" jujutesting "github.com/juju/testing" "github.com/juju/testing/httptesting" "github.com/julienschmidt/httprouter" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/bakerytest" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" "gopkg.in/mgo.v2" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" "gopkg.in/juju/charmstore.v5-unstable/internal/v5" ) var mgoLogger = loggo.GetLogger("mgo") func init() { mgo.SetLogger(mgoLog{}) } type mgoLog struct{} func (mgoLog) Output(calldepth int, s string) error { mgoLogger.LogCallf(calldepth+1, loggo.INFO, "%s", 
s) return nil } type commonSuite struct { jujutesting.IsolatedMgoSuite // srv holds the store HTTP handler. srv *charmstore.Server // srvParams holds the parameters that the // srv handler was started with srvParams charmstore.ServerParams // noMacaroonSrv holds the store HTTP handler // for an instance of the store without identity // enabled. If enableIdentity is false, this is // the same as srv. noMacaroonSrv *charmstore.Server // noMacaroonSrvParams holds the parameters that the // noMacaroonSrv handler was started with noMacaroonSrvParams charmstore.ServerParams // store holds an instance of *charm.Store // that can be used to access the charmstore database // directly. store *charmstore.Store // esSuite is set only when enableES is set to true. esSuite *storetesting.ElasticSearchSuite // discharge holds the function that will be used // to check third party caveats by the mock // discharger. This will be ignored if enableIdentity was // not true before commonSuite.SetUpTest is invoked. // // It may be set by tests to influence the behavior of the // discharger. discharge func(cav, arg string) ([]checkers.Caveat, error) discharger *bakerytest.Discharger idM *idM idMServer *httptest.Server dischargeTerms func(cav, arg string) ([]checkers.Caveat, error) termsDischarger *bakerytest.Discharger enableTerms bool // The following fields may be set before // SetUpSuite is invoked on commonSuite // and influences how the suite sets itself up. // enableIdentity holds whether the charmstore server // will be started with a configured identity service. enableIdentity bool // enableES holds whether the charmstore server will be // started with Elastic Search enabled. enableES bool // maxMgoSessions specifies the value that will be given // to config.MaxMgoSessions when calling charmstore.NewServer. 
maxMgoSessions int } func (s *commonSuite) SetUpSuite(c *gc.C) { s.IsolatedMgoSuite.SetUpSuite(c) if s.enableES { s.esSuite = new(storetesting.ElasticSearchSuite) s.esSuite.SetUpSuite(c) } } func (s *commonSuite) TearDownSuite(c *gc.C) { if s.esSuite != nil { s.esSuite.TearDownSuite(c) } } func (s *commonSuite) SetUpTest(c *gc.C) { s.IsolatedMgoSuite.SetUpTest(c) if s.esSuite != nil { s.esSuite.SetUpTest(c) } if s.enableIdentity { s.idM = newIdM() s.idMServer = httptest.NewServer(s.idM) } s.startServer(c) } func (s *commonSuite) TearDownTest(c *gc.C) { s.store.Pool().Close() s.store.Close() s.srv.Close() s.noMacaroonSrv.Close() if s.esSuite != nil { s.esSuite.TearDownTest(c) } if s.discharger != nil { s.discharger.Close() s.idMServer.Close() } if s.termsDischarger != nil { s.termsDischarger.Close() } s.IsolatedMgoSuite.TearDownTest(c) } // startServer creates a new charmstore server. func (s *commonSuite) startServer(c *gc.C) { config := charmstore.ServerParams{ AuthUsername: testUsername, AuthPassword: testPassword, StatsCacheMaxAge: time.Nanosecond, MaxMgoSessions: s.maxMgoSessions, } keyring := bakery.NewPublicKeyRing() if s.enableIdentity { s.discharge = func(_, _ string) ([]checkers.Caveat, error) { return nil, errgo.New("no discharge") } discharger := bakerytest.NewDischarger(nil, func(_ *http.Request, cond string, arg string) ([]checkers.Caveat, error) { return s.discharge(cond, arg) }) config.IdentityLocation = discharger.Location() config.IdentityAPIURL = s.idMServer.URL pk, err := httpbakery.PublicKeyForLocation(http.DefaultClient, discharger.Location()) c.Assert(err, gc.IsNil) err = keyring.AddPublicKeyForLocation(discharger.Location(), true, pk) c.Assert(err, gc.IsNil) } if s.enableTerms { s.dischargeTerms = func(_, _ string) ([]checkers.Caveat, error) { return nil, errgo.New("no discharge") } termsDischarger := bakerytest.NewDischarger(nil, func(_ *http.Request, cond string, arg string) ([]checkers.Caveat, error) { return s.dischargeTerms(cond, arg) }) 
config.TermsLocation = termsDischarger.Location() pk, err := httpbakery.PublicKeyForLocation(http.DefaultClient, termsDischarger.Location()) c.Assert(err, gc.IsNil) err = keyring.AddPublicKeyForLocation(termsDischarger.Location(), true, pk) c.Assert(err, gc.IsNil) } config.PublicKeyLocator = keyring var si *charmstore.SearchIndex if s.enableES { si = &charmstore.SearchIndex{ Database: s.esSuite.ES, Index: s.esSuite.TestIndex, } } db := s.Session.DB("charmstore") var err error s.srv, err = charmstore.NewServer(db, si, config, map[string]charmstore.NewAPIHandlerFunc{"v5": v5.NewAPIHandler}) c.Assert(err, gc.IsNil) s.srvParams = config if s.enableIdentity { config.IdentityLocation = "" config.PublicKeyLocator = nil config.IdentityAPIURL = "" s.noMacaroonSrv, err = charmstore.NewServer(db, si, config, map[string]charmstore.NewAPIHandlerFunc{"v5": v5.NewAPIHandler}) c.Assert(err, gc.IsNil) } else { s.noMacaroonSrv = s.srv } s.noMacaroonSrvParams = config s.store = s.srv.Pool().Store() } func (s *commonSuite) addPublicCharmFromRepo(c *gc.C, charmName string, rurl *router.ResolvedURL) (*router.ResolvedURL, charm.Charm) { return s.addPublicCharm(c, storetesting.Charms.CharmDir(charmName), rurl) } func (s *commonSuite) addPublicCharm(c *gc.C, ch charm.Charm, rurl *router.ResolvedURL) (*router.ResolvedURL, charm.Charm) { err := s.store.AddCharmWithArchive(rurl, ch) c.Assert(err, gc.IsNil) s.setPublic(c, rurl) return rurl, ch } func (s *commonSuite) setPublic(c *gc.C, rurl *router.ResolvedURL) { err := s.store.SetPerms(&rurl.URL, "stable.read", params.Everyone) c.Assert(err, gc.IsNil) err = s.store.Publish(rurl, params.StableChannel) c.Assert(err, gc.IsNil) } func (s *commonSuite) addPublicBundleFromRepo(c *gc.C, bundleName string, rurl *router.ResolvedURL, addRequiredCharms bool) (*router.ResolvedURL, charm.Bundle) { return s.addPublicBundle(c, storetesting.Charms.BundleDir(bundleName), rurl, addRequiredCharms) } func (s *commonSuite) addPublicBundle(c *gc.C, bundle 
charm.Bundle, rurl *router.ResolvedURL, addRequiredCharms bool) (*router.ResolvedURL, charm.Bundle) { if addRequiredCharms { s.addRequiredCharms(c, bundle) } err := s.store.AddBundleWithArchive(rurl, bundle) c.Assert(err, gc.IsNil) s.setPublic(c, rurl) return rurl, bundle } // addCharms adds all the given charms to s.store. The // map key is the id of the charm. func (s *commonSuite) addCharms(c *gc.C, charms map[string]charm.Charm) { for id, ch := range charms { s.addPublicCharm(c, storetesting.NewCharm(ch.Meta()), mustParseResolvedURL(id)) } } // setPerms sets the stable channel read permissions of a set of // entities. The map key is the is the id of each entity; its associated // value is its read ACL. func (s *commonSuite) setPerms(c *gc.C, readACLs map[string][]string) { for url, acl := range readACLs { err := s.store.SetPerms(charm.MustParseURL(url), "stable.read", acl...) c.Assert(err, gc.IsNil) } } // handler returns a request handler that can be // used to invoke private methods. The caller // is responsible for calling Put on the returned handler. func (s *commonSuite) handler(c *gc.C) *v5.ReqHandler { h := v5.New(s.store.Pool(), s.srvParams, "") defer h.Close() rh, err := h.NewReqHandler(new(http.Request)) c.Assert(err, gc.IsNil) // It would be nice if we could call s.AddCleanup here // to call rh.Put when the test has completed, but // unfortunately CleanupSuite.TearDownTest runs // after MgoSuite.TearDownTest, so that's not an option. 
return rh } func storeURL(path string) string { return "/v5/" + path } func (s *commonSuite) bakeryDoAsUser(c *gc.C, user string) func(*http.Request) (*http.Response, error) { bclient := httpbakery.NewClient() m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ checkers.DeclaredCaveat("username", user), }) c.Assert(err, gc.IsNil) macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m}) c.Assert(err, gc.IsNil) return func(req *http.Request) (*http.Response, error) { req.AddCookie(macaroonCookie) if req.Body == nil { return bclient.Do(req) } body := req.Body.(io.ReadSeeker) req.Body = nil return bclient.DoWithBody(req, body) } } // addRequiredCharms adds any charms required by the given // bundle that are not already in the store. func (s *commonSuite) addRequiredCharms(c *gc.C, bundle charm.Bundle) { for _, svc := range bundle.Data().Services { u := charm.MustParseURL(svc.Charm) if _, err := s.store.FindBestEntity(u, params.StableChannel, nil); err == nil { continue } if u.Revision == -1 { u.Revision = 0 } var rurl router.ResolvedURL rurl.URL = *u chDir, err := charm.ReadCharmDir(storetesting.Charms.CharmDirPath(u.Name)) ch := charm.Charm(chDir) if err != nil { // The charm doesn't exist in the local charm repo; make one up. 
ch = storetesting.NewCharm(nil) } if len(ch.Meta().Series) == 0 && u.Series == "" { rurl.URL.Series = "trusty" } if u.User == "" { rurl.URL.User = "charmers" rurl.PromulgatedRevision = rurl.URL.Revision } else { rurl.PromulgatedRevision = -1 } c.Logf("adding charm %v %d required by bundle to fulfil %v", &rurl.URL, rurl.PromulgatedRevision, svc.Charm) s.addPublicCharm(c, ch, &rurl) } } func (s *commonSuite) assertPut(c *gc.C, url string, val interface{}) { s.assertPut0(c, url, val, false) } func (s *commonSuite) assertPutAsAdmin(c *gc.C, url string, val interface{}) { s.assertPut0(c, url, val, true) } func (s *commonSuite) assertPut0(c *gc.C, url string, val interface{}, asAdmin bool) { body, err := json.Marshal(val) c.Assert(err, gc.IsNil) p := httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(url), Method: "PUT", Do: bakeryDo(nil), Header: http.Header{ "Content-Type": {"application/json"}, }, Body: bytes.NewReader(body), } if asAdmin { p.Username = testUsername p.Password = testPassword } httptesting.AssertJSONCall(c, p) } func (s *commonSuite) assertGet(c *gc.C, url string, expectVal interface{}) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), URL: storeURL(url), ExpectBody: expectVal, }) } // assertGetIsUnauthorized asserts that a GET to the given URL results // in an ErrUnauthorized response with the given error message. func (s *commonSuite) assertGetIsUnauthorized(c *gc.C, url, expectMessage string) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), Method: "GET", URL: storeURL(url), ExpectStatus: http.StatusUnauthorized, ExpectBody: params.Error{ Code: params.ErrUnauthorized, Message: expectMessage, }, }) } // assertGetIsUnauthorized asserts that a PUT to the given URL with the // given body value results in an ErrUnauthorized response with the given // error message. 
// assertPutIsUnauthorized asserts that a PUT to the given URL with the
// given body value results in an ErrUnauthorized response with the given
// error message.
func (s *commonSuite) assertPutIsUnauthorized(c *gc.C, url string, val interface{}, expectMessage string) {
	body, err := json.Marshal(val)
	c.Assert(err, gc.IsNil)
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler: s.srv,
		URL:     storeURL(url),
		Method:  "PUT",
		Do:      bakeryDo(nil),
		Header: http.Header{
			"Content-Type": {"application/json"},
		},
		Body:         bytes.NewReader(body),
		ExpectStatus: http.StatusUnauthorized,
		ExpectBody: params.Error{
			Code:    params.ErrUnauthorized,
			Message: expectMessage,
		},
	})
}

// doAsUser calls the given function, discharging any authorization
// request as the given user name.
func (s *commonSuite) doAsUser(user string, f func()) {
	old := s.discharge
	s.discharge = dischargeForUser(user)
	// Restore the previous discharger even if f panics or
	// fails an assertion.
	defer func() {
		s.discharge = old
	}()
	f()
}

// bakeryDo returns a Do function suitable for
// httptesting.JSONCallParams.Do that uses a bakery client
// (wrapping the given HTTP client, or a fresh one if client
// is nil) so that macaroon discharges are handled.
func bakeryDo(client *http.Client) func(*http.Request) (*http.Response, error) {
	if client == nil {
		client = httpbakery.NewHTTPClient()
	}
	bclient := httpbakery.NewClient()
	bclient.Client = client
	return func(req *http.Request) (*http.Response, error) {
		if req.Body == nil {
			return bclient.Do(req)
		}
		// DoWithBody needs the body separately so that the
		// request can be replayed after discharging.
		body := req.Body.(io.ReadSeeker)
		req.Body = nil
		return bclient.DoWithBody(req, body)
	}
}

// idM is a fake identity manager server used by the tests
// to answer group-membership queries.
type idM struct {
	// groups may be set to determine the mapping
	// from user to groups for that user.
	groups map[string][]string

	// body may be set to cause serveGroups to return
	// an arbitrary HTTP response body.
	body string

	// contentType is the contentType to use when body is not ""
	contentType string

	// status may be set to indicate the HTTP status code
	// when body is not empty.
	status int

	// router dispatches the identity-manager endpoints.
	router *httprouter.Router
}

// newIdM returns a fake identity manager that serves group
// queries on both the /groups and legacy /idpgroups endpoints.
func newIdM() *idM {
	idM := &idM{
		groups: make(map[string][]string),
		router: httprouter.New(),
	}
	idM.router.GET("/v1/u/:user/groups", idM.serveGroups)
	idM.router.GET("/v1/u/:user/idpgroups", idM.serveGroups)
	return idM
}

// ServeHTTP implements http.Handler by delegating to the router.
func (idM *idM) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	idM.router.ServeHTTP(w, req)
}

// serveGroups returns the configured canned body if one is set,
// otherwise the JSON-encoded group list for the requested user.
func (idM *idM) serveGroups(w http.ResponseWriter, req *http.Request, p httprouter.Params) {
	if idM.body != "" {
		// A canned response has been configured; return it verbatim.
		if idM.contentType != "" {
			w.Header().Set("Content-Type", idM.contentType)
		}
		if idM.status != 0 {
			w.WriteHeader(idM.status)
		}
		w.Write([]byte(idM.body))
		return
	}
	u := p.ByName("user")
	if u == "" {
		panic("no user")
	}
	w.Header().Set("Content-Type", "application/json")
	enc := json.NewEncoder(w)
	if err := enc.Encode(idM.groups[u]); err != nil {
		panic(err)
	}
}
package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5"

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/juju/utils/debugstatus"
	"gopkg.in/errgo.v1"
	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
	"gopkg.in/mgo.v2/bson"

	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
)

// GET /debug/status
// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-debugstatus
func (h *ReqHandler) serveDebugStatus(_ http.Header, req *http.Request) (interface{}, error) {
	// Use a short reconnect timeout so the status endpoint fails
	// quickly rather than hanging when mongo is unreachable.
	h.Store.SetReconnectTimeout(500 * time.Millisecond)
	return debugstatus.Check(
		debugstatus.ServerStartTime,
		debugstatus.Connection(h.Store.DB.Session),
		debugstatus.MongoCollections(h.Store.DB),
		h.checkElasticSearch,
		h.checkEntities,
		h.checkBaseEntities,
		h.checkLogs(
			"ingestion", "Ingestion",
			mongodoc.IngestionType,
			params.IngestionStart, params.IngestionComplete,
		),
		h.checkLogs(
			"legacy_statistics", "Legacy Statistics Load",
			mongodoc.LegacyStatisticsType,
			params.LegacyStatisticsImportStart, params.LegacyStatisticsImportComplete,
		),
	), nil
}

// checkElasticSearch reports the health of the configured
// elasticsearch instance. It passes trivially when elasticsearch
// is not configured at all.
func (h *ReqHandler) checkElasticSearch() (key string, result debugstatus.CheckResult) {
	key = "elasticsearch"
	result.Name = "Elastic search is running"
	if h.Store.ES == nil || h.Store.ES.Database == nil {
		result.Value = "Elastic search is not configured"
		result.Passed = true
		return key, result
	}
	health, err := h.Store.ES.Health()
	if err != nil {
		result.Value = "Connection issues to Elastic Search: " + err.Error()
		return key, result
	}
	result.Value = health.String()
	result.Passed = health.Status == "green"
	return key, result
}

// checkEntities reports the number of charm, bundle and promulgated
// entities held in the store.
func (h *ReqHandler) checkEntities() (key string, result debugstatus.CheckResult) {
	// Use the named key return consistently (previously the literal
	// "entities" was repeated on every return path).
	key = "entities"
	result.Name = "Entities in charm store"
	charms, err := h.Store.DB.Entities().Find(bson.D{{"series", bson.D{{"$ne", "bundle"}}}}).Count()
	if err != nil {
		result.Value = "Cannot count charms for consistency check: " + err.Error()
		return key, result
	}
	bundles, err := h.Store.DB.Entities().Find(bson.D{{"series", "bundle"}}).Count()
	if err != nil {
		result.Value = "Cannot count bundles for consistency check: " + err.Error()
		return key, result
	}
	promulgated, err := h.Store.DB.Entities().Find(bson.D{{"promulgated-url", bson.D{{"$exists", true}}}}).Count()
	if err != nil {
		result.Value = "Cannot count promulgated for consistency check: " + err.Error()
		return key, result
	}
	result.Value = fmt.Sprintf("%d charms; %d bundles; %d promulgated", charms, bundles, promulgated)
	result.Passed = true
	return key, result
}

// checkBaseEntities reports the number of base entities and checks
// that it does not exceed the number of entities.
func (h *ReqHandler) checkBaseEntities() (key string, result debugstatus.CheckResult) {
	// Use the named key return rather than a shadowing resultKey local.
	key = "base_entities"
	result.Name = "Base entities in charm store"
	// Retrieve the number of base entities.
	baseNum, err := h.Store.DB.BaseEntities().Count()
	if err != nil {
		result.Value = "Cannot count base entities: " + err.Error()
		return key, result
	}
	// Retrieve the number of entities.
	num, err := h.Store.DB.Entities().Count()
	if err != nil {
		result.Value = "Cannot count entities for consistency check: " + err.Error()
		return key, result
	}
	result.Value = fmt.Sprintf("count: %d", baseNum)
	// Every entity shares a base entity, so there can never be
	// more base entities than entities.
	result.Passed = num >= baseNum
	return key, result
}

// checkLogs returns a checker that reports the last time log
// messages with the given start and end prefixes were recorded
// for the given log type, passing only when both have been seen.
func (h *ReqHandler) checkLogs(
	resultKey, resultName string,
	logType mongodoc.LogType,
	startPrefix, endPrefix string,
) debugstatus.CheckerFunc {
	return func() (key string, result debugstatus.CheckResult) {
		result.Name = resultName
		start, end, err := h.findTimesInLogs(logType, startPrefix, endPrefix)
		if err != nil {
			result.Value = err.Error()
			return resultKey, result
		}
		result.Value = fmt.Sprintf("started: %s, completed: %s", start.Format(time.RFC3339), end.Format(time.RFC3339))
		result.Passed = !(start.IsZero() || end.IsZero())
		return resultKey, result
	}
}
func (h *ReqHandler) findTimesInLogs(logType mongodoc.LogType, startPrefix, endPrefix string) (start, end time.Time, err error) { var log mongodoc.Log iter := h.Store.DB.Logs(). Find(bson.D{ {"level", mongodoc.InfoLevel}, {"type", logType}, }).Sort("-time", "-id").Iter() for iter.Next(&log) { var msg string if err := json.Unmarshal(log.Data, &msg); err != nil { // an error here probably means the log isn't in the form we are looking for. continue } if start.IsZero() && strings.HasPrefix(msg, startPrefix) { start = log.Time } if end.IsZero() && strings.HasPrefix(msg, endPrefix) { end = log.Time } if !start.IsZero() && !end.IsZero() { break } } if err = iter.Close(); err != nil { return time.Time{}, time.Time{}, errgo.Notef(err, "Cannot query logs") } return } ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/bench_test.go����������������������0000664�0001750�0001750�00000010122�12672604603�026203� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������package v5_test import ( "net/http" "net/http/httptest" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" ) type BenchmarkSuite struct { commonSuite } var _ = gc.Suite(&BenchmarkSuite{}) func (s *BenchmarkSuite) TestBenchmarkMeta(c *gc.C) { s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-23", 23)) srv := 
// BenchmarkMeta measures the latency of a simple single-entity
// meta request (archive-size) served over a real HTTP server.
func (s *BenchmarkSuite) BenchmarkMeta(c *gc.C) {
	s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-23", 23))
	srv := httptest.NewServer(s.srv)
	defer srv.Close()
	url := srv.URL + storeURL("wordpress/meta/archive-size")
	// Exclude the setup above from the measured time.
	c.ResetTimer()
	for i := 0; i < c.N; i++ {
		resp, err := http.Get(url)
		if err != nil {
			c.Fatalf("get failed: %v", err)
		}
		resp.Body.Close()
		if resp.StatusCode != 200 {
			c.Fatalf("response failed with code %v", resp.Status)
		}
	}
}

// benchmarkCharmRelatedAddCharms holds the fixture charms for the
// charm-related benchmark. Each key presumably encodes the promulgated
// revision followed by the charm URL (as consumed by addCharms) —
// TODO confirm against the addCharms helper.
var benchmarkCharmRelatedAddCharms = map[string]charm.Charm{
	"0 ~charmers/trusty/wordpress-0": storetesting.NewCharm(storetesting.RelationMeta(
		"requires cache memcache",
		"requires nfs mount",
	)),
	"1 ~charmers/utopic/memcached-1": storetesting.NewCharm(storetesting.RelationMeta(
		"provides cache memcache",
	)),
	"2 ~charmers/utopic/memcached-2": storetesting.NewCharm(storetesting.RelationMeta(
		"provides cache memcache",
	)),
	"90 ~charmers/utopic/redis-90": storetesting.NewCharm(storetesting.RelationMeta(
		"provides cache memcache",
	)),
	"47 ~charmers/trusty/nfs-47": storetesting.NewCharm(storetesting.RelationMeta(
		"provides nfs mount",
	)),
	"42 ~charmers/precise/nfs-42": storetesting.NewCharm(storetesting.RelationMeta(
		"provides nfs mount",
	)),
	"47 ~charmers/precise/nfs-47": storetesting.NewCharm(storetesting.RelationMeta(
		"provides nfs mount",
	)),
}

// benchmarkCharmRelatedExpectBody is the response expected from the
// charm-related endpoint for the wordpress charm in the fixture above:
// the latest revision of each related charm, grouped by interface.
var benchmarkCharmRelatedExpectBody = params.RelatedResponse{
	Provides: map[string][]params.EntityResult{
		"memcache": {{
			Id: charm.MustParseURL("utopic/memcached-1"),
			Meta: map[string]interface{}{
				"id-name": params.IdNameResponse{"memcached"},
			},
		}, {
			Id: charm.MustParseURL("utopic/memcached-2"),
			Meta: map[string]interface{}{
				"id-name": params.IdNameResponse{"memcached"},
			},
		}, {
			Id: charm.MustParseURL("utopic/redis-90"),
			Meta: map[string]interface{}{
				"id-name": params.IdNameResponse{"redis"},
			},
		}},
		"mount": {{
			Id: charm.MustParseURL("precise/nfs-42"),
			Meta: map[string]interface{}{
				"id-name": params.IdNameResponse{"nfs"},
			},
		}, {
			Id: charm.MustParseURL("precise/nfs-47"),
			Meta: map[string]interface{}{
				"id-name": params.IdNameResponse{"nfs"},
			},
		}, {
			Id: charm.MustParseURL("trusty/nfs-47"),
			Meta: map[string]interface{}{
				"id-name": params.IdNameResponse{"nfs"},
			},
		}},
	},
}

// BenchmarkCharmRelated measures the latency of the charm-related
// endpoint while asserting the response is correct on every iteration.
func (s *BenchmarkSuite) BenchmarkCharmRelated(c *gc.C) {
	s.addCharms(c, benchmarkCharmRelatedAddCharms)
	expectBody := benchmarkCharmRelatedExpectBody
	srv := httptest.NewServer(s.srv)
	defer srv.Close()
	// NOTE(review): both Handler and an absolute server URL are
	// supplied below; httptesting appears to dispatch via Handler,
	// which would leave the httptest server unused — confirm.
	url := srv.URL + storeURL("trusty/wordpress-0/meta/charm-related?include=id-name")
	c.ResetTimer()
	for i := 0; i < c.N; i++ {
		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
			Handler:      s.srv,
			URL:          url,
			ExpectStatus: http.StatusOK,
			ExpectBody:   expectBody,
		})
	}
}

// TestCharmRelated performs a single run of the charm-related
// request benchmarked above, so a regression in the expected
// body is caught by the ordinary test suite.
func (s *BenchmarkSuite) TestCharmRelated(c *gc.C) {
	s.addCharms(c, benchmarkCharmRelatedAddCharms)
	expectBody := benchmarkCharmRelatedExpectBody
	srv := httptest.NewServer(s.srv)
	defer srv.Close()
	url := srv.URL + storeURL("trusty/wordpress-0/meta/charm-related?include=id-name")
	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
		Handler:      s.srv,
		URL:          url,
		ExpectStatus: http.StatusOK,
		ExpectBody:   expectBody,
	})
}
package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5"

import (
	"net/http"
	runtimepprof "runtime/pprof"
	"strings"
	"text/template"

	"github.com/juju/httpprof"

	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
)

// pprofHandler serves the pprof debugging endpoints, requiring
// authorization before dispatching to the underlying handlers.
type pprofHandler struct {
	// mux routes the individual pprof endpoints.
	mux *http.ServeMux
	// auth is consulted before serving any request.
	auth authorizer
}

// authorizer is the subset of authorization functionality
// required by the pprof handler.
type authorizer interface {
	authorize(req *http.Request, acl []string, alwaysAuth bool, entityId *router.ResolvedURL) (authorization, error)
}

// newPprofHandler returns a handler that serves the pprof
// endpoints (cmdline, profile, symbol and the index) guarded
// by the given authorizer.
func newPprofHandler(auth authorizer) http.Handler {
	mux := http.NewServeMux()
	mux.HandleFunc("/cmdline", pprof.Cmdline)
	mux.HandleFunc("/profile", pprof.Profile)
	mux.HandleFunc("/symbol", pprof.Symbol)
	// All other paths fall through to the index/per-profile handler.
	mux.HandleFunc("/", pprofIndex)
	return &pprofHandler{
		mux: mux,
		auth: auth,
	}
}

// ServeHTTP implements http.Handler, refusing to serve any
// pprof data unless the request is authorized.
func (h *pprofHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// alwaysAuth is true: profiling data must never be public.
	if _, err := h.auth.authorize(req, nil, true, nil); err != nil {
		router.WriteError(w, err)
		return
	}
	h.mux.ServeHTTP(w, req)
}
func pprofIndex(w http.ResponseWriter, req *http.Request) { if req.URL.Path == "/" { profiles := runtimepprof.Profiles() if err := indexTmpl.Execute(w, profiles); err != nil { logger.Errorf("cannot execute pprof template: %v", err) } return } name := strings.TrimPrefix(req.URL.Path, "/") pprof.Handler(name).ServeHTTP(w, req) } var indexTmpl = template.Must(template.New("index").Parse(` pprof

pprof

profiles:

{{range .}} {{end}}
{{.Count}} {{.Name}}

full goroutine stack dump

`)) charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/stats_test.go0000664000175000017500000004023012672604603026265 0ustar marcomarco// Copyright 2012 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" import ( "encoding/json" "net/http" "net/url" "strings" "time" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" "gopkg.in/juju/charmstore.v5-unstable/internal/v5" ) type StatsSuite struct { commonSuite } var _ = gc.Suite(&StatsSuite{}) func (s *StatsSuite) SetUpTest(c *gc.C) { s.enableIdentity = true s.commonSuite.SetUpTest(c) } func (s *StatsSuite) TestServerStatsStatus(c *gc.C) { tests := []struct { path string status int message string code params.ErrorCode }{{ path: "stats/counter/", status: http.StatusForbidden, message: "forbidden", code: params.ErrForbidden, }, { path: "stats/counter/*", status: http.StatusForbidden, message: "unknown key", code: params.ErrForbidden, }, { path: "stats/counter/any/", status: http.StatusNotFound, message: "invalid key", code: params.ErrNotFound, }, { path: "stats/", status: http.StatusNotFound, message: "not found", code: params.ErrNotFound, }, { path: "stats/any", status: http.StatusNotFound, message: "not found", code: params.ErrNotFound, }, { path: "stats/counter/any?by=fortnight", status: http.StatusBadRequest, message: `invalid 'by' value "fortnight"`, code: params.ErrBadRequest, }, { path: "stats/counter/any?start=tomorrow", status: http.StatusBadRequest, message: `invalid 'start' value "tomorrow": parsing time "tomorrow" as "2006-01-02": cannot parse "tomorrow" as "2006"`, code: params.ErrBadRequest, }, { path: "stats/counter/any?stop=3", status: http.StatusBadRequest, message: `invalid 
'stop' value "3": parsing time "3" as "2006-01-02": cannot parse "3" as "2006"`, code: params.ErrBadRequest, }} for i, test := range tests { c.Logf("test %d. %s", i, test.path) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(test.path), ExpectStatus: test.status, ExpectBody: params.Error{ Message: test.message, Code: test.code, }, }) } } func (s *StatsSuite) TestServerStatsUpdate(c *gc.C) { ref := charm.MustParseURL("~charmers/precise/wordpress-23") tests := []struct { path string status int body params.StatsUpdateRequest expectBody map[string]interface{} previousMonth bool }{{ path: "stats/update", status: http.StatusOK, body: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now(), CharmReference: charm.MustParseURL("~charmers/wordpress"), }}}, }, { path: "stats/update", status: http.StatusOK, body: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now(), CharmReference: ref, }}, }, }, { path: "stats/update", status: http.StatusOK, body: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now().AddDate(0, -1, 0), CharmReference: ref, }}, }, previousMonth: true, }} s.addPublicCharm(c, storetesting.Charms.CharmDir("wordpress"), newResolvedURL("~charmers/precise/wordpress-23", 23)) var countsBefore, countsAfter charmstore.AggregatedCounts for i, test := range tests { c.Logf("test %d. 
%s", i, test.path) var err error _, countsBefore, err = s.store.ArchiveDownloadCounts(ref, true) c.Assert(err, gc.IsNil) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(test.path), Method: "PUT", Username: testUsername, Password: testPassword, JSONBody: test.body, }) c.Assert(rec.Code, gc.Equals, test.status) _, countsAfter, err = s.store.ArchiveDownloadCounts(ref, true) c.Assert(err, gc.IsNil) c.Assert(countsAfter.Total-countsBefore.Total, gc.Equals, int64(1)) if test.previousMonth { c.Assert(countsAfter.LastDay-countsBefore.LastDay, gc.Equals, int64(0)) } else { c.Assert(countsAfter.LastDay-countsBefore.LastDay, gc.Equals, int64(1)) } } } func (s *StatsSuite) TestServerStatsArchiveDownloadOnPromulgatedEntity(c *gc.C) { ref := charm.MustParseURL("~charmers/precise/wordpress-23") path := "/stats/counter/archive-download:*" rurl := newResolvedURL("~charmers/precise/wordpress-23", 23) s.addPublicCharm(c, storetesting.Charms.CharmDir("wordpress"), rurl) s.store.SetPromulgated(rurl, true) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(path), Method: "GET", }) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.String(), gc.Equals, `[{"Count":0}]`) rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL("stats/update"), Method: "PUT", Username: testUsername, Password: testPassword, JSONBody: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now(), CharmReference: ref, }}}, }) c.Assert(rec.Code, gc.Equals, http.StatusOK) rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: storeURL(path), Method: "GET", }) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.String(), gc.Equals, `[{"Count":1}]`) } func (s *StatsSuite) TestServerStatsUpdateErrors(c *gc.C) { ref := charm.MustParseURL("~charmers/precise/wordpress-23") tests := []struct { path string status int body 
params.StatsUpdateRequest expectMessage string expectCode params.ErrorCode partialUpdate bool }{{ path: "stats/update", status: http.StatusInternalServerError, body: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now(), CharmReference: charm.MustParseURL("~charmers/precise/unknown-23"), }}, }, expectMessage: `cannot find entity for url cs:~charmers/precise/unknown-23: no matching charm or bundle for cs:~charmers/precise/unknown-23`, }, { path: "stats/update", status: http.StatusInternalServerError, body: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now(), CharmReference: charm.MustParseURL("~charmers/precise/unknown-23"), }, { Timestamp: time.Now(), CharmReference: charm.MustParseURL("~charmers/precise/wordpress-23"), }}, }, expectMessage: `cannot find entity for url cs:~charmers/precise/unknown-23: no matching charm or bundle for cs:~charmers/precise/unknown-23`, partialUpdate: true, }} s.addPublicCharm(c, storetesting.Charms.CharmDir("wordpress"), newResolvedURL("~charmers/precise/wordpress-23", 23)) for i, test := range tests { c.Logf("test %d. 
%s", i, test.path) var countsBefore charmstore.AggregatedCounts if test.partialUpdate { var err error _, countsBefore, err = s.store.ArchiveDownloadCounts(ref, true) c.Assert(err, gc.IsNil) } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL(test.path), Method: "PUT", Username: testUsername, Password: testPassword, JSONBody: test.body, ExpectStatus: test.status, ExpectBody: params.Error{ Message: test.expectMessage, Code: test.expectCode, }, }) if test.partialUpdate { _, countsAfter, err := s.store.ArchiveDownloadCounts(ref, true) c.Assert(err, gc.IsNil) c.Assert(countsAfter.Total-countsBefore.Total, gc.Equals, int64(1)) c.Assert(countsAfter.LastDay-countsBefore.LastDay, gc.Equals, int64(1)) } } } func (s *StatsSuite) TestServerStatsUpdateNotPartOfStatsUpdateGroup(c *gc.C) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("stats/update"), Method: "PUT", JSONBody: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now(), CharmReference: charm.MustParseURL("~charmers/precise/wordpress-23"), }}, }, ExpectStatus: http.StatusProxyAuthRequired, ExpectBody: dischargeRequiredBody, }) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("stats/update"), Method: "PUT", Username: "brad", Password: "pitt", JSONBody: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now(), CharmReference: charm.MustParseURL("~charmers/precise/wordpress-23"), }}, }, ExpectStatus: http.StatusUnauthorized, ExpectBody: ¶ms.Error{ Message: "invalid user name or password", Code: params.ErrUnauthorized, }, }) } func (s *StatsSuite) TestServerStatsUpdatePartOfStatsUpdateGroup(c *gc.C) { s.addPublicCharm(c, storetesting.Charms.CharmDir("wordpress"), newResolvedURL("~charmers/precise/wordpress-23", 23)) s.discharge = dischargeForUser("statsupdate") s.idM.groups = map[string][]string{ "statsupdate": []string{"statsupdate@cs"}, } 
httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: storeURL("stats/update"), Do: bakeryDo(nil), Method: "PUT", JSONBody: params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ Timestamp: time.Now(), CharmReference: charm.MustParseURL("~charmers/precise/wordpress-23"), }}, }, ExpectStatus: http.StatusOK, }) } func (s *StatsSuite) TestStatsCounter(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } for _, key := range [][]string{{"a", "b"}, {"a", "b"}, {"a", "c"}, {"a"}} { err := s.store.IncCounter(key) c.Assert(err, gc.IsNil) } var all []interface{} err := s.store.DB.StatCounters().Find(nil).All(&all) c.Assert(err, gc.IsNil) data, err := json.Marshal(all) c.Assert(err, gc.IsNil) c.Logf("%s", data) expected := map[string]int64{ "a:b": 2, "a:b:*": 0, "a:*": 3, "a": 1, "a:b:c": 0, } for counter, n := range expected { c.Logf("test %q", counter) url := storeURL("stats/counter/" + counter) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: url, ExpectBody: []params.Statistic{{ Count: n, }}, }) } } func (s *StatsSuite) TestStatsCounterList(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } incs := [][]string{ {"a"}, {"a", "b"}, {"a", "b", "c"}, {"a", "b", "c"}, {"a", "b", "d"}, {"a", "b", "e"}, {"a", "f", "g"}, {"a", "f", "h"}, {"a", "i"}, {"j", "k"}, } for _, key := range incs { err := s.store.IncCounter(key) c.Assert(err, gc.IsNil) } tests := []struct { key string result []params.Statistic }{{ key: "a", result: []params.Statistic{{ Key: "a", Count: 1, }}, }, { key: "a:*", result: []params.Statistic{{ Key: "a:b:*", Count: 4, }, { Key: "a:f:*", Count: 2, }, { Key: "a:b", Count: 1, }, { Key: "a:i", Count: 1, }}, }, { key: "a:b:*", result: []params.Statistic{{ Key: "a:b:c", Count: 2, }, { Key: "a:b:d", Count: 1, }, { Key: "a:b:e", Count: 1, }}, }, { key: "a:*", result: []params.Statistic{{ Key: "a:b:*", Count: 4, }, { Key: "a:f:*", 
Count: 2, }, { Key: "a:b", Count: 1, }, { Key: "a:i", Count: 1, }}, }} for i, test := range tests { c.Logf("test %d: %s", i, test.key) url := storeURL("stats/counter/" + test.key + "?list=1") httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: url, ExpectBody: test.result, }) } } func (s *StatsSuite) TestStatsCounterBy(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } incs := []struct { key []string day int }{ {[]string{"a"}, 1}, {[]string{"a"}, 1}, {[]string{"b"}, 1}, {[]string{"a", "b"}, 1}, {[]string{"a", "c"}, 1}, {[]string{"a"}, 3}, {[]string{"a", "b"}, 3}, {[]string{"b"}, 9}, {[]string{"b"}, 9}, {[]string{"a", "c", "d"}, 9}, {[]string{"a", "c", "e"}, 9}, {[]string{"a", "c", "f"}, 9}, } day := func(i int) time.Time { return time.Date(2012, time.May, i, 0, 0, 0, 0, time.UTC) } for i, inc := range incs { t := day(inc.day) // Ensure each entry is unique by adding // a sufficient increment for each test. t = t.Add(time.Duration(i) * charmstore.StatsGranularity) err := s.store.IncCounterAtTime(inc.key, t) c.Assert(err, gc.IsNil) } tests := []struct { request charmstore.CounterRequest result []params.Statistic }{{ request: charmstore.CounterRequest{ Key: []string{"a"}, Prefix: false, List: false, By: charmstore.ByDay, }, result: []params.Statistic{{ Date: "2012-05-01", Count: 2, }, { Date: "2012-05-03", Count: 1, }}, }, { request: charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: false, By: charmstore.ByDay, }, result: []params.Statistic{{ Date: "2012-05-01", Count: 2, }, { Date: "2012-05-03", Count: 1, }, { Date: "2012-05-09", Count: 3, }}, }, { request: charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: false, By: charmstore.ByDay, Start: time.Date(2012, 5, 2, 0, 0, 0, 0, time.UTC), }, result: []params.Statistic{{ Date: "2012-05-03", Count: 1, }, { Date: "2012-05-09", Count: 3, }}, }, { request: charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: 
false, By: charmstore.ByDay, Stop: time.Date(2012, 5, 4, 0, 0, 0, 0, time.UTC), }, result: []params.Statistic{{ Date: "2012-05-01", Count: 2, }, { Date: "2012-05-03", Count: 1, }}, }, { request: charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: false, By: charmstore.ByDay, Start: time.Date(2012, 5, 3, 0, 0, 0, 0, time.UTC), Stop: time.Date(2012, 5, 3, 0, 0, 0, 0, time.UTC), }, result: []params.Statistic{{ Date: "2012-05-03", Count: 1, }}, }, { request: charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: true, By: charmstore.ByDay, }, result: []params.Statistic{{ Key: "a:b", Date: "2012-05-01", Count: 1, }, { Key: "a:c", Date: "2012-05-01", Count: 1, }, { Key: "a:b", Date: "2012-05-03", Count: 1, }, { Key: "a:c:*", Date: "2012-05-09", Count: 3, }}, }, { request: charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: false, By: charmstore.ByWeek, }, result: []params.Statistic{{ Date: "2012-05-06", Count: 3, }, { Date: "2012-05-13", Count: 3, }}, }, { request: charmstore.CounterRequest{ Key: []string{"a"}, Prefix: true, List: true, By: charmstore.ByWeek, }, result: []params.Statistic{{ Key: "a:b", Date: "2012-05-06", Count: 2, }, { Key: "a:c", Date: "2012-05-06", Count: 1, }, { Key: "a:c:*", Date: "2012-05-13", Count: 3, }}, }} for i, test := range tests { flags := make(url.Values) url := storeURL("stats/counter/" + strings.Join(test.request.Key, ":")) if test.request.Prefix { url += ":*" } if test.request.List { flags.Set("list", "1") } if !test.request.Start.IsZero() { flags.Set("start", test.request.Start.Format("2006-01-02")) } if !test.request.Stop.IsZero() { flags.Set("stop", test.request.Stop.Format("2006-01-02")) } switch test.request.By { case charmstore.ByDay: flags.Set("by", "day") case charmstore.ByWeek: flags.Set("by", "week") } if len(flags) > 0 { url += "?" 
+ flags.Encode() } c.Logf("test %d: %s", i, url) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: url, ExpectBody: test.result, }) } } func (s *StatsSuite) TestStatsEnabled(c *gc.C) { statsEnabled := func(url string) bool { req, _ := http.NewRequest("GET", url, nil) return v5.StatsEnabled(req) } c.Assert(statsEnabled("http://foo.com"), gc.Equals, true) c.Assert(statsEnabled("http://foo.com?stats=1"), gc.Equals, true) c.Assert(statsEnabled("http://foo.com?stats=0"), gc.Equals, false) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/0000775000175000017500000000000012672604603025015 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/doc.go0000664000175000017500000002357212672604603026122 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package mongodoc // import "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" import ( "time" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/mgo.v2/bson" ) // Entity holds the in-database representation of charm or bundle's // document in the charms collection. It holds information // on one specific revision and series of the charm or bundle - see // also BaseEntity. // // We ensure that there is always a single BaseEntity for any // set of entities which share the same base URL. type Entity struct { // URL holds the fully specified URL of the charm or bundle. // e.g. cs:precise/wordpress-34, cs:~user/trusty/foo-2 URL *charm.URL `bson:"_id"` // BaseURL holds the reference URL of the charm or bundle // (this omits the series and revision from URL) // e.g. cs:wordpress, cs:~user/foo BaseURL *charm.URL // User holds the user part of the entity URL (for instance, "joe"). User string // Name holds the name of the entity (for instance "wordpress"). 
Name string // Revision holds the entity revision (it cannot be -1/unset). Revision int // Series holds the entity series (for instance "trusty" or "bundle"). // For multi-series charms, this will be empty. Series string // SupportedSeries holds the series supported by a charm. // For non-multi-series charms, this is a single element slice // containing the value in Series. SupportedSeries []string // PreV5BlobHash holds the hash checksum of the // blob that will be served from the v4 and legacy // APIs. This will be the same as BlobHash for single-series charms. PreV5BlobHash string // PreV5BlobSize holds the size of the // blob that will be served from the v4 and legacy // APIs. This will be the same as Size for single-series charms. PreV5BlobSize int64 // PreV5BlobHash256 holds the SHA256 hash checksum // of the blob that will be served from the v4 and legacy // APIs. This will be the same as Hash256 for single-series charms. PreV5BlobHash256 string // BlobHash holds the hash checksum of the blob, in hexadecimal format, // as created by blobstore.NewHash. BlobHash string // BlobHash256 holds the SHA256 hash checksum of the blob, // in hexadecimal format. This is only used by the legacy // API, and is calculated lazily the first time it is required. // Note that this is calculated from the pre-V5 blob. BlobHash256 string // Size holds the size of the archive blob. // TODO(rog) rename this to BlobSize. Size int64 // BlobName holds the name that the archive blob is given in the blob store. // For multi-series charms, there is also a second blob which // stores a "zip-suffix" that overrides metadata.yaml. // This is named BlobName + ".pre-v5-suffix". BlobName string UploadTime time.Time // ExtraInfo holds arbitrary extra metadata associated with // the entity. The byte slices hold JSON-encoded data. ExtraInfo map[string][]byte `bson:",omitempty" json:",omitempty"` // TODO(rog) verify that all these types marshal to the expected // JSON form. 
CharmMeta *charm.Meta CharmConfig *charm.Config CharmActions *charm.Actions // CharmProvidedInterfaces holds all the relation // interfaces provided by the charm CharmProvidedInterfaces []string // CharmRequiredInterfaces is similar to CharmProvidedInterfaces // for required interfaces. CharmRequiredInterfaces []string BundleData *charm.BundleData BundleReadMe string // BundleCharms includes all the charm URLs referenced // by the bundle, including base URLs where they are // not already included. BundleCharms []*charm.URL // BundleMachineCount counts the machines used or created // by the bundle. It is nil for charms. BundleMachineCount *int // BundleUnitCount counts the units created by the bundle. // It is nil for charms. BundleUnitCount *int // TODO Add fields denormalized for search purposes // and search ranking field(s). // Contents holds entries for frequently accessed // entries in the file's blob. Storing this avoids // the need to linearly read the zip file's manifest // every time we access one of these files. Contents map[FileId]ZipFile `json:",omitempty" bson:",omitempty"` // PromulgatedURL holds the promulgated URL of the entity. If the entity // is not promulgated this should be set to nil. PromulgatedURL *charm.URL `json:",omitempty" bson:"promulgated-url,omitempty"` // PromulgatedRevision holds the revision number from the promulgated URL. // If the entity is not promulgated this should be set to -1. PromulgatedRevision int `bson:"promulgated-revision"` // TODO we could potentially use map[params.Channel] bool // instead of having a separate field for each channel. // Development holds whether the entity has been published in the // "development" channel. Development bool // Stable holds whether the entity has been published in the // "stable" channel. Stable bool } // PreferredURL returns the preferred way to refer to this entity. 
If // the entity has a promulgated URL and usePromulgated is true then the // promulgated URL will be used, otherwise the standard URL is used. // // The returned URL may be modified freely. func (e *Entity) PreferredURL(usePromulgated bool) *charm.URL { var u charm.URL if usePromulgated && e.PromulgatedURL != nil { u = *e.PromulgatedURL } else { u = *e.URL } return &u } // BaseEntity holds metadata for a charm or bundle // independent of any specific uploaded revision or series. type BaseEntity struct { // URL holds the reference URL of of charm on bundle // regardless of its revision, series or promulgation status // (this omits the revision and series from URL). // e.g., cs:~user/collection/foo URL *charm.URL `bson:"_id"` // User holds the user part of the entity URL (for instance, "joe"). User string // Name holds the name of the entity (for instance "wordpress"). Name string // Promulgated specifies whether the charm or bundle should be // promulgated. Promulgated IntBool // CommonInfo holds arbitrary common extra metadata associated with // the base entity. Thhose data apply to all revisions. // The byte slices hold JSON-encoded data. CommonInfo map[string][]byte `bson:",omitempty" json:",omitempty"` // ChannelACLs holds a map from an entity channel to the ACLs // that apply to entities that use this base entity that are associated // with the given channel. ChannelACLs map[params.Channel]ACL // ChannelEntities holds a set of channels, each containing a set // of series holding the currently published entity revision for // that channel and series. ChannelEntities map[params.Channel]map[string]*charm.URL } // ACL holds lists of users and groups that are // allowed to perform specific actions. type ACL struct { // Read holds users and groups that are allowed to read the charm // or bundle. Read []string // Write holds users and groups that are allowed to upload/modify the charm // or bundle. 
Write []string } type FileId string const ( FileReadMe FileId = "readme" FileIcon FileId = "icon" ) // ZipFile refers to a specific file in the uploaded archive blob. type ZipFile struct { // Compressed specifies whether the file is compressed or not. Compressed bool // Offset holds the offset into the zip archive of the start of // the file's data. Offset int64 // Size holds the size of the file before decompression. Size int64 } // Valid reports whether f is a valid (non-zero) reference to // a zip file. func (f ZipFile) IsValid() bool { // Note that no valid zip files can start at offset zero, // because that's where the zip header lives. return f != ZipFile{} } // Log holds the in-database representation of a log message sent to the charm // store. type Log struct { // Data holds the JSON-encoded log message. Data []byte // Level holds the log level: whether the log is a warning, an error, etc. Level LogLevel // Type holds the log type. Type LogType // URLs holds a slice of entity URLs associated with the log message. URLs []*charm.URL // Time holds the time of the log. Time time.Time } // LogLevel holds the level associated with a log. type LogLevel int // When introducing a new log level, do the following: // 1) add the new level as a constant below; // 2) add the new level in params as a string for HTTP requests/responses; // 3) include the new level in the mongodocLogLevels and paramsLogLevels maps // in internal/v4. const ( _ LogLevel = iota InfoLevel WarningLevel ErrorLevel ) // LogType holds the type of the log. type LogType int // When introducing a new log type, do the following: // 1) add the new type as a constant below; // 2) add the new type in params as a string for HTTP requests/responses; // 3) include the new type in the mongodocLogTypes and paramsLogTypes maps // in internal/v4. const ( _ LogType = iota IngestionType LegacyStatisticsType ) type MigrationName string // Migration holds information about the database migration. 
type Migration struct { // Executed holds the migration names for migrations already executed. Executed []MigrationName } // IntBool is a bool that will be represented internally in the database as 1 for // true and -1 for false. type IntBool bool func (b IntBool) GetBSON() (interface{}, error) { if b { return 1, nil } return -1, nil } func (b *IntBool) SetBSON(raw bson.Raw) error { var x int if err := raw.Unmarshal(&x); err != nil { return errgo.Notef(err, "cannot unmarshal value") } switch x { case 1: *b = IntBool(true) case -1: *b = IntBool(false) default: return errgo.Newf("invalid value %d", x) } return nil } // BaseURL returns the "base" version of url. If // url represents an entity, then the returned URL // will represent its base entity. func BaseURL(url *charm.URL) *charm.URL { newURL := *url newURL.Revision = -1 newURL.Series = "" return &newURL } func copyURL(u *charm.URL) *charm.URL { u1 := *u return &u1 } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/doc_test.go0000664000175000017500000000621512672604603027154 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package mongodoc_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" import ( "testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/mgo.v2/bson" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" ) func TestPackage(t *testing.T) { gc.TestingT(t) } type DocSuite struct{} var _ = gc.Suite(&DocSuite{}) func (s *DocSuite) TestIntBoolGetBSON(c *gc.C) { test := bson.D{{"true", mongodoc.IntBool(true)}, {"false", mongodoc.IntBool(false)}} b, err := bson.Marshal(test) c.Assert(err, gc.IsNil) result := make(map[string]int, 2) err = bson.Unmarshal(b, &result) c.Assert(err, gc.IsNil) c.Assert(result["true"], gc.Equals, 1) c.Assert(result["false"], gc.Equals, -1) } func (s *DocSuite) TestIntBoolSetBSON(c *gc.C) { test := bson.D{{"true", 1}, {"false", -1}} b, err := bson.Marshal(test) c.Assert(err, gc.IsNil) var result map[string]mongodoc.IntBool err = bson.Unmarshal(b, &result) c.Assert(err, gc.IsNil) c.Assert(result, jc.DeepEquals, map[string]mongodoc.IntBool{"true": true, "false": false}) } func (s *DocSuite) TestIntBoolSetBSONIncorrectType(c *gc.C) { test := bson.D{{"test", "true"}} b, err := bson.Marshal(test) c.Assert(err, gc.IsNil) var result map[string]mongodoc.IntBool err = bson.Unmarshal(b, &result) c.Assert(err, gc.ErrorMatches, "cannot unmarshal value: BSON kind 0x02 isn't compatible with type int") } func (s *DocSuite) TestIntBoolSetBSONInvalidValue(c *gc.C) { test := bson.D{{"test", 2}} b, err := bson.Marshal(test) c.Assert(err, gc.IsNil) var result map[string]mongodoc.IntBool err = bson.Unmarshal(b, &result) c.Assert(err, gc.ErrorMatches, `invalid value 2`) } var preferredURLTests = []struct { entity *mongodoc.Entity usePromulgated bool expectURLFalse string expectURLTrue string }{{ entity: &mongodoc.Entity{ URL: charm.MustParseURL("~ken/trusty/b-1"), }, expectURLFalse: "cs:~ken/trusty/b-1", expectURLTrue: "cs:~ken/trusty/b-1", }, { entity: &mongodoc.Entity{ URL: 
charm.MustParseURL("~dmr/trusty/c-1"), PromulgatedURL: charm.MustParseURL("trusty/c-2"), }, expectURLFalse: "cs:~dmr/trusty/c-1", expectURLTrue: "cs:trusty/c-2", }, { entity: &mongodoc.Entity{ URL: charm.MustParseURL("~dmr/trusty/c-1"), PromulgatedURL: charm.MustParseURL("trusty/c-2"), Development: true, }, expectURLFalse: "cs:~dmr/trusty/c-1", expectURLTrue: "cs:trusty/c-2", }, { entity: &mongodoc.Entity{ URL: charm.MustParseURL("~dmr/trusty/c-1"), Development: true, }, expectURLFalse: "cs:~dmr/trusty/c-1", expectURLTrue: "cs:~dmr/trusty/c-1", }} func (s *DocSuite) TestPreferredURL(c *gc.C) { for i, test := range preferredURLTests { c.Logf("test %d: %#v", i, test.entity) c.Assert(test.entity.PreferredURL(false).String(), gc.Equals, test.expectURLFalse) c.Assert(test.entity.PreferredURL(true).String(), gc.Equals, test.expectURLTrue) // Ensure no aliasing test.entity.PreferredURL(false).Series = "foo" c.Assert(test.entity.PreferredURL(false).Series, gc.Not(gc.Equals), "foo") } } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/0000775000175000017500000000000012672604603024253 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/cache_test.go0000664000175000017500000001350612672604603026711 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package cache_test import ( "fmt" "sync" "time" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/juju/charmstore.v5-unstable/internal/cache" ) type suite struct{} var _ = gc.Suite(&suite{}) func (*suite) TestSimpleGet(c *gc.C) { p := cache.New(time.Hour) v, err := p.Get("a", fetchValue(2)) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, 2) } func (*suite) TestSimpleRefresh(c *gc.C) { p := cache.New(time.Hour) v, err := p.Get("a", fetchValue(2)) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, 2) v, err = p.Get("a", fetchValue(4)) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, 2) p.Evict("a") v, err = p.Get("a", fetchValue(3)) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, 3) v, err = p.Get("a", fetchValue(4)) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, 3) } func (*suite) TestFetchError(c *gc.C) { p := cache.New(time.Hour) expectErr := errgo.New("hello") v, err := p.Get("a", fetchError(expectErr)) c.Assert(err, gc.ErrorMatches, "hello") c.Assert(errgo.Cause(err), gc.Equals, expectErr) c.Assert(v, gc.Equals, nil) } func (*suite) TestFetchOnlyOnce(c *gc.C) { p := cache.New(time.Hour) v, err := p.Get("a", fetchValue(2)) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, 2) v, err = p.Get("a", fetchError(errUnexpectedFetch)) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, 2) } func (*suite) TestEntryExpiresAfterMaxEntryAge(c *gc.C) { now := time.Now() p := cache.New(time.Minute) v, err := cache.GetAtTime(p, "a", fetchValue(2), now) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, 2) // Entry is definitely not expired before half the entry expiry time. 
v, err = cache.GetAtTime(p, "a", fetchError(errUnexpectedFetch), now.Add(time.Minute/2-1)) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, 2) // Entry is definitely expired after the entry expiry time v, err = cache.GetAtTime(p, "a", fetchValue(3), now.Add(time.Minute+1)) c.Assert(v, gc.Equals, 3) } func (*suite) TestEntriesRemovedWhenNotRetrieved(c *gc.C) { now := time.Now() p := cache.New(time.Minute) // Populate the cache with an initial entry. v, err := cache.GetAtTime(p, "a", fetchValue("a"), now) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, "a") c.Assert(p.Len(), gc.Equals, 1) // Fetch another item after the expiry time, // causing current entries to be moved to old. v, err = cache.GetAtTime(p, "b", fetchValue("b"), now.Add(time.Minute+1)) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, "b") c.Assert(p.Len(), gc.Equals, 2) // Fetch the other item after another expiry time // causing the old entries to be discarded because // nothing has fetched them. v, err = cache.GetAtTime(p, "b", fetchValue("b"), now.Add(time.Minute*2+2)) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, "b") c.Assert(p.Len(), gc.Equals, 1) } // TestRefreshedEntry tests the code path where a value is moved // from the old map to new. func (*suite) TestRefreshedEntry(c *gc.C) { now := time.Now() p := cache.New(time.Minute) // Populate the cache with an initial entry. v, err := cache.GetAtTime(p, "a", fetchValue("a"), now) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, "a") c.Assert(p.Len(), gc.Equals, 1) // Fetch another item very close to the expiry time. v, err = cache.GetAtTime(p, "b", fetchValue("b"), now.Add(time.Minute-1)) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, "b") c.Assert(p.Len(), gc.Equals, 2) // Fetch it again just after the expiry time, // which should move it into the new map. 
v, err = cache.GetAtTime(p, "b", fetchError(errUnexpectedFetch), now.Add(time.Minute+1)) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, "b") c.Assert(p.Len(), gc.Equals, 2) // Fetch another item, causing "a" to be removed from the cache // and keeping "b" in there. v, err = cache.GetAtTime(p, "c", fetchValue("c"), now.Add(time.Minute*2+2)) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, "c") c.Assert(p.Len(), gc.Equals, 2) } // TestConcurrentFetch checks that the cache is safe // to use concurrently. It is designed to fail when // tested with the race detector enabled. func (*suite) TestConcurrentFetch(c *gc.C) { p := cache.New(time.Minute) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() v, err := p.Get("a", fetchValue("a")) c.Check(err, gc.IsNil) c.Check(v, gc.Equals, "a") }() wg.Add(1) go func() { defer wg.Done() v, err := p.Get("b", fetchValue("b")) c.Check(err, gc.IsNil) c.Check(v, gc.Equals, "b") }() wg.Wait() } func (*suite) TestRefreshSpread(c *gc.C) { now := time.Now() p := cache.New(time.Minute) // Get all values to start with. const N = 100 for i := 0; i < N; i++ { v, err := cache.GetAtTime(p, fmt.Sprint(i), fetchValue(i), now) c.Assert(err, gc.IsNil) c.Assert(v, gc.Equals, i) } counts := make([]int, time.Minute/time.Millisecond/10+1) // Continually get values over the course of the // expiry time; the fetches should be spread out. slot := 0 for t := now.Add(0); t.Before(now.Add(time.Minute + 1)); t = t.Add(time.Millisecond * 10) { for i := 0; i < N; i++ { cache.GetAtTime(p, fmt.Sprint(i), func() (interface{}, error) { counts[slot]++ return i, nil }, t) } slot++ } // There should be no fetches in the first half of the cycle. 
for i := 0; i < len(counts)/2; i++ { c.Assert(counts[i], gc.Equals, 0, gc.Commentf("slot %d", i)) } max := 0 total := 0 for _, count := range counts { if count > max { max = count } total += count } if max > 10 { c.Errorf("requests grouped too closely (max %d)", max) } c.Assert(total, gc.Equals, N) } var errUnexpectedFetch = errgo.New("fetch called unexpectedly") func fetchError(err error) func() (interface{}, error) { return func() (interface{}, error) { return nil, err } } func fetchValue(val interface{}) func() (interface{}, error) { return func() (interface{}, error) { return val, nil } } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/export_test.go0000664000175000017500000000022012672604603027154 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package cache var GetAtTime = (*Cache).getAtTime charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/cache.go0000664000175000017500000001067212672604603025653 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package cache // import "gopkg.in/juju/charmstore.v5-unstable/internal/cache" import ( "math/rand" "sync" "time" "gopkg.in/errgo.v1" ) type entry struct { value interface{} expire time.Time } // Cache holds a time-limited cache of values for string keys. type Cache struct { maxAge time.Duration // mu guards the fields below it. mu sync.Mutex // expire holds when the cache is due to expire. expire time.Time // We hold two maps so that can avoid scanning through all the // items in the cache when the cache needs to be refreshed. // Instead, we move items from old to new when they're accessed // and throw away the old map at refresh time. old, new map[string]entry } // New returns a new Cache that will cache items for // at most maxAge. 
func New(maxAge time.Duration) *Cache { // A maxAge is < 2ns then the expiry code will panic because the // actual expiry time will be maxAge - a random value in the // interval [0. maxAge/2). If maxAge is < 2ns then this requires // a random interval in [0, 0) which causes a panic. if maxAge < 2*time.Nanosecond { maxAge = 2 * time.Nanosecond } // The returned cache will have a zero-valued expire // time, so will expire immediately, causing the new // map to be created. return &Cache{ maxAge: maxAge, } } // Len returns the total number of cached entries. func (c *Cache) Len() int { c.mu.Lock() defer c.mu.Unlock() return len(c.old) + len(c.new) } // Evict removes the entry with the given key from the cache if present. func (c *Cache) Evict(key string) { c.mu.Lock() defer c.mu.Unlock() delete(c.new, key) } // EvictAll removes all entries from the cache. func (c *Cache) EvictAll() { c.mu.Lock() defer c.mu.Unlock() c.new = make(map[string]entry) c.old = nil } // Get returns the value for the given key, using fetch to fetch // the value if it is not found in the cache. // If fetch returns an error, the returned error from Get will have // the same cause. func (c *Cache) Get(key string, fetch func() (interface{}, error)) (interface{}, error) { return c.getAtTime(key, fetch, time.Now()) } // getAtTime is the internal version of Get, useful for testing; now represents the current // time. func (c *Cache) getAtTime(key string, fetch func() (interface{}, error), now time.Time) (interface{}, error) { if val, ok := c.cachedValue(key, now); ok { return val, nil } // Fetch the data without the mutex held // so that one slow fetch doesn't hold up // all the other cache accesses. val, err := fetch() if err != nil { // TODO consider caching cache misses. return nil, errgo.Mask(err, errgo.Any) } c.mu.Lock() defer c.mu.Unlock() // Add the new cache entry. 
Because it's quite likely that a // large number of cache entries will be initially fetched at // the same time, we want to avoid a thundering herd of fetches // when they all expire at the same time, so we set the expiry // time to a random interval between [now + t.maxAge/2, now + // t.maxAge] and so they'll be spread over time without // compromising the maxAge value. c.new[key] = entry{ value: val, expire: now.Add(c.maxAge - time.Duration(rand.Int63n(int64(c.maxAge/2)))), } return val, nil } // cachedValue returns any cached value for the given key // and whether it was found. func (c *Cache) cachedValue(key string, now time.Time) (interface{}, bool) { c.mu.Lock() defer c.mu.Unlock() if now.After(c.expire) { c.old = c.new c.new = make(map[string]entry) c.expire = now.Add(c.maxAge) } if e, ok := c.entry(c.new, key, now); ok { return e.value, true } if e, ok := c.entry(c.old, key, now); ok { // An old entry has been accessed; move it to the new // map so that we only use a single map access for // subsequent lookups. Note that because we use the same // duration for cache refresh (c.expire) as for max // entry age, this is strictly speaking unnecessary // because any entries in old will have expired by the // time it is dropped. c.new[key] = e delete(c.old, key) return e.value, true } return nil, false } // entry returns an entry from the map and whether it // was found. If the entry has expired, it is deleted from the map. func (c *Cache) entry(m map[string]entry, key string, now time.Time) (entry, bool) { e, ok := m[key] if !ok { return entry{}, false } if now.After(e.expire) { // Delete expired entries. delete(m, key) return entry{}, false } return e, true } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/package_test.go0000664000175000017500000000032512672604603027234 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package cache_test import ( "testing" gc "gopkg.in/check.v1" ) func TestPackage(t *testing.T) { gc.TestingT(t) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/legacy/0000775000175000017500000000000012672604603024454 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/legacy/api_test.go0000664000175000017500000004612512672604603026623 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package legacy_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/legacy" import ( "crypto/sha256" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/http/httptest" "os" "time" jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/legacy" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/stats" ) var serverParams = charmstore.ServerParams{ AuthUsername: "test-user", AuthPassword: "test-password", } type APISuite struct { jujutesting.IsolatedMgoSuite srv *charmstore.Server store *charmstore.Store } var _ = gc.Suite(&APISuite{}) func (s *APISuite) SetUpTest(c *gc.C) { s.IsolatedMgoSuite.SetUpTest(c) s.srv, s.store = newServer(c, s.Session, serverParams) } func (s *APISuite) TearDownTest(c *gc.C) { s.store.Close() s.store.Pool().Close() s.srv.Close() s.IsolatedMgoSuite.TearDownTest(c) } func newServer(c *gc.C, session *mgo.Session, config charmstore.ServerParams) (*charmstore.Server, *charmstore.Store) { db := session.DB("charmstore") pool, err := charmstore.NewPool(db, nil, nil, config) c.Assert(err, 
gc.IsNil) srv, err := charmstore.NewServer(db, nil, config, map[string]charmstore.NewAPIHandlerFunc{"": legacy.NewAPIHandler}) c.Assert(err, gc.IsNil) return srv, pool.Store() } func (s *APISuite) TestCharmArchive(c *gc.C) { _, wordpress := s.addPublicCharm(c, "wordpress", "cs:precise/wordpress-0") archiveBytes, err := ioutil.ReadFile(wordpress.Path) c.Assert(err, gc.IsNil) rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: "/charm/precise/wordpress-0", }) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes) c.Assert(rec.Header().Get("Content-Length"), gc.Equals, fmt.Sprint(len(rec.Body.Bytes()))) // Test with unresolved URL. rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: "/charm/wordpress", }) c.Assert(rec.Code, gc.Equals, http.StatusOK) c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes) c.Assert(rec.Header().Get("Content-Length"), gc.Equals, fmt.Sprint(len(rec.Body.Bytes()))) // Check that the HTTP range logic is plugged in OK. If this // is working, we assume that the whole thing is working OK, // as net/http is well-tested. 
rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: "/charm/precise/wordpress-0", Header: http.Header{"Range": {"bytes=10-100"}}, }) c.Assert(rec.Code, gc.Equals, http.StatusPartialContent, gc.Commentf("body: %q", rec.Body.Bytes())) c.Assert(rec.Body.Bytes(), gc.HasLen, 100-10+1) c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes[10:101]) } func (s *APISuite) TestGetElidesSeriesFromMultiSeriesCharmMetadata(c *gc.C) { _, ch := s.addPublicCharm(c, "multi-series", "cs:~charmers/multi-series-0") rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: "/charm/~charmers/multi-series", }) c.Assert(rec.Code, gc.Equals, http.StatusOK) gotCh, err := charm.ReadCharmArchiveBytes(rec.Body.Bytes()) c.Assert(err, gc.IsNil) chMeta := ch.Meta() chMeta.Series = nil c.Assert(gotCh.Meta(), jc.DeepEquals, chMeta) } func (s *APISuite) TestPostNotAllowed(c *gc.C) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Method: "POST", URL: "/charm/precise/wordpress", ExpectStatus: http.StatusMethodNotAllowed, ExpectBody: params.Error{ Code: params.ErrMethodNotAllowed, Message: params.ErrMethodNotAllowed.Error(), }, }) } func (s *APISuite) TestCharmArchiveUnresolvedURL(c *gc.C) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: "/charm/wordpress", ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Code: params.ErrNotFound, Message: `no matching charm or bundle for cs:wordpress`, }, }) } func (s *APISuite) TestCharmInfoNotFound(c *gc.C) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: "/charm-info?charms=cs:precise/something-23", ExpectStatus: http.StatusOK, ExpectBody: map[string]charmrepo.InfoResponse{ "cs:precise/something-23": { Errors: []string{"entry not found"}, }, }, }) } func (s *APISuite) TestServeCharmInfo(c *gc.C) { wordpressURL, wordpress := s.addPublicCharm(c, "wordpress", "cs:precise/wordpress-1") hashSum := fileSHA256(c, 
wordpress.Path) digest, err := json.Marshal("who@canonical.com-bzr-digest") c.Assert(err, gc.IsNil) tests := []struct { about string url string extrainfo map[string][]byte canonical string sha string digest string revision int err string }{{ about: "full charm URL with digest extra info", url: wordpressURL.String(), extrainfo: map[string][]byte{ params.BzrDigestKey: digest, }, canonical: "cs:precise/wordpress-1", sha: hashSum, digest: "who@canonical.com-bzr-digest", revision: 1, }, { about: "full charm URL without digest extra info", url: wordpressURL.String(), canonical: "cs:precise/wordpress-1", sha: hashSum, revision: 1, }, { about: "partial charm URL with digest extra info", url: "cs:wordpress", extrainfo: map[string][]byte{ params.BzrDigestKey: digest, }, canonical: "cs:precise/wordpress-1", sha: hashSum, digest: "who@canonical.com-bzr-digest", revision: 1, }, { about: "partial charm URL without extra info", url: "cs:wordpress", canonical: "cs:precise/wordpress-1", sha: hashSum, revision: 1, }, { about: "invalid digest extra info", url: "cs:wordpress", extrainfo: map[string][]byte{ params.BzrDigestKey: []byte("[]"), }, canonical: "cs:precise/wordpress-1", sha: hashSum, revision: 1, err: `cannot unmarshal digest: json: cannot unmarshal array into Go value of type string`, }, { about: "charm not found", url: "cs:precise/non-existent", err: "entry not found", }, { about: "invalid charm URL", url: "cs:/bad", err: `entry not found`, }, { about: "invalid charm schema", url: "gopher:archie-server", err: `entry not found`, }, { about: "invalid URL", url: "/charm-info?charms=cs:not-found", err: "entry not found", }} for i, test := range tests { c.Logf("test %d: %s", i, test.about) err = s.store.UpdateEntity(wordpressURL, bson.D{{ "$set", bson.D{{"extrainfo", test.extrainfo}}, }}) c.Assert(err, gc.IsNil) expectInfo := charmrepo.InfoResponse{ CanonicalURL: test.canonical, Sha256: test.sha, Revision: test.revision, Digest: test.digest, } if test.err != "" { 
expectInfo.Errors = []string{test.err} } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: "/charm-info?charms=" + test.url, ExpectStatus: http.StatusOK, ExpectBody: map[string]charmrepo.InfoResponse{ test.url: expectInfo, }, }) } } func (s *APISuite) TestCharmInfoCounters(c *gc.C) { if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } // Add two charms to the database, a promulgated one and a user owned one. s.addPublicCharm(c, "wordpress", "cs:utopic/wordpress-42") s.addPublicCharm(c, "wordpress", "cs:~who/trusty/wordpress-47") requestInfo := func(id string, times int) { for i := 0; i < times; i++ { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: "/charm-info?charms=" + id, }) c.Assert(rec.Code, gc.Equals, http.StatusOK) } } // Request charm info several times for the promulgated charm, // the user owned one and a missing charm. requestInfo("utopic/wordpress-42", 4) requestInfo("~who/trusty/wordpress-47", 3) requestInfo("precise/django-0", 2) // The charm-info count for the promulgated charm has been updated. key := []string{params.StatsCharmInfo, "utopic", "wordpress"} stats.CheckCounterSum(c, s.store, key, false, 4) // The charm-info count for the user owned charm has been updated. key = []string{params.StatsCharmInfo, "trusty", "wordpress", "who"} stats.CheckCounterSum(c, s.store, key, false, 3) // The charm-missing count for the missing charm has been updated. key = []string{params.StatsCharmMissing, "precise", "django"} stats.CheckCounterSum(c, s.store, key, false, 2) // The charm-info count for the missing charm is still zero. 
key = []string{params.StatsCharmInfo, "precise", "django"} stats.CheckCounterSum(c, s.store, key, false, 0) } func (s *APISuite) TestAPIInfoWithGatedCharm(c *gc.C) { wordpressURL, _ := s.addPublicCharm(c, "wordpress", "cs:precise/wordpress-0") s.store.SetPerms(&wordpressURL.URL, "stable.read", "bob") httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: "/charm-info?charms=" + wordpressURL.URL.String(), ExpectStatus: http.StatusOK, ExpectBody: map[string]charmrepo.InfoResponse{ wordpressURL.URL.String(): { Errors: []string{"entry not found"}, }, }, }) } func fileSHA256(c *gc.C, path string) string { f, err := os.Open(path) c.Assert(err, gc.IsNil) hash := sha256.New() _, err = io.Copy(hash, f) c.Assert(err, gc.IsNil) return fmt.Sprintf("%x", hash.Sum(nil)) } func (s *APISuite) TestCharmPackageGet(c *gc.C) { wordpressURL, wordpress := s.addPublicCharm(c, "wordpress", "cs:precise/wordpress-0") archiveBytes, err := ioutil.ReadFile(wordpress.Path) c.Assert(err, gc.IsNil) srv := httptest.NewServer(s.srv) defer srv.Close() s.PatchValue(&charmrepo.CacheDir, c.MkDir()) s.PatchValue(&charmrepo.LegacyStore.BaseURL, srv.URL) ch, err := charmrepo.LegacyStore.Get(&wordpressURL.URL) c.Assert(err, gc.IsNil) chArchive := ch.(*charm.CharmArchive) data, err := ioutil.ReadFile(chArchive.Path) c.Assert(err, gc.IsNil) c.Assert(data, gc.DeepEquals, archiveBytes) } func (s *APISuite) TestCharmPackageCharmInfo(c *gc.C) { wordpressURL, wordpress := s.addPublicCharm(c, "wordpress", "cs:precise/wordpress-0") wordpressSHA256 := fileSHA256(c, wordpress.Path) mysqlURL, mySQL := s.addPublicCharm(c, "wordpress", "cs:precise/mysql-2") mysqlSHA256 := fileSHA256(c, mySQL.Path) notFoundURL := charm.MustParseURL("cs:precise/not-found-3") srv := httptest.NewServer(s.srv) defer srv.Close() s.PatchValue(&charmrepo.LegacyStore.BaseURL, srv.URL) resp, err := charmrepo.LegacyStore.Info(wordpressURL.PreferredURL(), mysqlURL.PreferredURL(), notFoundURL) c.Assert(err, gc.IsNil) 
c.Assert(resp, gc.HasLen, 3) c.Assert(resp, jc.DeepEquals, []*charmrepo.InfoResponse{{ CanonicalURL: wordpressURL.String(), Sha256: wordpressSHA256, }, { CanonicalURL: mysqlURL.String(), Sha256: mysqlSHA256, Revision: 2, }, { Errors: []string{"charm not found: " + notFoundURL.String()}, }}) } var serverStatusTests = []struct { path string code int }{ {"/charm-info/any", 404}, {"/charm/bad-url", 404}, {"/charm/bad-series/wordpress", 404}, } func (s *APISuite) TestServerStatus(c *gc.C) { // TODO(rog) add tests from old TestServerStatus tests // when we implement charm-info. for i, test := range serverStatusTests { c.Logf("test %d: %s", i, test.path) resp := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, URL: test.path, }) c.Assert(resp.Code, gc.Equals, test.code, gc.Commentf("body: %s", resp.Body)) } } func (s *APISuite) addPublicCharm(c *gc.C, charmName, curl string) (*router.ResolvedURL, *charm.CharmArchive) { rurl := &router.ResolvedURL{ URL: *charm.MustParseURL(curl), PromulgatedRevision: -1, } if rurl.URL.User == "" { rurl.URL.User = "charmers" rurl.PromulgatedRevision = rurl.URL.Revision } archive := storetesting.Charms.CharmArchive(c.MkDir(), charmName) err := s.store.AddCharmWithArchive(rurl, archive) c.Assert(err, gc.IsNil) s.setPublic(c, rurl) return rurl, archive } func (s *APISuite) setPublic(c *gc.C, rurl *router.ResolvedURL) { err := s.store.SetPerms(&rurl.URL, "stable.read", params.Everyone) c.Assert(err, gc.IsNil) err = s.store.Publish(rurl, params.StableChannel) c.Assert(err, gc.IsNil) } var serveCharmEventErrorsTests = []struct { about string url string responseUrl string err string }{{ about: "invalid charm URL", url: "no-such:charm", err: `invalid charm URL: charm or bundle URL has invalid schema: "no-such:charm"`, }, { about: "revision specified", url: "cs:utopic/django-42", err: "got charm URL with revision: cs:utopic/django-42", }, { about: "charm not found", url: "cs:trusty/django", err: "entry not found", }, { about: 
"ignoring digest", url: "precise/django-47@a-bzr-digest", responseUrl: "precise/django-47", err: "got charm URL with revision: cs:precise/django-47", }} func (s *APISuite) TestServeCharmEventErrors(c *gc.C) { for i, test := range serveCharmEventErrorsTests { c.Logf("test %d: %s", i, test.about) if test.responseUrl == "" { test.responseUrl = test.url } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: "/charm-event?charms=" + test.url, ExpectStatus: http.StatusOK, ExpectBody: map[string]charmrepo.EventResponse{ test.responseUrl: { Errors: []string{test.err}, }, }, }) } } func (s *APISuite) TestServeCharmEvent(c *gc.C) { // Add three charms to the charm store. mysqlUrl, _ := s.addPublicCharm(c, "mysql", "cs:trusty/mysql-2") riakUrl, _ := s.addPublicCharm(c, "riak", "cs:utopic/riak-3") // Update the mysql charm with a valid digest extra-info. s.addExtraInfoDigest(c, mysqlUrl, "who@canonical.com-bzr-digest") // Update the riak charm with an invalid digest extra-info. err := s.store.UpdateEntity(riakUrl, bson.D{{ "$set", bson.D{{"extrainfo", map[string][]byte{ params.BzrDigestKey: []byte(":"), }}}, }}) c.Assert(err, gc.IsNil) // Retrieve the entities. 
mysql, err := s.store.FindEntity(mysqlUrl, nil) c.Assert(err, gc.IsNil) riak, err := s.store.FindEntity(riakUrl, nil) c.Assert(err, gc.IsNil) tests := []struct { about string query string expect map[string]*charmrepo.EventResponse }{{ about: "valid digest", query: "?charms=cs:trusty/mysql", expect: map[string]*charmrepo.EventResponse{ "cs:trusty/mysql": { Kind: "published", Revision: mysql.Revision, Time: mysql.UploadTime.UTC().Format(time.RFC3339), Digest: "who@canonical.com-bzr-digest", }, }, }, { about: "invalid digest", query: "?charms=cs:utopic/riak", expect: map[string]*charmrepo.EventResponse{ "cs:utopic/riak": { Kind: "published", Revision: riak.Revision, Time: riak.UploadTime.UTC().Format(time.RFC3339), Errors: []string{"cannot unmarshal digest: invalid character ':' looking for beginning of value"}, }, }, }, { about: "partial charm URL", query: "?charms=cs:mysql", expect: map[string]*charmrepo.EventResponse{ "cs:mysql": { Kind: "published", Revision: mysql.Revision, Time: mysql.UploadTime.UTC().Format(time.RFC3339), Digest: "who@canonical.com-bzr-digest", }, }, }, { about: "digest in request", query: "?charms=cs:trusty/mysql@my-digest", expect: map[string]*charmrepo.EventResponse{ "cs:trusty/mysql": { Kind: "published", Revision: mysql.Revision, Time: mysql.UploadTime.UTC().Format(time.RFC3339), Digest: "who@canonical.com-bzr-digest", }, }, }, { about: "multiple charms", query: "?charms=cs:mysql&charms=utopic/riak", expect: map[string]*charmrepo.EventResponse{ "cs:mysql": { Kind: "published", Revision: mysql.Revision, Time: mysql.UploadTime.UTC().Format(time.RFC3339), Digest: "who@canonical.com-bzr-digest", }, "utopic/riak": { Kind: "published", Revision: riak.Revision, Time: riak.UploadTime.UTC().Format(time.RFC3339), Errors: []string{"cannot unmarshal digest: invalid character ':' looking for beginning of value"}, }, }, }} for i, test := range tests { c.Logf("test %d: %s", i, test.about) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: 
s.srv, URL: "/charm-event" + test.query, ExpectStatus: http.StatusOK, ExpectBody: test.expect, }) } } func (s *APISuite) TestServeCharmEventDigestNotFound(c *gc.C) { // Add a charm without a Bazaar digest. url, _ := s.addPublicCharm(c, "wordpress", "cs:trusty/wordpress-42") // Pretend the entity has been uploaded right now, and assume the test does // not take more than two minutes to run. s.updateUploadTime(c, url, time.Now()) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: "/charm-event?charms=cs:trusty/wordpress", ExpectStatus: http.StatusOK, ExpectBody: map[string]charmrepo.EventResponse{ "cs:trusty/wordpress": { Errors: []string{"entry not found"}, }, }, }) // Now change the entity upload time to be more than 2 minutes ago. s.updateUploadTime(c, url, time.Now().Add(-121*time.Second)) httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: "/charm-event?charms=cs:trusty/wordpress", ExpectStatus: http.StatusOK, ExpectBody: map[string]charmrepo.EventResponse{ "cs:trusty/wordpress": { Errors: []string{"digest not found: this can be due to an error while ingesting the entity"}, }, }, }) } func (s *APISuite) TestServeCharmEventLastRevision(c *gc.C) { // Add two revisions of the same charm. url1, _ := s.addPublicCharm(c, "wordpress", "cs:trusty/wordpress-1") url2, _ := s.addPublicCharm(c, "wordpress", "cs:trusty/wordpress-2") // Update the resulting entities with Bazaar digests. s.addExtraInfoDigest(c, url1, "digest-1") s.addExtraInfoDigest(c, url2, "digest-2") // Retrieve the most recent revision of the entity. entity, err := s.store.FindEntity(url2, nil) c.Assert(err, gc.IsNil) // Ensure the last revision is correctly returned. 
httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, URL: "/charm-event?charms=wordpress", ExpectStatus: http.StatusOK, ExpectBody: map[string]*charmrepo.EventResponse{ "wordpress": { Kind: "published", Revision: 2, Time: entity.UploadTime.UTC().Format(time.RFC3339), Digest: "digest-2", }, }, }) } func (s *APISuite) addExtraInfoDigest(c *gc.C, id *router.ResolvedURL, digest string) { b, err := json.Marshal(digest) c.Assert(err, gc.IsNil) err = s.store.UpdateEntity(id, bson.D{{ "$set", bson.D{{"extrainfo", map[string][]byte{ params.BzrDigestKey: b, }}}, }}) c.Assert(err, gc.IsNil) } func (s *APISuite) updateUploadTime(c *gc.C, id *router.ResolvedURL, uploadTime time.Time) { err := s.store.UpdateEntity(id, bson.D{{ "$set", bson.D{{"uploadtime", uploadTime}}, }}) c.Assert(err, gc.IsNil) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/legacy/api.go0000664000175000017500000002436112672604603025562 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. // The legacy package implements the legacy API, as follows: // // /charm-info // // A GET call to `/charm-info` returns info about one or more charms, including // its canonical URL, revision, SHA256 checksum and VCS revision digest. // The returned info is in JSON format. // For instance a request to `/charm-info?charms=cs:trusty/juju-gui` returns the // following response: // // {"cs:trusty/juju-gui": { // "canonical-url": "cs:trusty/juju-gui", // "revision": 3, // "sha256": "a15c77f3f92a0fb7b61e9...", // "digest": jeff.pihach@canonical.com-20140612210347-6cc9su1jqjkhbi84" // }} // // /charm-event: // // A GET call to `/charm-event` returns info about an event occurred in the life // of the specified charm(s). Currently two types of events are logged: // "published" (a charm has been published and it's available in the store) and // "publish-error" (an error occurred while importing the charm). // E.g. 
a call to `/charm-event?charms=cs:trusty/juju-gui` generates the following // JSON response: // // {"cs:trusty/juju-gui": { // "kind": "published", // "revision": 3, // "digest": "jeff.pihach@canonicalcom-20140612210347-6cc9su1jqjkhbi84", // "time": "2014-06-16T14:41:19Z" // }} // // /charm/ // // The `charm` API provides the ability to download a charm as a Zip archive, // given the charm identifier. For instance, it is possible to download the Juju // GUI charm by performing a GET call to `/charm/trusty/juju-gui-42`. Both the // revision and OS series can be omitted, e.g. `/charm/juju-gui` will download the // last revision of the Juju GUI charm with support to the more recent Ubuntu LTS // series. // // /stats/counter/ // // Stats can be retrieved by calling `/stats/counter/{key}` where key is a query // that specifies the counter stats to calculate and return. // // For instance, a call to `/stats/counter/charm-bundle:*` returns the number of // times a charm has been downloaded from the store. To get the same value for // a specific charm, it is possible to filter the results by passing the charm // series and name, e.g. `/stats/counter/charm-bundle:trusty:juju-gui`. // // The results can be grouped by specifying the `by` query (possible values are // `day` and `week`), and time delimited using the `start` and `stop` queries. // // It is also possible to list the results by passing `list=1`. 
For example, a GET // call to `/stats/counter/charm-bundle:trusty:*?by=day&list=1` returns an // aggregated count of trusty charms downloads, grouped by charm and day, similar // to the following: // // charm-bundle:trusty:juju-gui 2014-06-17 5 // charm-bundle:trusty:mysql 2014-06-17 1 package legacy // import "gopkg.in/juju/charmstore.v5-unstable/internal/legacy" import ( "encoding/json" "fmt" "net/http" "strings" "time" "github.com/juju/mempool" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/v4" ) type Handler struct { v4 v4.Handler } type reqHandler struct { v4 v4.ReqHandler mux *http.ServeMux store *charmstore.Store } // reqHandlerPool holds a cache of ReqHandlers to save // on allocation time. var reqHandlerPool = mempool.Pool{ New: func() interface{} { return newReqHandler() }, } func NewAPIHandler(pool *charmstore.Pool, config charmstore.ServerParams, rootPath string) charmstore.HTTPCloseHandler { return &Handler{ v4: v4.New(pool, config, rootPath), } } func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { req.ParseForm() rh, err := h.newReqHandler() if err != nil { router.WriteError(w, err) return } defer rh.close() rh.mux.ServeHTTP(w, req) } func (h *Handler) Close() { } func (h *Handler) newReqHandler() (*reqHandler, error) { v4h, err := h.v4.NewReqHandler(new(http.Request)) if err != nil { return nil, errgo.Mask(err, errgo.Is(charmstore.ErrTooManySessions)) } rh := reqHandlerPool.Get().(*reqHandler) rh.v4 = v4h rh.store = v4h.Store.Store return rh, nil } // newReqHandler returns a new instance of the legacy API handler. // The returned value has a nil v4 field. 
func newReqHandler() *reqHandler {
	h := &reqHandler{
		mux: http.NewServeMux(),
	}
	// The legacy API serves exactly these three endpoints; the v4
	// field is populated later by Handler.newReqHandler.
	h.handle("/charm-info", router.HandleJSON(h.serveCharmInfo))
	h.handle("/charm/", router.HandleErrors(h.serveCharm))
	h.handle("/charm-event", router.HandleJSON(h.serveCharmEvent))
	return h
}

// handle registers handler on the mux at the given path, stripping
// the path (minus any trailing slash) from the request URL before the
// handler sees it.
func (h *reqHandler) handle(path string, handler http.Handler) {
	prefix := strings.TrimSuffix(path, "/")
	h.mux.Handle(path, http.StripPrefix(prefix, handler))
}

// close releases the v4 request handler held by h and returns h to
// reqHandlerPool for reuse by a later request.
func (h *reqHandler) close() {
	h.v4.Close()
	h.v4 = v4.ReqHandler{}
	reqHandlerPool.Put(h)
}

// serveCharm serves a charm archive download for the charm id given in
// the request path by delegating to the v4 "archive" id handler.
// Only GET and HEAD are allowed.
func (h *reqHandler) serveCharm(w http.ResponseWriter, req *http.Request) error {
	if req.Method != "GET" && req.Method != "HEAD" {
		return params.ErrMethodNotAllowed
	}
	curl, err := charm.ParseURL(strings.TrimPrefix(req.URL.Path, "/"))
	if err != nil {
		// Treat an unparseable charm URL as not-found.
		return errgo.WithCausef(err, params.ErrNotFound, "")
	}
	return h.v4.Router.Handlers().Id["archive"](curl, w, req)
}

// charmStatsKey returns a stats key for the given charm reference and kind.
func charmStatsKey(url *charm.URL, kind string) []string {
	if url.User == "" {
		return []string{kind, url.Series, url.Name}
	}
	return []string{kind, url.Series, url.Name, url.User}
}

// errNotFound is the error reported to legacy clients for any entity
// that cannot be found (or, below, cannot be read).
var errNotFound = fmt.Errorf("entry not found")

// serveCharmInfo implements the legacy /charm-info endpoint: for each
// URL in the "charms" form values it reports the charm's canonical
// URL, revision, SHA256 and Bazaar digest, or a per-charm error entry.
func (h *reqHandler) serveCharmInfo(_ http.Header, req *http.Request) (interface{}, error) {
	response := make(map[string]*charmrepo.InfoResponse)
	for _, url := range req.Form["charms"] {
		c := &charmrepo.InfoResponse{}
		response[url] = c
		curl, err := charm.ParseURL(url)
		if err != nil {
			err = errNotFound
		}
		var entity *mongodoc.Entity
		if err == nil {
			entity, err = h.store.FindBestEntity(curl, params.UnpublishedChannel, nil)
			if errgo.Cause(err) == params.ErrNotFound {
				// The old API actually returned "entry not found"
				// on *any* error, but it seems reasonable to be
				// a little more descriptive for other errors.
				err = errNotFound
			}
		}
		var rurl *router.ResolvedURL
		if err == nil {
			rurl = charmstore.EntityResolvedURL(entity)
			if h.v4.AuthorizeEntity(rurl, req) != nil {
				// The charm is unauthorized and there's no way to
				// authorize it as part of the legacy API so we
				// just treat it as a not-found error.
				err = errNotFound
			}
		}
		// Prepare the response part for this charm.
		if err == nil {
			curl = entity.PreferredURL(curl.User == "")
			c.CanonicalURL = curl.String()
			c.Revision = curl.Revision
			c.Sha256 = entity.BlobHash256
			c.Digest, err = entityBzrDigest(entity)
			if err != nil {
				c.Errors = append(c.Errors, err.Error())
			}
			if v4.StatsEnabled(req) {
				h.store.IncCounterAsync(charmStatsKey(curl, params.StatsCharmInfo))
			}
		} else {
			c.Errors = append(c.Errors, err.Error())
			if curl != nil && v4.StatsEnabled(req) {
				h.store.IncCounterAsync(charmStatsKey(curl, params.StatsCharmMissing))
			}
		}
	}
	return response, nil
}

// serveCharmEvent returns events related to the charms specified in the
// "charms" query. In this implementation, the only supported event is
// "published", required by the "juju publish" command.
func (h *reqHandler) serveCharmEvent(_ http.Header, req *http.Request) (interface{}, error) {
	response := make(map[string]*charmrepo.EventResponse)
	for _, url := range req.Form["charms"] {
		c := &charmrepo.EventResponse{}
		// Ignore the digest part of the request.
		if i := strings.Index(url, "@"); i != -1 {
			url = url[:i]
		}
		// We intentionally do not implement the long_keys query parameter that
		// the legacy charm store supported, as "juju publish" does not use it.
		response[url] = c
		// Validate the charm URL.
		id, err := charm.ParseURL(url)
		if err != nil {
			c.Errors = []string{"invalid charm URL: " + err.Error()}
			continue
		}
		if id.Revision != -1 {
			c.Errors = []string{"got charm URL with revision: " + id.String()}
			continue
		}
		// Retrieve the charm.
		entity, err := h.store.FindBestEntity(id, params.UnpublishedChannel, charmstore.FieldSelector("_id", "uploadtime", "extrainfo"))
		if err != nil {
			if errgo.Cause(err) == params.ErrNotFound {
				// The old API actually returned "entry not found"
				// on *any* error, but it seems reasonable to be
				// a little more descriptive for other errors.
				err = errNotFound
			}
			c.Errors = []string{err.Error()}
			continue
		}
		// Retrieve the entity Bazaar digest.
		c.Digest, err = entityBzrDigest(entity)
		if err != nil {
			c.Errors = []string{err.Error()}
		} else if c.Digest == "" {
			// There are two possible reasons why an entity is found without a
			// digest:
			// 1) the entity has been recently added in the ingestion process,
			// but the extra-info has not been sent yet by "charmload";
			// 2) there was an error while ingesting the entity.
			// If the entity has been recently published, we assume case 1),
			// and therefore we return a not found error, forcing
			// "juju publish" to keep retrying and possibly succeed later.
			// Otherwise, we return an error so that "juju publish" exits with
			// an error and avoids an infinite loop.
			if time.Since(entity.UploadTime).Minutes() < 2 {
				c.Errors = []string{errNotFound.Error()}
			} else {
				c.Errors = []string{"digest not found: this can be due to an error while ingesting the entity"}
			}
			continue
		}
		// Prepare the response part for this charm.
		c.Kind = "published"
		// Promulgated (user-less) ids report the promulgated revision;
		// user-owned ids report the entity's own revision.
		if id.User == "" {
			c.Revision = entity.PromulgatedRevision
		} else {
			c.Revision = entity.Revision
		}
		c.Time = entity.UploadTime.UTC().Format(time.RFC3339)
		if v4.StatsEnabled(req) {
			h.store.IncCounterAsync(charmStatsKey(id, params.StatsCharmEvent))
		}
	}
	return response, nil
}

// entityBzrDigest returns the Bazaar VCS digest recorded in the
// entity's extra-info, or "" if none is present. The digest is stored
// as a JSON-encoded string.
func entityBzrDigest(entity *mongodoc.Entity) (string, error) {
	value, found := entity.ExtraInfo[params.BzrDigestKey]
	if !found {
		return "", nil
	}
	var digest string
	if err := json.Unmarshal(value, &digest); err != nil {
		return "", errgo.Notef(err, "cannot unmarshal digest")
	}
	return digest, nil
}
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/legacy/package_test.go0000664000175000017500000000047212672604603027440 0ustar marcomarco// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package legacy_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/legacy"

import (
	"testing"

	jujutesting "github.com/juju/testing"
)

// TestPackage hooks this package's gocheck suites into "go test",
// using the MongoDB-backed test harness from github.com/juju/testing.
func TestPackage(t *testing.T) {
	jujutesting.MgoTestPackage(t, nil)
}
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/entitycache/0000775000175000017500000000000012672604603025510 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/entitycache/cache_test.go0000664000175000017500000010061012672604603030147 0ustar marcomarcopackage entitycache_test

import (
	"fmt"
	"reflect"
	"strings"
	"sync"
	"time"

	jc "github.com/juju/testing/checkers"
	gc "gopkg.in/check.v1"
	"gopkg.in/errgo.v1"
	"gopkg.in/juju/charm.v6-unstable"
	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"

	"gopkg.in/juju/charmstore.v5-unstable/internal/entitycache"
	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
)

var _ = gc.Suite(&suite{})

type suite struct{}

// entityQuery records a single entity lookup made against the fake
// store used in these tests; the test answers it on the reply channel.
type entityQuery struct {
	url    *charm.URL
	fields map[string]int
	reply  chan entityReply
}

// entityReply is the fake store's answer to an entityQuery.
type entityReply struct {
	entity *mongodoc.Entity
	err    error
}

// baseEntityQuery records a single base-entity lookup made against the
// fake store; the test answers it on the reply channel.
type baseEntityQuery struct {
	url    *charm.URL
	fields map[string]int
	reply  chan baseEntityReply
}
type baseEntityReply struct { entity *mongodoc.BaseEntity err error } func (*suite) TestEntityIssuesBaseEntityQueryConcurrently(c *gc.C) { store := newChanStore() cache := entitycache.New(store) defer cache.Close() cache.AddBaseEntityFields(map[string]int{"name": 1}) entity := &mongodoc.Entity{ URL: charm.MustParseURL("~bob/wordpress-1"), BaseURL: charm.MustParseURL("~bob/wordpress"), BlobName: "w1", Size: 99, } baseEntity := &mongodoc.BaseEntity{ URL: charm.MustParseURL("~bob/wordpress"), Name: "wordpress", } queryDone := make(chan struct{}) go func() { defer close(queryDone) e, err := cache.Entity(charm.MustParseURL("~bob/wordpress-1"), map[string]int{"blobname": 1}) c.Check(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectEntityFields(entity, entityFields("blobname"))) }() // Acquire both the queries before replying so that we know they've been // issued concurrently. query1 := <-store.entityqc c.Assert(query1.url, jc.DeepEquals, charm.MustParseURL("~bob/wordpress-1")) c.Assert(query1.fields, jc.DeepEquals, entityFields("blobname")) query2 := <-store.baseEntityqc c.Assert(query2.url, jc.DeepEquals, charm.MustParseURL("~bob/wordpress")) c.Assert(query2.fields, jc.DeepEquals, baseEntityFields("name")) query1.reply <- entityReply{ entity: entity, } query2.reply <- baseEntityReply{ entity: baseEntity, } <-queryDone // Accessing the same entity again and the base entity should // not call any method on the store - if it does, then it'll send // on the query channels and we won't receive it, so the test // will deadlock. 
e, err := cache.Entity(charm.MustParseURL("~bob/wordpress-1"), map[string]int{"baseurl": 1, "blobname": 1}) c.Check(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectEntityFields(entity, entityFields("blobname"))) be, err := cache.BaseEntity(charm.MustParseURL("~bob/wordpress"), map[string]int{"name": 1}) c.Check(err, gc.IsNil) c.Check(be, jc.DeepEquals, selectBaseEntityFields(baseEntity, baseEntityFields("name"))) } func (*suite) TestEntityIssuesBaseEntityQuerySequentiallyForPromulgatedURL(c *gc.C) { store := newChanStore() cache := entitycache.New(store) defer cache.Close() cache.AddBaseEntityFields(map[string]int{"name": 1}) entity := &mongodoc.Entity{ URL: charm.MustParseURL("~bob/wordpress-1"), PromulgatedURL: charm.MustParseURL("wordpress-5"), BaseURL: charm.MustParseURL("~bob/wordpress"), BlobName: "w1", Size: 1, } baseEntity := &mongodoc.BaseEntity{ URL: charm.MustParseURL("~bob/wordpress"), Name: "wordpress", } queryDone := make(chan struct{}) go func() { defer close(queryDone) e, err := cache.Entity(charm.MustParseURL("wordpress-1"), map[string]int{"blobname": 1}) c.Check(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectEntityFields(entity, entityFields("blobname"))) }() // Acquire both the queries before replying so that we know they've been // issued concurrently. query1 := <-store.entityqc c.Assert(query1.url, jc.DeepEquals, charm.MustParseURL("wordpress-1")) c.Assert(query1.fields, jc.DeepEquals, entityFields("blobname")) query1.reply <- entityReply{ entity: entity, } <-queryDone // The base entity query is only issued when the original entity // is received. We can tell this because the URL in the query // contains the ~bob user which can't be inferred from the // original URL. 
query2 := <-store.baseEntityqc c.Assert(query2.url, jc.DeepEquals, charm.MustParseURL("~bob/wordpress")) c.Assert(query2.fields, jc.DeepEquals, baseEntityFields("name")) query2.reply <- baseEntityReply{ entity: baseEntity, } // Accessing the same entity again and the base entity should // not call any method on the store - if it does, then it'll send // on the query channels and we won't receive it, so the test // will deadlock. e, err := cache.Entity(charm.MustParseURL("wordpress-1"), map[string]int{"baseurl": 1, "blobname": 1}) c.Check(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectEntityFields(entity, entityFields("blobname"))) be, err := cache.BaseEntity(charm.MustParseURL("~bob/wordpress"), map[string]int{"name": 1}) c.Check(err, gc.IsNil) c.Check(be, jc.DeepEquals, selectBaseEntityFields(baseEntity, baseEntityFields("name"))) } func (*suite) TestFetchWhenFieldsChangeBeforeQueryResult(c *gc.C) { store := newChanStore() cache := entitycache.New(store) defer cache.Close() cache.AddBaseEntityFields(map[string]int{"name": 1}) entity := &mongodoc.Entity{ URL: charm.MustParseURL("~bob/wordpress-1"), BaseURL: charm.MustParseURL("~bob/wordpress"), BlobName: "w1", } baseEntity := &mongodoc.BaseEntity{ URL: charm.MustParseURL("~bob/wordpress"), Name: "wordpress", } store.findBaseEntity = func(url *charm.URL, fields map[string]int) (*mongodoc.BaseEntity, error) { c.Check(url, jc.DeepEquals, baseEntity.URL) c.Check(fields, jc.DeepEquals, baseEntityFields("name")) return baseEntity, nil } queryDone := make(chan struct{}) go func() { defer close(queryDone) e, err := cache.Entity(charm.MustParseURL("~bob/wordpress-1"), nil) c.Check(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectEntityFields(entity, entityFields())) }() query1 := <-store.entityqc c.Assert(query1.url, jc.DeepEquals, charm.MustParseURL("~bob/wordpress-1")) c.Assert(query1.fields, jc.DeepEquals, entityFields()) // Before we send the reply, make another query with different fields, // so the version changes. 
entity2 := &mongodoc.Entity{ URL: charm.MustParseURL("~bob/wordpress-1"), BaseURL: charm.MustParseURL("~bob/wordpress"), BlobName: "w1", Size: 99, } query2Done := make(chan struct{}) go func() { defer close(query2Done) // Note the extra "size" field. e, err := cache.Entity(charm.MustParseURL("~bob/wordpress-1"), map[string]int{"size": 1}) c.Check(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectEntityFields(entity2, entityFields("size"))) }() // The second query should be sent immediately without waiting // for the first because it invalidates the cache.. query2 := <-store.entityqc c.Assert(query2.url, jc.DeepEquals, charm.MustParseURL("~bob/wordpress-1")) c.Assert(query2.fields, jc.DeepEquals, entityFields("size")) query2.reply <- entityReply{ entity: entity2, } <-query2Done // Reply to the first query and make sure that it completed. query1.reply <- entityReply{ entity: entity, } <-queryDone // Accessing the same entity again not call any method on the store, so close the query channels // to ensure it doesn't. 
close(store.entityqc) close(store.baseEntityqc) e, err := cache.Entity(charm.MustParseURL("~bob/wordpress-1"), nil) c.Check(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectEntityFields(entity2, entityFields("size"))) } func (*suite) TestSecondFetchesWaitForFirst(c *gc.C) { store := newChanStore() cache := entitycache.New(store) defer cache.Close() cache.AddBaseEntityFields(map[string]int{"name": 1}) entity := &mongodoc.Entity{ URL: charm.MustParseURL("~bob/wordpress-1"), BaseURL: charm.MustParseURL("~bob/wordpress"), BlobName: "w1", } baseEntity := &mongodoc.BaseEntity{ URL: charm.MustParseURL("~bob/wordpress"), Name: "wordpress", } store.findBaseEntity = func(url *charm.URL, fields map[string]int) (*mongodoc.BaseEntity, error) { c.Check(url, jc.DeepEquals, baseEntity.URL) c.Check(fields, jc.DeepEquals, baseEntityFields("name")) return baseEntity, nil } var initialRequestGroup sync.WaitGroup initialRequestGroup.Add(1) go func() { defer initialRequestGroup.Done() e, err := cache.Entity(charm.MustParseURL("~bob/wordpress-1"), nil) c.Check(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectEntityFields(entity, entityFields())) }() query1 := <-store.entityqc c.Assert(query1.url, jc.DeepEquals, charm.MustParseURL("~bob/wordpress-1")) c.Assert(query1.fields, jc.DeepEquals, entityFields()) // Send some more queries for the same charm. These should not send a // store request but instead wait for the first one. for i := 0; i < 5; i++ { initialRequestGroup.Add(1) go func() { defer initialRequestGroup.Done() e, err := cache.Entity(charm.MustParseURL("~bob/wordpress-1"), nil) c.Check(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectEntityFields(entity, entityFields())) }() } select { case q := <-store.entityqc: c.Fatalf("unexpected store query %#v", q) case <-time.After(10 * time.Millisecond): } entity2 := &mongodoc.Entity{ URL: charm.MustParseURL("~bob/wordpress-2"), BaseURL: charm.MustParseURL("~bob/wordpress"), BlobName: "w2", } // Send another query for a different charm. 
This will cause the // waiting goroutines to be woken up but go back to sleep again // because their entry isn't yet available. otherRequestDone := make(chan struct{}) go func() { defer close(otherRequestDone) e, err := cache.Entity(charm.MustParseURL("~bob/wordpress-2"), nil) c.Check(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectEntityFields(entity2, entityFields())) }() query2 := <-store.entityqc c.Assert(query2.url, jc.DeepEquals, charm.MustParseURL("~bob/wordpress-2")) c.Assert(query2.fields, jc.DeepEquals, entityFields()) query2.reply <- entityReply{ entity: entity2, } // Now reply to the initial store request, which should make // everything complete. query1.reply <- entityReply{ entity: entity, } initialRequestGroup.Wait() } func (*suite) TestGetEntityNotFound(c *gc.C) { entityFetchCount := 0 baseEntityFetchCount := 0 store := &callbackStore{ findBestEntity: func(url *charm.URL, fields map[string]int) (*mongodoc.Entity, error) { entityFetchCount++ return nil, errgo.NoteMask(params.ErrNotFound, "entity", errgo.Any) }, findBaseEntity: func(url *charm.URL, fields map[string]int) (*mongodoc.BaseEntity, error) { baseEntityFetchCount++ return nil, errgo.NoteMask(params.ErrNotFound, "base entity", errgo.Any) }, } cache := entitycache.New(store) defer cache.Close() e, err := cache.Entity(charm.MustParseURL("~bob/wordpress-1"), nil) c.Assert(e, gc.IsNil) c.Assert(err, gc.ErrorMatches, "entity: not found") c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) // Make sure that the not-found result has been cached. e, err = cache.Entity(charm.MustParseURL("~bob/wordpress-1"), nil) c.Assert(e, gc.IsNil) c.Assert(err, gc.ErrorMatches, "entity: not found") c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) c.Assert(entityFetchCount, gc.Equals, 1) // Make sure fetching the base entity works the same way. 
be, err := cache.BaseEntity(charm.MustParseURL("~bob/wordpress"), nil) c.Assert(be, gc.IsNil) c.Assert(err, gc.ErrorMatches, "base entity: not found") c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) be, err = cache.BaseEntity(charm.MustParseURL("~bob/wordpress"), nil) c.Assert(be, gc.IsNil) c.Assert(err, gc.ErrorMatches, "base entity: not found") c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) c.Assert(baseEntityFetchCount, gc.Equals, 1) } func (*suite) TestFetchError(c *gc.C) { entityFetchCount := 0 baseEntityFetchCount := 0 store := &callbackStore{ findBestEntity: func(url *charm.URL, fields map[string]int) (*mongodoc.Entity, error) { entityFetchCount++ return nil, errgo.New("entity error") }, findBaseEntity: func(url *charm.URL, fields map[string]int) (*mongodoc.BaseEntity, error) { baseEntityFetchCount++ return nil, errgo.New("base entity error") }, } cache := entitycache.New(store) defer cache.Close() // Check that we get the entity fetch error from cache.Entity. e, err := cache.Entity(charm.MustParseURL("~bob/wordpress-1"), nil) c.Assert(e, gc.IsNil) c.Assert(err, gc.ErrorMatches, `cannot fetch "cs:~bob/wordpress-1": entity error`) // Check that the error is cached. e, err = cache.Entity(charm.MustParseURL("~bob/wordpress-1"), nil) c.Assert(e, gc.IsNil) c.Assert(err, gc.ErrorMatches, `cannot fetch "cs:~bob/wordpress-1": entity error`) c.Assert(entityFetchCount, gc.Equals, 1) // Check that we get the base-entity fetch error from cache.BaseEntity. be, err := cache.BaseEntity(charm.MustParseURL("~bob/wordpress"), nil) c.Assert(be, gc.IsNil) c.Assert(err, gc.ErrorMatches, `cannot fetch "cs:~bob/wordpress": base entity error`) // Check that the error is cached. 
be, err = cache.BaseEntity(charm.MustParseURL("~bob/wordpress"), nil) c.Assert(be, gc.IsNil) c.Assert(err, gc.ErrorMatches, `cannot fetch "cs:~bob/wordpress": base entity error`) c.Assert(baseEntityFetchCount, gc.Equals, 1) } func (*suite) TestStartFetch(c *gc.C) { store := newChanStore() cache := entitycache.New(store) url := charm.MustParseURL("~bob/wordpress-1") baseURL := charm.MustParseURL("~bob/wordpress") cache.StartFetch([]*charm.URL{url}) entity := &mongodoc.Entity{ URL: url, BaseURL: baseURL, BlobName: "foo", } baseEntity := &mongodoc.BaseEntity{ URL: baseURL, } // Both queries should be issued concurrently. query1 := <-store.entityqc c.Assert(query1.url, jc.DeepEquals, charm.MustParseURL("~bob/wordpress-1")) query2 := <-store.baseEntityqc c.Assert(query2.url, jc.DeepEquals, charm.MustParseURL("~bob/wordpress")) entityQueryDone := make(chan struct{}) go func() { defer close(entityQueryDone) e, err := cache.Entity(url, nil) c.Check(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectEntityFields(entity, entityFields())) }() baseEntityQueryDone := make(chan struct{}) go func() { defer close(baseEntityQueryDone) e, err := cache.BaseEntity(baseURL, nil) c.Check(err, gc.IsNil) c.Check(e, jc.DeepEquals, baseEntity) }() // Reply to the entity query. // This should cause the extra entity query to complete. query1.reply <- entityReply{ entity: entity, } <-entityQueryDone // Reply to the base entity query. // This should cause the extra base entity query to complete. 
query2.reply <- baseEntityReply{ entity: &mongodoc.BaseEntity{ URL: baseURL, }, } <-baseEntityQueryDone } func (*suite) TestAddEntityFields(c *gc.C) { store := newChanStore() baseEntity := &mongodoc.BaseEntity{ URL: charm.MustParseURL("cs:~bob/wordpress"), } entity := &mongodoc.Entity{ URL: charm.MustParseURL("cs:~bob/wordpress-1"), BlobName: "foo", Size: 999, BlobHash: "ffff", } baseEntityCount := 0 store.findBaseEntity = func(url *charm.URL, fields map[string]int) (*mongodoc.BaseEntity, error) { baseEntityCount++ if url.String() != "cs:~bob/wordpress" { return nil, params.ErrNotFound } return baseEntity, nil } cache := entitycache.New(store) cache.AddEntityFields(map[string]int{"blobname": 1, "size": 1}) queryDone := make(chan struct{}) go func() { defer close(queryDone) e, err := cache.Entity(charm.MustParseURL("cs:~bob/wordpress-1"), map[string]int{"blobname": 1}) c.Check(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectEntityFields(entity, entityFields("blobname", "size"))) // Adding existing entity fields should have no effect. cache.AddEntityFields(map[string]int{"blobname": 1, "size": 1}) e, err = cache.Entity(charm.MustParseURL("cs:~bob/wordpress-1"), map[string]int{"size": 1}) c.Check(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectEntityFields(entity, entityFields("blobname", "size"))) // Adding a new field should will cause the cache to be invalidated // and a new fetch to take place. cache.AddEntityFields(map[string]int{"blobhash": 1}) e, err = cache.Entity(charm.MustParseURL("cs:~bob/wordpress-1"), nil) c.Check(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectEntityFields(entity, entityFields("blobname", "size", "blobhash"))) }() query1 := <-store.entityqc c.Assert(query1.url, jc.DeepEquals, charm.MustParseURL("~bob/wordpress-1")) c.Assert(query1.fields, jc.DeepEquals, entityFields("blobname", "size")) query1.reply <- entityReply{ entity: entity, } // When the entity fields are added, we expect another query // because that invalidates the cache. 
query2 := <-store.entityqc c.Assert(query2.url, jc.DeepEquals, charm.MustParseURL("~bob/wordpress-1")) c.Assert(query2.fields, jc.DeepEquals, entityFields("blobhash", "blobname", "size")) query2.reply <- entityReply{ entity: entity, } <-queryDone } func (*suite) TestLookupByDifferentKey(c *gc.C) { entityFetchCount := 0 entity := &mongodoc.Entity{ URL: charm.MustParseURL("~bob/wordpress-1"), BaseURL: charm.MustParseURL("~bob/wordpress"), BlobName: "w1", } baseEntity := &mongodoc.BaseEntity{ URL: charm.MustParseURL("~bob/wordpress"), Name: "wordpress", } store := &callbackStore{ findBestEntity: func(url *charm.URL, fields map[string]int) (*mongodoc.Entity, error) { entityFetchCount++ return entity, nil }, findBaseEntity: func(url *charm.URL, fields map[string]int) (*mongodoc.BaseEntity, error) { if url.String() != "cs:~bob/wordpress" { return nil, params.ErrNotFound } return baseEntity, nil }, } cache := entitycache.New(store) defer cache.Close() e, err := cache.Entity(charm.MustParseURL("~bob/wordpress-1"), nil) c.Assert(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectEntityFields(entity, entityFields())) oldEntity := e // The second fetch will trigger another query because // we can't tell whether it's the same entity or not, // but it should return the cached entity anyway. 
entity = &mongodoc.Entity{ URL: charm.MustParseURL("~bob/wordpress-1"), BaseURL: charm.MustParseURL("~bob/wordpress"), BlobName: "w2", } e, err = cache.Entity(charm.MustParseURL("~bob/wordpress"), nil) c.Assert(err, gc.IsNil) c.Logf("got %p; old entity %p; new entity %p", e, oldEntity, entity) c.Assert(e, gc.Equals, oldEntity) c.Assert(entityFetchCount, gc.Equals, 2) } func (s *suite) TestIterSingle(c *gc.C) { store := newChanStore() store.findBestEntity = func(url *charm.URL, fields map[string]int) (*mongodoc.Entity, error) { c.Errorf("store query made unexpectedly") return nil, errgo.New("no queries expected during iteration") } cache := entitycache.New(store) defer cache.Close() fakeIter := newFakeIter() iter := cache.CustomIter(fakeIter, map[string]int{"size": 1, "blobsize": 1}) nextDone := make(chan struct{}) go func() { defer close(nextDone) ok := iter.Next() c.Assert(ok, gc.Equals, true) }() replyc := <-fakeIter.req entity := &mongodoc.Entity{ URL: charm.MustParseURL("~bob/wordpress-1"), BaseURL: charm.MustParseURL("~bob/wordpress"), BlobName: "w1", } replyc <- iterReply{ entity: entity, } // The iterator should batch up entities so make sure that // it does not return the entry immediately. select { case <-nextDone: c.Fatalf("Next returned early - no batching?") case <-time.After(10 * time.Millisecond): } // Get the next iterator query and reply to signal that // the iterator has completed. replyc = <-fakeIter.req replyc <- iterReply{ err: errIterFinished, } // The base entity should be requested asynchronously now. baseQuery := <-store.baseEntityqc // ... but the initial reply shouldn't be held up by that. <-nextDone // Check that the entity is the one we expect. cachedEntity := iter.Entity() c.Assert(cachedEntity, jc.DeepEquals, selectEntityFields(entity, entityFields("size", "blobsize"))) // Check that the entity can now be fetched from the cache. 
e, err := cache.Entity(charm.MustParseURL("~bob/wordpress-1"), nil) c.Assert(err, gc.IsNil) c.Assert(e, gc.Equals, cachedEntity) // A request for the base entity should now block // until the initial base entity request has been satisfied. baseEntity := &mongodoc.BaseEntity{ URL: charm.MustParseURL("~bob/wordpress"), Name: "wordpress", } queryDone := make(chan struct{}) go func() { defer close(queryDone) e, err := cache.BaseEntity(charm.MustParseURL("~bob/wordpress"), nil) c.Check(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectBaseEntityFields(baseEntity, baseEntityFields())) }() // Check that no additional base entity query is made. select { case <-queryDone: c.Fatalf("Next returned early - no batching?") case <-time.After(10 * time.Millisecond): } // Reply to the base entity query ... baseQuery.reply <- baseEntityReply{ entity: baseEntity, } // ... which should result in the one we just made // being satisfied too. <-queryDone } func (*suite) TestIterWithEntryAlreadyInCache(c *gc.C) { store := &staticStore{ entities: []*mongodoc.Entity{{ URL: charm.MustParseURL("~bob/wordpress-1"), BaseURL: charm.MustParseURL("~bob/wordpress"), BlobName: "w1", }, { URL: charm.MustParseURL("~bob/wordpress-2"), BaseURL: charm.MustParseURL("~bob/wordpress"), BlobName: "w2", }, { URL: charm.MustParseURL("~alice/mysql-1"), BaseURL: charm.MustParseURL("~alice/mysql"), BlobName: "a1", }}, baseEntities: []*mongodoc.BaseEntity{{ URL: charm.MustParseURL("~bob/wordpress"), }, { URL: charm.MustParseURL("~alice/mysql"), }}, } cache := entitycache.New(store) defer cache.Close() e, err := cache.Entity(charm.MustParseURL("~bob/wordpress-1"), map[string]int{"size": 1, "blobsize": 1}) c.Assert(err, gc.IsNil) c.Check(e, jc.DeepEquals, selectEntityFields(store.entities[0], entityFields("size", "blobsize"))) cachedEntity := e be, err := cache.BaseEntity(charm.MustParseURL("~bob/wordpress"), nil) c.Assert(err, gc.IsNil) c.Check(be, jc.DeepEquals, selectBaseEntityFields(store.baseEntities[0], 
baseEntityFields())) cachedBaseEntity := be iterEntity := &mongodoc.Entity{ URL: charm.MustParseURL("~bob/wordpress-1"), BaseURL: charm.MustParseURL("~bob/wordpress"), BlobName: "w2", } fakeIter := newFakeIter() iter := cache.CustomIter(fakeIter, map[string]int{"size": 1, "blobsize": 1}) iterDone := make(chan struct{}) go func() { defer close(iterDone) ok := iter.Next() c.Check(ok, gc.Equals, true) // Even though the entity is in the cache, we still // receive the entity returned from the iterator. // We can't actually tell this though. c.Check(iter.Entity(), jc.DeepEquals, selectEntityFields(iterEntity, entityFields("size", "blobsize"))) ok = iter.Next() c.Check(ok, gc.Equals, false) }() // Provide the iterator request with an entity that's already // in the cache. replyc := <-fakeIter.req replyc <- iterReply{ entity: iterEntity, } replyc = <-fakeIter.req replyc <- iterReply{ err: errIterFinished, } <-iterDone // The original cached entities should still be there. e, err = cache.Entity(charm.MustParseURL("~bob/wordpress-1"), nil) c.Assert(err, gc.IsNil) c.Assert(e, gc.Equals, cachedEntity) be, err = cache.BaseEntity(charm.MustParseURL("~bob/wordpress"), nil) c.Assert(err, gc.IsNil) c.Assert(be, gc.Equals, cachedBaseEntity) } func (*suite) TestIterCloseEarlyWhenBatchLimitExceeded(c *gc.C) { // The iterator gets closed when the batch limit has been // exceeded. 
entities := make([]*mongodoc.Entity, entitycache.BaseEntityThreshold) baseEntities := make([]*mongodoc.BaseEntity, entitycache.BaseEntityThreshold) for i := range entities { entities[i] = &mongodoc.Entity{ URL: &charm.URL{ Schema: "cs", Name: fmt.Sprintf("wordpress%d", i), User: "bob", Revision: i, }, BaseURL: &charm.URL{ Name: fmt.Sprintf("wordpress%d", i), User: "bob", }, BlobName: fmt.Sprintf("w%d", i), } baseEntities[i] = &mongodoc.BaseEntity{ URL: entities[i].BaseURL, } } store := &staticStore{ baseEntities: baseEntities, } cache := entitycache.New(store) fakeIter := &sliceIter{ entities: entities, } iter := cache.CustomIter(fakeIter, map[string]int{"blobname": 1}) iter.Close() c.Assert(iter.Next(), gc.Equals, false) } func (*suite) TestIterEntityBatchLimitExceeded(c *gc.C) { entities := make([]*mongodoc.Entity, entitycache.EntityThreshold) for i := range entities { entities[i] = &mongodoc.Entity{ URL: &charm.URL{ Schema: "cs", Name: "wordpress", User: "bob", Revision: i, }, BaseURL: charm.MustParseURL("~bob/wordpress"), BlobName: fmt.Sprintf("w%d", i), } } entities = append(entities, &mongodoc.Entity{ URL: charm.MustParseURL("~alice/mysql1-1"), BaseURL: charm.MustParseURL("~alice/mysql1"), }) store := newChanStore() cache := entitycache.New(store) fakeIter := &sliceIter{ entities: entities, } iter := cache.CustomIter(fakeIter, map[string]int{"blobname": 1}) // The iterator should fetch up to entityThreshold entities // from the underlying iterator before sending // the batched base-entity request, then it // will make all those entries available. 
query := <-store.baseEntityqc c.Assert(query.url, jc.DeepEquals, charm.MustParseURL("~bob/wordpress")) query.reply <- baseEntityReply{ entity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~bob/wordpress"), }, } for i := 0; i < entitycache.EntityThreshold; i++ { ok := iter.Next() c.Assert(ok, gc.Equals, true) c.Assert(iter.Entity(), jc.DeepEquals, entities[i]) } // When the iterator reaches its end, the // remaining entity and base entity are fetched. query = <-store.baseEntityqc c.Assert(query.url, jc.DeepEquals, charm.MustParseURL("~alice/mysql1")) query.reply <- baseEntityReply{ entity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~alice/mysql1"), }, } ok := iter.Next() c.Assert(ok, gc.Equals, true) c.Assert(iter.Entity(), jc.DeepEquals, entities[entitycache.EntityThreshold]) // Check that all the entities and base entities are in fact cached. for _, want := range entities { got, err := cache.Entity(want.URL, nil) c.Assert(err, gc.IsNil) c.Assert(got, jc.DeepEquals, want) gotBase, err := cache.BaseEntity(want.URL, nil) c.Assert(err, gc.IsNil) c.Assert(gotBase, jc.DeepEquals, &mongodoc.BaseEntity{ URL: want.BaseURL, }) } } func (*suite) TestIterError(c *gc.C) { cache := entitycache.New(&staticStore{}) fakeIter := newFakeIter() iter := cache.CustomIter(fakeIter, nil) // Err returns nil while the iteration is in progress. err := iter.Err() c.Assert(err, gc.IsNil) replyc := <-fakeIter.req replyc <- iterReply{ err: errgo.New("iterator error"), } ok := iter.Next() c.Assert(ok, gc.Equals, false) err = iter.Err() c.Assert(err, gc.ErrorMatches, "iterator error") } // iterReply holds a reply from a request from a fakeIter // for the next item. type iterReply struct { // entity holds the entity to be replied with. // Any fields not specified when creating the // iterator will be omitted from the result // sent to the entitycache code. entity *mongodoc.Entity // err holds any iteration error. When the iteration is complete, // errIterFinished should be sent. 
err error } // fakeIter provides a mock iterator implementation // that sends each request for an entity to // another goroutine for a result. type fakeIter struct { closed bool fields map[string]int err error // req holds a channel that is sent a value // whenever the Next method is called. req chan chan iterReply } func newFakeIter() *fakeIter { return &fakeIter{ req: make(chan chan iterReply, 1), } } func (i *fakeIter) Iter(fields map[string]int) entitycache.StoreIter { i.fields = fields return i } // Next implements mgoIter.Next. The // x parameter must be a *mongodoc.Entity. func (i *fakeIter) Next(x interface{}) bool { if i.closed { panic("Next called after Close") } if i.err != nil { return false } replyc := make(chan iterReply) i.req <- replyc reply := <-replyc i.err = reply.err if i.err == nil { *(x.(*mongodoc.Entity)) = *selectEntityFields(reply.entity, i.fields) } else if reply.entity != nil { panic("entity with non-nil error") } return i.err == nil } var errIterFinished = errgo.New("iteration finished") // Close implements mgoIter.Close. func (i *fakeIter) Close() error { i.closed = true if i.err == errIterFinished { return nil } return i.err } // Close implements mgoIter.Err. func (i *fakeIter) Err() error { if i.err == errIterFinished { return nil } return i.err } // sliceIter implements mgoIter over a slice of entities, // returning each one in turn. 
type sliceIter struct { fields map[string]int entities []*mongodoc.Entity closed bool } func (i *sliceIter) Iter(fields map[string]int) entitycache.StoreIter { i.fields = fields return i } func (iter *sliceIter) Next(x interface{}) bool { if iter.closed { panic("Next called after Close") } if len(iter.entities) == 0 { return false } e := x.(*mongodoc.Entity) *e = *selectEntityFields(iter.entities[0], iter.fields) iter.entities = iter.entities[1:] return true } func (iter *sliceIter) Err() error { return nil } func (iter *sliceIter) Close() error { iter.closed = true return nil } type chanStore struct { entityqc chan entityQuery baseEntityqc chan baseEntityQuery *callbackStore } func newChanStore() *chanStore { entityqc := make(chan entityQuery, 1) baseEntityqc := make(chan baseEntityQuery, 1) return &chanStore{ entityqc: entityqc, baseEntityqc: baseEntityqc, callbackStore: &callbackStore{ findBestEntity: func(url *charm.URL, fields map[string]int) (*mongodoc.Entity, error) { reply := make(chan entityReply) entityqc <- entityQuery{ url: url, fields: fields, reply: reply, } r := <-reply return r.entity, r.err }, findBaseEntity: func(url *charm.URL, fields map[string]int) (*mongodoc.BaseEntity, error) { reply := make(chan baseEntityReply) baseEntityqc <- baseEntityQuery{ url: url, fields: fields, reply: reply, } r := <-reply return r.entity, r.err }, }, } } type callbackStore struct { findBestEntity func(url *charm.URL, fields map[string]int) (*mongodoc.Entity, error) findBaseEntity func(url *charm.URL, fields map[string]int) (*mongodoc.BaseEntity, error) } func (s *callbackStore) FindBestEntity(url *charm.URL, fields map[string]int) (*mongodoc.Entity, error) { e, err := s.findBestEntity(url, fields) if err != nil { return nil, err } return selectEntityFields(e, fields), nil } func (s *callbackStore) FindBaseEntity(url *charm.URL, fields map[string]int) (*mongodoc.BaseEntity, error) { e, err := s.findBaseEntity(url, fields) if err != nil { return nil, err } return 
selectBaseEntityFields(e, fields), nil } type staticStore struct { entities []*mongodoc.Entity baseEntities []*mongodoc.BaseEntity } func (s *staticStore) FindBestEntity(url *charm.URL, fields map[string]int) (*mongodoc.Entity, error) { for _, e := range s.entities { if *url == *e.URL { return selectEntityFields(e, fields), nil } } return nil, params.ErrNotFound } func (s *staticStore) FindBaseEntity(url *charm.URL, fields map[string]int) (*mongodoc.BaseEntity, error) { for _, e := range s.baseEntities { if *url == *e.URL { return e, nil } } return nil, params.ErrNotFound } func selectEntityFields(x *mongodoc.Entity, fields map[string]int) *mongodoc.Entity { e := selectFields(x, fields).(*mongodoc.Entity) if e.URL == nil { panic("url empty after selectfields") } return e } func selectBaseEntityFields(x *mongodoc.BaseEntity, fields map[string]int) *mongodoc.BaseEntity { return selectFields(x, fields).(*mongodoc.BaseEntity) } // selectFields returns a copy of x (which must // be a pointer to struct) with all fields zeroed // except those mentioned in fields. func selectFields(x interface{}, fields map[string]int) interface{} { xv := reflect.ValueOf(x).Elem() xt := xv.Type() dv := reflect.New(xt).Elem() dv.Set(xv) for i := 0; i < xt.NumField(); i++ { f := xt.Field(i) if _, ok := fields[bsonFieldName(f)]; ok { continue } dv.Field(i).Set(reflect.Zero(f.Type)) } return dv.Addr().Interface() } func bsonFieldName(f reflect.StructField) string { t := f.Tag.Get("bson") if t == "" { return strings.ToLower(f.Name) } if i := strings.Index(t, ","); i >= 0 { t = t[0:i] } if t != "" { return t } return strings.ToLower(f.Name) } func entityFields(fields ...string) map[string]int { return addFields(entitycache.RequiredEntityFields, fields...) } func baseEntityFields(fields ...string) map[string]int { return addFields(entitycache.RequiredBaseEntityFields, fields...) 
} func addFields(fields map[string]int, extra ...string) map[string]int { fields1 := make(map[string]int) for f := range fields { fields1[f] = 1 } for _, f := range extra { fields1[f] = 1 } return fields1 } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/entitycache/export_test.go0000664000175000017500000000034012672604603030414 0ustar marcomarcopackage entitycache var ( RequiredEntityFields = requiredEntityFields RequiredBaseEntityFields = requiredBaseEntityFields ) const ( EntityThreshold = entityThreshold BaseEntityThreshold = baseEntityThreshold ) charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/entitycache/cache.go0000664000175000017500000005350012672604603027105 0ustar marcomarco// Package entitycache provides a cache of charmstore entities and // base-entities, designed to be used for individual charmstore API // requests. package entitycache import ( "sync" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/mgo.v2" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" ) // TODO it might be better to represent the field selection with // a uint64 bitmask instead of a map[string]int. // Store holds the underlying storage used by the entity cache. // It is implemented by *charmstore.Store. type Store interface { FindBestEntity(url *charm.URL, fields map[string]int) (*mongodoc.Entity, error) FindBaseEntity(url *charm.URL, fields map[string]int) (*mongodoc.BaseEntity, error) } const ( // entityThreshold holds the maximum number // of entities that will be batched up before // requesting their base entities. entityThreshold = 100 // baseEntityThreshold holds the maximum number // of base entities that will be batched up before // requesting them. baseEntityThreshold = 20 ) // Cache holds a cache of entities and base entities. Whenever an entity // is fetched, its base entity is fetched too. It is OK to call methods // on Cache concurrently. 
type Cache struct { // store holds the store used by the cache. store Store // wg represents the set of running goroutines. wg sync.WaitGroup // entities holds all the cached *mongodoc.Entity entries, // A given entity always has an entry with its canonical URL as key, // but also may have other entries for other unambiguous names. // // Note that if an entity is in the entities stash, it does // not imply that its base entity necessarily in the base entities // stash. entities stash // entities holds all the cached *mongodoc.BaseEntity entries, // keyed by the canonical base URL string, and also its // promulgated URL. baseEntities stash } var requiredEntityFields = map[string]int{ "_id": 1, "promulgated-url": 1, "baseurl": 1, } var requiredBaseEntityFields = map[string]int{ "_id": 1, } // New returns a new cache that uses the given store // for fetching entities. func New(store Store) *Cache { var c Cache c.entities.init(c.getEntity, &c.wg, requiredEntityFields) c.baseEntities.init(c.getBaseEntity, &c.wg, requiredBaseEntityFields) c.store = store return &c } // Close closes the cache, ensuring that there are // no currently outstanding goroutines in progress. func (c *Cache) Close() { c.wg.Wait() } // AddEntityFields arranges that any entity subsequently // returned from Entity will have the given fields populated. // // If all the required fields are added before retrieving any entities, // fewer database round trips will be required. func (c *Cache) AddEntityFields(fields map[string]int) { c.entities.mu.Lock() defer c.entities.mu.Unlock() c.entities.addFields(fields) } // AddBaseEntityFields arranges that any value subsequently // returned from BaseEntity will have the given fields populated. // // If all the required fields are added before retrieving any base entities, // less database round trips will be required. 
func (c *Cache) AddBaseEntityFields(fields map[string]int) { c.baseEntities.mu.Lock() defer c.baseEntities.mu.Unlock() c.baseEntities.addFields(fields) } // StartFetch starts to fetch entities for all the given ids. The // entities can be accessed by calling Entity and their associated base // entities found by calling BaseEntity. // This method does not wait for the entities to actually be fetched. func (c *Cache) StartFetch(ids []*charm.URL) { c.entities.mu.Lock() for _, id := range ids { c.entities.startFetch(id) } c.entities.mu.Unlock() // Start any base entity fetches that we can. c.baseEntities.mu.Lock() defer c.baseEntities.mu.Unlock() for _, id := range ids { if id.User != "" { c.baseEntities.startFetch(mongodoc.BaseURL(id)) } } } // Entity returns the entity with the given id. If the entity is not // found, it returns an error with a params.ErrNotFound cause. // The returned entity will have at least the given fields filled out. func (c *Cache) Entity(id *charm.URL, fields map[string]int) (*mongodoc.Entity, error) { // Start the base entity fetch asynchronously if we have // an id we can infer the base entity URL from. if id.User != "" { c.baseEntities.mu.Lock() c.baseEntities.startFetch(mongodoc.BaseURL(id)) c.baseEntities.mu.Unlock() } e, err := c.entities.entity(id, fields) if err != nil { return nil, errgo.Mask(err, errgo.Is(params.ErrNotFound)) } return e.(entity).Entity, nil } // BaseEntity returns the base entity with the given id. If the entity is not // found, it returns an error with a params.ErrNotFound cause. // The returned entity will have at least the given fields filled out. 
func (c *Cache) BaseEntity(id *charm.URL, fields map[string]int) (*mongodoc.BaseEntity, error) { if id.User == "" { return nil, errgo.Newf("cannot get base entity of URL %q with no user", id) } e, err := c.baseEntities.entity(mongodoc.BaseURL(id), fields) if err != nil { return nil, errgo.Mask(err, errgo.Is(params.ErrNotFound)) } return e.(baseEntity).BaseEntity, nil } // getEntity is used by c.entities to fetch entities. // Called with no locks held. func (c *Cache) getEntity(id *charm.URL, fields map[string]int) (stashEntity, error) { e, err := c.store.FindBestEntity(id, fields) if err != nil { return nil, errgo.Mask(err, errgo.Any) } if id.User == "" { // The id we used to look up the entity had no user // so we were not able to start the base entity fetching // concurrently, so start fetching it now, at the soonest // possible moment. c.baseEntities.mu.Lock() c.baseEntities.startFetch(mongodoc.BaseURL(e.URL)) c.baseEntities.mu.Unlock() } return entity{e}, nil } // getBaseEntity is used by c.baseEntities to fetch entities. // Called with no locks held. func (c *Cache) getBaseEntity(id *charm.URL, fields map[string]int) (stashEntity, error) { e, err := c.store.FindBaseEntity(id, fields) if err != nil { return nil, errgo.Mask(err, errgo.Any) } return baseEntity{e}, nil } // stash holds a set of one kind of entity (either entities or base entities). type stash struct { // get fetches the entity with the given URL. get func(id *charm.URL, fields map[string]int) (stashEntity, error) // wg represents the set of running goroutines. wg *sync.WaitGroup // mu guards the fields below mu sync.Mutex // changed is signalled every time the entities map has changed. // This means that each waiter can potentially be woken up many // times before it finds the entity that it's waiting for, but // saves us having a channel or condition per entity. 
// // Note that in the usual pattern we expect to see, callers // will ask for entities in the same order that they arrive // in the cache, so won't iterate many times. changed sync.Cond // entities holds at least one entry for each cached entity, // keyed by the entity id string. A given entity always has an // entry with its canonical URL as key, but also may have other // entries for other unambiguous names. // // A nil entry indicates that the entity has been scheduled to // be fetched. Entries that have been fetched but that were not // found are indicated with a notFoundEntity value. entities map[charm.URL]stashEntity // fields holds the set of fields required when fetching an // entity. This map is never changed after it is first populated // - it is replaced instead, which means that it's OK to pass it // to concurrent goroutines that access it without the mutex // locked. // // When it does change, the entity cache is invalidated. Fields // are never deleted. fields map[string]int // version is incremented every time fields is modified. version int // err holds any database fetch error (other than "not found") // that has occurred while fetching entities. err error } // init initializes the stash with the given entity get function. func (s *stash) init(get func(id *charm.URL, fields map[string]int) (stashEntity, error), wg *sync.WaitGroup, initialFields map[string]int) { s.changed.L = &s.mu s.get = get s.wg = wg s.fields = initialFields s.entities = make(map[charm.URL]stashEntity) } // entity returns the entity with the given id. If the entity is not // found, it returns an error with a params.ErrNotFound cause. 
func (s *stash) entity(id *charm.URL, fields map[string]int) (stashEntity, error) { s.mu.Lock() defer s.mu.Unlock() s.addFields(fields) e, hasEntry := s.entities[*id] for { if e != nil { if e, ok := e.(*notFoundEntity); ok { return nil, errgo.Mask(e.err, errgo.Is(params.ErrNotFound)) } return e, nil } if s.err != nil { return nil, errgo.Notef(s.err, "cannot fetch %q", id) } if hasEntry { // The entity is already being fetched. Wait for the fetch // to complete and try again. s.changed.Wait() e, hasEntry = s.entities[*id] continue } // Fetch synchronously (any other goroutines will be // notified when we've retrieved the entity). After the // fetch has completed, the entry in the cache will either // be set to the retrieved entity, or deleted (if the // selected fields have changed). s.entities[*id] = nil version := s.version fields := s.fields s.mu.Unlock() e = s.fetch(id, fields, version) s.mu.Lock() // Invariant (from fetch): e != nil || s.err != nil } } // addFields adds the given fields to those that will be fetched // when an entity is fetched. // // Called with s.mu locked. func (s *stash) addFields(fields map[string]int) { changed := false for field := range fields { if _, ok := s.fields[field]; !ok { changed = true break } } if !changed { return } if len(s.entities) > 0 { // The fields have changed, invalidating our current // cache, so delete all entries. s.entities = make(map[charm.URL]stashEntity) s.version++ // There may be several goroutines waiting for pending // entities. Notify them so that they can start a new // fetch. s.changed.Broadcast() } newFields := make(map[string]int) for field := range s.fields { newFields[field] = 1 } for field := range fields { newFields[field] = 1 } s.fields = newFields } // startFetch starts an asynchronous fetch for the given id. // If a fetch is already in progress, it does nothing. // // Called with s.mu locked. 
func (s *stash) startFetch(id *charm.URL) { if _, ok := s.entities[*id]; ok { return } s.entities[*id] = nil // Note that it's only OK to pass s.fields here because // it's never mutated, only replaced. s.wg.Add(1) go s.fetchAsync(id, s.fields, s.version) } // fetchAsync is like fetch except that it is expected to be called // in a separate goroutine, with s.wg.Add called appropriately // beforehand. // Called with s.mu unlocked. func (s *stash) fetchAsync(url *charm.URL, fields map[string]int, version int) stashEntity { defer s.wg.Done() return s.fetch(url, fields, version) } // fetch fetches the entity with the given id, including the given // fields, adds it to the stash and notifies any waiters that the stash // has changed. // // The given entity version holds the version at the time the // fetch was started. If the entity version has changed when the result is received, // the result is discarded. // // fetch returns the entity as it would be stored in the cache (notFoundEntity // implies not found). It returns nil if and only if some other kind of error has // been encountered (in this case the error will be stored in s.err). // // Called with no locks held. func (s *stash) fetch(url *charm.URL, fields map[string]int, version int) stashEntity { e, err := s.get(url, fields) s.mu.Lock() defer s.mu.Unlock() if err != nil { if errgo.Cause(err) != params.ErrNotFound { if s.err == nil { // Only set the error if we haven't encountered one already. // We assume that if we're getting several errors, they're // almost certainly caused by the same thing, so there's // no point in logging them all. s.err = errgo.Mask(err) // Let other waiters know about the fact that // we got an error. 
s.changed.Broadcast() } return nil } e = ¬FoundEntity{err} } if s.version != version { // The entity version has changed, implying the selected // fields have changed, so the entity we've just fetched // is not valid to put in the cache because we haven't // fetched all the fields that are required. // // We return the entity that we've just fetched (our // caller, at least, wanted the fields we've just got). // There's no need to delete the "pending" entry from the // cache because all entries will have been cleared out // when the version changed. return e } return s.addEntity(e, url) } // addEntity adds the given entity to the stash, adds the given lookupId // as an alias for it, and notifies any listeners if there has been a // change. // // It returns the cached entity - this may be different from e if // an entry is already present in the cache. // // Called with s.mu locked. func (s *stash) addEntity(e stashEntity, lookupId *charm.URL) stashEntity { keys := make([]*charm.URL, 0, 3) if _, ok := e.(*notFoundEntity); ok { keys = append(keys, lookupId) } else { keys = append(keys, e.url()) if u := e.promulgatedURL(); u != nil { keys = append(keys, u) } if lookupId != nil { keys = append(keys, lookupId) } } added := false for _, key := range keys { if old := s.entities[*key]; old == nil { s.entities[*key] = e added = true } else { // We've found an old entry - use that instead // of the new one if necessary. e = old } } if added { s.changed.Broadcast() } return e } // notFoundEntity is a sentinel type that is stored // in the entities map when the value has been fetched // but was not found. type notFoundEntity struct { // The actual not-found error encountered. 
err error } func (*notFoundEntity) url() *charm.URL { panic("url called on not-found sentinel value") } func (*notFoundEntity) promulgatedURL() *charm.URL { panic("promulgatedURL called on not-found sentinel value") } // Iter returns an iterator that iterates through // all the entities found by the given query, which must // be a query on the entities collection. // The entities produced by the returned iterator // will have at least the given fields populated. func (c *Cache) Iter(q *mgo.Query, fields map[string]int) *Iter { return c.CustomIter(mgoQuery{q}, fields) } // CustomIter is the same as Iter except that it allows iteration // through entities that aren't necessarily the direct result of // a MongoDB query. Care must be taken to ensure that // the fields returned are valid for the entities they purport // to represent. func (c *Cache) CustomIter(q StoreQuery, fields map[string]int) *Iter { c.entities.mu.Lock() defer c.entities.mu.Unlock() c.entities.addFields(fields) iter := &Iter{ iter: q.Iter(c.entities.fields), cache: c, entityc: make(chan *mongodoc.Entity), closed: make(chan struct{}), version: c.entities.version, } iter.runWG.Add(1) go iter.run() return iter } // Iter holds an iterator over a set of entities. type Iter struct { // e holds the current entity. It is nil only // if the iterator has terminated. e *mongodoc.Entity iter StoreIter cache *Cache entityc chan *mongodoc.Entity closed chan struct{} runWG sync.WaitGroup // err holds any error encountered when iterating. // It is set only after Next has returned false. err error // The following fields are owned by Iter.run. // entityBatch holds the entities that we have read // from the underlying iterator but haven't yet // sent on iter.entityc. entityBatch []*mongodoc.Entity // baseEntityBatch holds the set of base entities that // are required by the entities in entityBatch. baseEntityBatch []*charm.URL // version holds cache.entities.version at the time the iterator // was created. 
If cache.entities.version changes during // iteration, we will still deliver entities to the iterator, // but we cannot store them in the stash because they won't have // the required fields. version int } // Next reports whether there are any more entities available from the // iterator. The iterator is automatically closed when Next returns // false. func (i *Iter) Next() bool { i.e = <-i.entityc if i.e != nil { return true } if err := i.iter.Err(); err != nil { i.err = errgo.Mask(err) } return false } // Entity returns the current entity, or nil if the iterator has reached // the end of its iteration. The base entity associated with the entity // will be available via the EntityFetcher.BaseEntity method. // The caller should treat the returned entity as read-only. func (i *Iter) Entity() *mongodoc.Entity { return i.e } // Close closes the iterator. This must be called if the iterator is // abandoned without reaching its end. func (i *Iter) Close() { close(i.closed) // Wait for the iterator goroutine to complete. Note that we // *could* just wait for i.entityc to be closed, but this would // mean that it would be possible for i.send to complete // successfully even when the iterator has been closed, which // compromises test reproducibility. An alternative to the wait // group might be for iter.send to do a non-blocking receive on // i.closed before trying to send on i.entityc. i.runWG.Wait() i.e = nil if err := i.iter.Err(); err != nil { i.err = errgo.Mask(err) } } // Err returns any error encountered by the the iterator. If the // iterator has not terminated or been closed, it will always // return nil. func (iter *Iter) Err() error { return iter.err } // run iterates through the underlying iterator, sending // entities on iter.entityc, first ensuring that their respective base // entities have also been fetched. 
func (iter *Iter) run() { defer iter.runWG.Done() defer close(iter.entityc) defer iter.iter.Close() for { var e mongodoc.Entity if !iter.iter.Next(&e) { break } iter.addEntity(entity{&e}) if len(iter.baseEntityBatch) >= baseEntityThreshold || len(iter.entityBatch) >= entityThreshold { // We've reached one of the thresholds - send the batch. if !iter.sendBatch() { return } } } iter.sendBatch() } // addEntity adds an entity that has been received // from the underlying iterator. // // Called from iter.run without any locks held. func (iter *Iter) addEntity(e entity) { iter.entityBatch = append(iter.entityBatch, e.Entity) entities := &iter.cache.entities entities.mu.Lock() defer entities.mu.Unlock() if _, ok := entities.entities[*e.url()]; ok { // The entity has already been fetched, or is being fetched. // This also implies that its base entity has already been added (or // is in the process of being added) to the cache. return } if entities.version == iter.version { // The entity we have here is valid to put into the cache, so do that. // Note: we know from the check above that the entity is not // already present in the cache. entities.addEntity(e, nil) } baseEntities := &iter.cache.baseEntities baseEntities.mu.Lock() defer baseEntities.mu.Unlock() baseURL := mongodoc.BaseURL(e.URL) if _, ok := baseEntities.entities[*baseURL]; !ok { // We need to fetch the base entity, so add it to our // batch and signal that it will be fetched by adding it // to the map. Note: this assumes that the client doing // the iteration will make progress - it could delay // other base entity reads arbitrarily by not calling // Next. This should not be a problem in practice. iter.baseEntityBatch = append(iter.baseEntityBatch, baseURL) baseEntities.entities[*baseURL] = nil } return } // sendBatch obtains all the batched base entities and sends all the // batched entities on iter.entityc. If it encounters an error, or the // iterator is closed, it sets iter.err and returns false. 
//
// Called from iter.run with no locks held.
func (iter *Iter) sendBatch() bool {
	// Start a fetch for all base entities.
	// TODO use actual batch fetch with $in etc rather
	// than starting a goroutine for each base entity.
	baseEntities := &iter.cache.baseEntities
	baseEntities.mu.Lock()
	iter.cache.wg.Add(len(iter.baseEntityBatch))
	for _, id := range iter.baseEntityBatch {
		go baseEntities.fetchAsync(id, baseEntities.fields, baseEntities.version)
	}
	baseEntities.mu.Unlock()
	// Reset the batches in place, keeping their backing storage.
	iter.baseEntityBatch = iter.baseEntityBatch[:0]
	for _, e := range iter.entityBatch {
		if !iter.send(e) {
			return false
		}
	}
	iter.entityBatch = iter.entityBatch[:0]
	return true
}

// send sends the given entity on iter.entityc.
// It reports whether that entity was sent OK (that is,
// the iterator has not been closed).
func (iter *Iter) send(e *mongodoc.Entity) bool {
	select {
	case iter.entityc <- e:
		return true
	case <-iter.closed:
		return false
	}
}

// stashEntity represents an entity stored in a stash.
// It is implemented by the entity and baseEntity types.
type stashEntity interface {
	url() *charm.URL
	promulgatedURL() *charm.URL
}

type entity struct {
	*mongodoc.Entity
}

// url returns a copy of the entity's URL so that callers
// cannot mutate the cached document's URL.
func (e entity) url() *charm.URL {
	u := *e.URL
	return &u
}

// promulgatedURL returns a copy of the promulgated URL,
// or nil if the entity has none.
func (e entity) promulgatedURL() *charm.URL {
	if e.PromulgatedURL == nil {
		return nil
	}
	u := *e.PromulgatedURL
	return &u
}

type baseEntity struct {
	*mongodoc.BaseEntity
}

func (e baseEntity) url() *charm.URL {
	return e.URL
}

// promulgatedURL is always nil for base entities.
func (e baseEntity) promulgatedURL() *charm.URL {
	return nil
}

// StoreQuery represents a query on entities in the charm store. It is
// represented as an interface rather than using *mgo.Query directly so
// that we can easily fake it in tests, and so that it's possible to use
// other different underlying representations.
type StoreQuery interface {
	// Iter returns an iterator over the query, selecting
	// at least the fields mentioned in the given map.
Iter(fields map[string]int) StoreIter } // StoreIter represents an iterator over entities in the charm store. type StoreIter interface { Next(interface{}) bool Err() error Close() error } type mgoQuery struct { query *mgo.Query } func (q mgoQuery) Iter(fields map[string]int) StoreIter { return q.query.Select(fields).Iter() } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/entitycache/package_test.go0000664000175000017500000000033312672604603030470 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package entitycache_test import ( "testing" gc "gopkg.in/check.v1" ) func TestPackage(t *testing.T) { gc.TestingT(t) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/entitycache/bench_test.go0000664000175000017500000000263412672604603030162 0ustar marcomarcopackage entitycache_test import ( "testing" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmstore.v5-unstable/internal/entitycache" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" ) func BenchmarkSingleRequest(b *testing.B) { // This benchmarks the common case of getting a single entity and its // base entity, so that we get an idea of the baseline overhead // in this simple case. 
entity := &mongodoc.Entity{ URL: charm.MustParseURL("~bob/wordpress-1"), BaseURL: charm.MustParseURL("~bob/wordpress"), BlobName: "w1", } baseEntity := &mongodoc.BaseEntity{ URL: charm.MustParseURL("~bob/wordpress"), Name: "wordpress", } store := &callbackStore{ findBestEntity: func(url *charm.URL, fields map[string]int) (*mongodoc.Entity, error) { return entity, nil }, findBaseEntity: func(url *charm.URL, fields map[string]int) (*mongodoc.BaseEntity, error) { return baseEntity, nil }, } url := charm.MustParseURL("~bob/wordpress-1") baseURL := charm.MustParseURL("~bob/wordpress") for i := 0; i < b.N; i++ { c := entitycache.New(store) c.AddEntityFields(map[string]int{"size": 1, "blobname": 1}) e, err := c.Entity(url, nil) if err != nil || e != entity { b.Fatalf("get returned unexpected entity (err %v)", err) } be, err := c.BaseEntity(baseURL, nil) if err != nil || be != baseEntity { b.Fatalf("get returned unexpected base entity (err %v)", err) } } } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/0000775000175000017500000000000012672604603024306 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/export_test.go0000664000175000017500000000043112672604603027213 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package agent // import "gopkg.in/juju/charmstore.v5-unstable/internal/agent" type ( AgentLoginRequest agentLoginRequest LoginMethods loginMethods Error agentError ) charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/agent.go0000664000175000017500000000573112672604603025741 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package agent // import "gopkg.in/juju/charmstore.v5-unstable/internal/agent"

import (
	"bytes"
	"encoding/json"
	"net/http"
	"net/url"

	"github.com/juju/loggo"
	"gopkg.in/errgo.v1"
	"gopkg.in/macaroon-bakery.v1/bakery"
	"gopkg.in/macaroon-bakery.v1/httpbakery"

	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
)

var logger = loggo.GetLogger("charmstore.internal.agent")

// loginMethods holds the response from the login endpoint; only the
// agent login URL is of interest here.
type loginMethods struct {
	Agent string `json:"agent"`
}

// agentLoginRequest is the JSON body posted to the agent login URL.
type agentLoginRequest struct {
	Username  string            `json:"username"`
	PublicKey *bakery.PublicKey `json:"public_key"`
}

// TODO make VisitWebPage support using different usernames (and possibly
// keys) for different sites.

// VisitWebPage returns a function that can be used with
// httpbakery.Client.VisitWebPage. The returned function will attempt to
// perform an agent login with the server.
func VisitWebPage(c *httpbakery.Client, username string) func(u *url.URL) error {
	return func(u *url.URL) error {
		logger.Infof("Attempting agent login to %q", u)
		req, err := http.NewRequest("GET", u.String(), nil)
		if err != nil {
			return errgo.Notef(err, "cannot create request")
		}
		// Set the Accept header to indicate that we're asking for a
		// non-interactive login.
		req.Header.Set("Accept", "application/json")
		resp, err := c.Do(req)
		if err != nil {
			return errgo.Notef(err, "cannot get login methods")
		}
		defer resp.Body.Close()
		var lm loginMethods
		if err := router.UnmarshalJSONResponse(resp, &lm, getError); err != nil {
			return errgo.Notef(err, "cannot get login methods")
		}
		if lm.Agent == "" {
			// The server's response carried no agent login URL.
			return errgo.New("agent login not supported")
		}
		lr := &agentLoginRequest{
			Username: username,
		}
		if c.Key != nil {
			lr.PublicKey = &c.Key.Public
		}
		body, err := json.Marshal(lr)
		if err != nil {
			return errgo.Notef(err, "cannot marshal login request")
		}
		req, err = http.NewRequest("POST", lm.Agent, nil)
		if err != nil {
			return errgo.Notef(err, "cannot create login request")
		}
		req.Header.Set("Content-Type", "application/json")
		// The body is supplied separately so that the bakery client
		// can replay it if a discharge round-trip is needed.
		resp, err = c.DoWithBody(req, bytes.NewReader(body))
		if err != nil {
			return errgo.Notef(err, "cannot post login request")
		}
		defer resp.Body.Close()
		if resp.StatusCode >= http.StatusBadRequest {
			return errgo.Notef(getError(resp), "cannot log in")
		}
		return nil
	}
}

// NewClient creates an httpbakery.Client that is configured to use agent
// login. The agent login attempts will be made using the provided
// username and key.
func NewClient(username string, key *bakery.KeyPair) *httpbakery.Client {
	c := httpbakery.NewClient()
	c.Key = key
	c.VisitWebPage = VisitWebPage(c, username)
	return c
}

// getError decodes an error response body from the server into an
// agentError, falling back to the unmarshalling error itself.
func getError(resp *http.Response) error {
	var aerr agentError
	if err := router.UnmarshalJSONResponse(resp, &aerr, nil); err != nil {
		return err
	}
	return aerr
}

// agentError is the JSON error shape returned by the server.
type agentError struct {
	Message string
}

func (e agentError) Error() string {
	return e.Message
}
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/idm_test.go0000664000175000017500000002126412672604603026452 0ustar  marcomarco// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package agent_test import ( "encoding/json" "fmt" "io/ioutil" "mime" "net/http" "net/http/httptest" gc "gopkg.in/check.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/juju/charmstore.v5-unstable/internal/agent" ) type discharge struct { id string c chan error } // idM provides a mock identity server that can be used to test agent login. // the following endpoints are provided: // /public-key // /discharge // /protected // /login // /agent // /wait // Most tests will intiate with a call to /protected. type idM struct { *httptest.Server *http.ServeMux svc *bakery.Service discharges map[string]discharge key *bakery.KeyPair } func newIdM(c *gc.C) *idM { i := &idM{ ServeMux: http.NewServeMux(), discharges: make(map[string]discharge), } i.Server = httptest.NewServer(i) var err error i.key, err = bakery.GenerateKey() c.Assert(err, gc.IsNil) i.svc, err = bakery.NewService(bakery.NewServiceParams{ Key: i.key, Locator: bakery.PublicKeyLocatorMap{ i.URL: &i.key.Public, }, }) c.Assert(err, gc.IsNil) httpbakery.AddDischargeHandler(i.ServeMux, "/", i.svc, i.checker) i.Handle("/", http.HandlerFunc(i.notFound)) i.Handle("/protected", http.HandlerFunc(i.serveProtected)) i.Handle("/login", http.HandlerFunc(i.serveLogin)) i.Handle("/wait", http.HandlerFunc(i.serveWait)) i.Handle("/agent", http.HandlerFunc(i.serveAgent)) return i } func (i *idM) notFound(w http.ResponseWriter, req *http.Request) { i.error(w, http.StatusNotFound, "not found", "%s not found", req.URL.Path) } func (i *idM) write(w http.ResponseWriter, v interface{}) { body, err := json.Marshal(v) if err != nil { i.error(w, http.StatusInternalServerError, "cannot marshal response: %s", err) return } w.Header().Set("Content-Type", "application/json") w.Write(body) } func (i *idM) error(w http.ResponseWriter, status int, format string, a ...interface{}) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(status) body, 
err := json.Marshal(&agent.Error{ Message: fmt.Sprintf(format, a...), }) if err != nil { panic(err) } w.Write(body) } // serveProtected provides the /protected endpoint. When /protected is // called two parameters should be provided: // test = test id this id uniquely identifies the test // cav = the caveat to put in the third party caveat. // // The cav parameter determines what will happen in the test and can be one of // allow = the macaroon is discharged straight away // agent = successful agent authentication // agent-fail = unsuccessful agent authentication // interactive = login does not return a JSON object // no-agent = login does return a JSON object, but agent authentication is not specified. func (i *idM) serveProtected(w http.ResponseWriter, r *http.Request) { r.ParseForm() if r.Form.Get("test") == "" { i.error(w, http.StatusBadRequest, "test id not specified") return } attrs, err := httpbakery.CheckRequest(i.svc, r, nil, checkers.OperationChecker(r.Form.Get("test"))) if err == nil { i.write(w, attrs) return } verr, ok := err.(*bakery.VerificationError) if !ok { i.error(w, http.StatusInternalServerError, "error checking macaroon: %s", err) return } m, err := i.svc.NewMacaroon("", nil, []checkers.Caveat{ { Location: i.URL, Condition: r.Form.Get("c") + " " + r.Form.Get("test"), }, checkers.AllowCaveat(r.Form.Get("test")), }) if err != nil { i.error(w, http.StatusInternalServerError, "cannot create macaroon: %s", err) return } httpbakery.WriteDischargeRequiredErrorForRequest(w, m, "/", verr, r) } // serveLogin provides the /login endpoint. When /login is called it should // be provided with a test id. /login also supports some additional parameters: // a = if set to "true" an agent URL will be added to the json response. // i = if set to "true" a plaintext response will be sent to simulate interaction. 
// serveLogin answers with the available login methods, or with a
// plaintext body to simulate an interactive login page.
func (i *idM) serveLogin(w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	// A non-JSON Accept header (or i=true) simulates an interactive,
	// browser-style login page.
	if r.Form.Get("i") == "true" || r.Header.Get("Accept") != "application/json" {
		w.Write([]byte("Let's interact!"))
		return
	}
	var lm agent.LoginMethods
	if r.Form.Get("a") == "true" {
		lm.Agent = i.URL + "/agent?test=" + r.Form.Get("test") + "&f=" + r.Form.Get("f")
	}
	i.write(w, lm)
}

// serveWait provides the /wait endpoint. When /wait is called it should
// be provided with a test id. This then matches the wait to the login
// being tested.
func (i *idM) serveWait(w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	if r.Form.Get("test") == "" {
		i.error(w, http.StatusBadRequest, "test id not specified")
		return
	}
	// Block until the matching /agent (or login) handler reports the
	// outcome of the login attempt on the discharge channel.
	d := i.discharges[r.Form.Get("test")]
	derr := <-d.c
	if derr != nil {
		// do something with the error
		// NOTE(review): the error is currently dropped and no response
		// is written, so a waiting client gets an empty 200 reply on
		// login failure — confirm whether an error body is intended.
		return
	}
	// Login succeeded: discharge the caveat unconditionally.
	m, err := i.svc.Discharge(
		bakery.ThirdPartyCheckerFunc(
			func(cavId, cav string) ([]checkers.Caveat, error) {
				return nil, nil
			},
		),
		d.id,
	)
	if err != nil {
		i.error(w, http.StatusInternalServerError, "cannot discharge caveat: %s", err)
		return
	}
	i.write(w, httpbakery.WaitResponse{
		Macaroon: m,
	})
}

// serveAgent provides the /agent endpoint. When /agent is called it
// should be provided with a test id. This then matches the current login
// to the correct wait. If the optional f query variable is set to "true"
// then a failure will be simulated.
func (i *idM) serveAgent(w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	if r.Form.Get("f") == "true" {
		// Simulated failure requested by the test.
		i.error(w, http.StatusTeapot, "forced failure")
		return
	}
	test := r.Form.Get("test")
	op := "agent-login-" + test
	_, err := httpbakery.CheckRequest(i.svc, r, nil, checkers.OperationChecker(op))
	if err == nil {
		// The request already carries a valid agent macaroon:
		// report success to the matching /wait.
		d := i.discharges[test]
		d.c <- nil
		return
	}
	verr, ok := err.(*bakery.VerificationError)
	if !ok {
		d := i.discharges[test]
		d.c <- err
		i.error(w, http.StatusInternalServerError, "cannot check request: %s", err)
		return
	}
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		d := i.discharges[test]
		d.c <- err
		i.error(w, http.StatusInternalServerError, "cannot read agent login request: %s", err)
		return
	}
	ct, _, err := mime.ParseMediaType(r.Header.Get("Content-Type"))
	if err != nil {
		d := i.discharges[test]
		d.c <- err
		i.error(w, http.StatusBadRequest, "cannot parse mediatype: %s", err)
		return
	}
	if ct != "application/json" {
		d := i.discharges[test]
		// NOTE(review): err is nil on this path, so a nil (success)
		// value is sent to the waiting /wait handler despite this
		// being an error branch — confirm whether a real error should
		// be sent here instead.
		d.c <- err
		i.error(w, http.StatusBadRequest, "unexpected Content-Type: %s", ct)
		return
	}
	var login agent.AgentLoginRequest
	err = json.Unmarshal(body, &login)
	if err != nil {
		d := i.discharges[test]
		d.c <- err
		i.error(w, http.StatusBadRequest, "cannot unmarshal login request: %s", err)
		return
	}
	// First visit without a valid macaroon: issue a discharge-required
	// response bound to the agent's public key.
	m, err := i.svc.NewMacaroon("", nil, []checkers.Caveat{
		bakery.LocalThirdPartyCaveat(login.PublicKey),
		checkers.AllowCaveat(op),
	})
	if err != nil {
		d := i.discharges[test]
		d.c <- err
		i.error(w, http.StatusInternalServerError, "cannot create macaroon: %s", err)
		return
	}
	httpbakery.WriteDischargeRequiredErrorForRequest(w, m, "/", verr, r)
}

// checker is the bakery third-party caveat checker. The caveat
// condition selects the test scenario; each login-style condition
// registers a pending discharge keyed by the test id and returns an
// interaction-required error pointing at the appropriate /login and
// /wait URLs.
func (i *idM) checker(r *http.Request, cavId, cav string) ([]checkers.Caveat, error) {
	cond, arg, err := checkers.ParseCaveat(cav)
	if err != nil {
		return nil, err
	}
	switch cond {
	case "allow":
		// Discharge immediately with no interaction.
		return nil, nil
	case "agent":
		i.discharges[arg] = discharge{
			id: cavId,
			c:  make(chan error, 1),
		}
		return nil, &httpbakery.Error{
			Message: "need login",
			Code:    httpbakery.ErrInteractionRequired,
			Info: &httpbakery.ErrorInfo{
				VisitURL: i.URL + "/login?a=true&test=" + arg,
				WaitURL:  i.URL + "/wait?test=" + arg,
			},
		}
	case "interactive":
		i.discharges[arg] = discharge{
			id: cavId,
			c:  make(chan error, 1),
		}
		return nil, &httpbakery.Error{
			Message: "need login",
			Code:    httpbakery.ErrInteractionRequired,
			Info: &httpbakery.ErrorInfo{
				VisitURL: i.URL + "/login?i=true&test=" + arg,
				WaitURL:  i.URL + "/wait?test=" + arg,
			},
		}
	case "no-agent":
		i.discharges[arg] = discharge{
			id: cavId,
			c:  make(chan error, 1),
		}
		return nil, &httpbakery.Error{
			Message: "need login",
			Code:    httpbakery.ErrInteractionRequired,
			Info: &httpbakery.ErrorInfo{
				VisitURL: i.URL + "/login?test=" + arg,
				WaitURL:  i.URL + "/wait?test=" + arg,
			},
		}
	case "agent-fail":
		i.discharges[arg] = discharge{
			id: cavId,
			c:  make(chan error, 1),
		}
		return nil, &httpbakery.Error{
			Message: "need login",
			Code:    httpbakery.ErrInteractionRequired,
			Info: &httpbakery.ErrorInfo{
				VisitURL: i.URL + "/login?a=true&f=true&test=" + arg,
				WaitURL:  i.URL + "/wait?test=" + arg,
			},
		}
	default:
		return nil, checkers.ErrCaveatNotRecognized
	}
}
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/agent_test.go0000664000175000017500000000424112672604603026773 0ustar  marcomarco// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package agent_test import ( "encoding/json" "fmt" "net/http" "net/url" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/juju/charmstore.v5-unstable/internal/agent" "gopkg.in/juju/charmstore.v5-unstable/internal/router" ) type agentSuite struct { idM *idM } var _ = gc.Suite(&agentSuite{}) func (s *agentSuite) SetUpSuite(c *gc.C) { s.idM = newIdM(c) } func (s *agentSuite) TearDownSuite(c *gc.C) { s.idM.Close() } var agentLoginTests = []struct { about string condition string expectBody interface{} expectError string }{{ about: "no login required", condition: "allow", expectBody: map[string]string{}, }, { about: "successful agent login", condition: "agent", expectBody: map[string]string{}, }, { about: "interactive", condition: "interactive", expectError: `cannot get discharge from ".*": cannot start interactive session: cannot get login methods: unexpected content type "text/plain"`, }, { about: "agent not supported", condition: "no-agent", expectError: `cannot get discharge from "http://.*": cannot start interactive session: agent login not supported`, }, { about: "agent fail", condition: "agent-fail", expectError: `cannot get discharge from "http://.*": cannot start interactive session: cannot log in: forced failure`, }} func (s *agentSuite) TestAgentLogin(c *gc.C) { key, err := bakery.GenerateKey() c.Assert(err, gc.IsNil) for i, test := range agentLoginTests { c.Logf("%d. 
%s", i, test.about) client := agent.NewClient("testuser", key) u := fmt.Sprintf("%s/protected?test=%d&c=%s", s.idM.URL, i, url.QueryEscape(test.condition)) req, err := http.NewRequest("GET", u, nil) c.Assert(err, gc.IsNil) resp, err := client.Do(req) if test.expectError != "" { c.Assert(err, gc.ErrorMatches, test.expectError) continue } c.Assert(err, gc.IsNil) defer resp.Body.Close() var v json.RawMessage err = router.UnmarshalJSONResponse(resp, &v, nil) c.Assert(err, gc.IsNil) c.Assert(string(v), jc.JSONEquals, test.expectBody) } } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/package_test.go0000664000175000017500000000032512672604603027267 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package agent_test import ( "testing" gc "gopkg.in/check.v1" ) func TestPackage(t *testing.T) { gc.TestingT(t) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/audit/0000775000175000017500000000000012672604603022502 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/audit/audit.go0000664000175000017500000000166412672604603024146 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package audit import ( "time" "gopkg.in/juju/charm.v6-unstable" ) // Operation represents the type of an entry. type Operation string const ( // OpSetPerm represents the setting of ACLs on an entity. // Required fields: Entity, ACL OpSetPerm Operation = "set-perm" // OpPromulgate, OpUnpromulgate represent the promulgation on an entity. // Required fields: Entity OpPromulgate Operation = "promulgate" OpUnpromulgate Operation = "unpromulgate" ) // ACL represents an access control list. type ACL struct { Read []string `json:"read,omitempty"` Write []string `json:"write,omitempty"` } // Entry represents an audit log entry. 
type Entry struct { Time time.Time `json:"time"` User string `json:"user"` Op Operation `json:"op"` Entity *charm.URL `json:"entity,omitempty"` ACL *ACL `json:"acl,omitempty"` } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/0000775000175000017500000000000012672604603024206 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/elasticsearch_test.go0000664000175000017500000003044712672604603030416 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package elasticsearch_test // import "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" import ( "encoding/json" "testing" "time" jujutesting "github.com/juju/testing" "github.com/juju/utils" gc "gopkg.in/check.v1" es "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" ) func TestPackage(t *testing.T) { gc.TestingT(t) } type Suite struct { jujutesting.IsolationSuite storetesting.ElasticSearchSuite Indexes []string TestIndex string } func (s *Suite) SetUpSuite(c *gc.C) { s.IsolationSuite.SetUpSuite(c) s.ElasticSearchSuite.SetUpSuite(c) } func (s *Suite) TearDownSuite(c *gc.C) { s.ElasticSearchSuite.TearDownSuite(c) s.IsolationSuite.TearDownSuite(c) } func (s *Suite) SetUpTest(c *gc.C) { s.IsolationSuite.SetUpTest(c) s.ElasticSearchSuite.SetUpTest(c) s.TestIndex = s.NewIndex(c) err := s.ES.PutIndex(s.TestIndex, map[string]interface{}{"settings": map[string]interface{}{"number_of_shards": 1}}) c.Assert(err, gc.Equals, nil) err = s.ES.PutDocument(s.TestIndex, "testtype", s.TestIndex, struct{}{}) c.Assert(err, gc.Equals, nil) err = s.ES.RefreshIndex(s.TestIndex) c.Assert(err, gc.Equals, nil) } func (s *Suite) TearDownTest(c *gc.C) { for _, i := range s.Indexes { s.ES.DeleteIndex(i) } s.ElasticSearchSuite.TearDownTest(c) s.IsolationSuite.TearDownTest(c) } func (s *Suite) NewIndex(c *gc.C) string { uuid, err := utils.NewUUID() c.Assert(err, gc.Equals, nil) idx 
:= time.Now().Format("20060102150405") + "-" + uuid.String() s.Indexes = append(s.Indexes, idx) return idx } var _ = gc.Suite(&Suite{}) func (s *Suite) TestSuccessfulPostDocument(c *gc.C) { doc := map[string]string{ "a": "b", } id, err := s.ES.PostDocument(s.TestIndex, "testtype", doc) c.Assert(err, gc.IsNil) c.Assert(id, gc.NotNil) var result map[string]string err = s.ES.GetDocument(s.TestIndex, "testtype", id, &result) c.Assert(err, gc.IsNil) } func (s *Suite) TestSuccessfulPutNewDocument(c *gc.C) { doc := map[string]string{ "a": "b", } // Show that no document with this id exists. exists, err := s.ES.HasDocument(s.TestIndex, "testtype", "a") c.Assert(err, gc.IsNil) c.Assert(exists, gc.Equals, false) err = s.ES.PutDocument(s.TestIndex, "testtype", "a", doc) c.Assert(err, gc.IsNil) var result map[string]string err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) c.Assert(result["a"], gc.Equals, "b") exists, err = s.ES.HasDocument(s.TestIndex, "testtype", "a") c.Assert(err, gc.IsNil) c.Assert(exists, gc.Equals, true) } func (s *Suite) TestSuccessfulPutUpdatedDocument(c *gc.C) { doc := map[string]string{ "a": "b", } err := s.ES.PutDocument(s.TestIndex, "testtype", "a", doc) c.Assert(err, gc.IsNil) doc["a"] = "c" err = s.ES.PutDocument(s.TestIndex, "testtype", "a", doc) c.Assert(err, gc.IsNil) var result map[string]string err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) c.Assert(result["a"], gc.Equals, "c") } func (s *Suite) TestPutVersionWithTypeNewDocument(c *gc.C) { doc := map[string]string{ "a": "b", } // Show that no document with this id exists. 
exists, err := s.ES.HasDocument(s.TestIndex, "testtype", "a") c.Assert(err, gc.IsNil) c.Assert(exists, gc.Equals, false) err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 1, es.ExternalGTE, doc) c.Assert(err, gc.IsNil) var result map[string]string err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) c.Assert(result["a"], gc.Equals, "b") exists, err = s.ES.HasDocument(s.TestIndex, "testtype", "a") c.Assert(err, gc.IsNil) c.Assert(exists, gc.Equals, true) } func (s *Suite) TestPutVersionWithTypeUpdateCurrentDocumentVersion(c *gc.C) { doc := map[string]string{ "a": "b", } // Show that no document with this id exists. exists, err := s.ES.HasDocument(s.TestIndex, "testtype", "a") c.Assert(err, gc.IsNil) c.Assert(exists, gc.Equals, false) err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 1, es.ExternalGTE, doc) c.Assert(err, gc.IsNil) doc["a"] = "c" err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 1, es.ExternalGTE, doc) c.Assert(err, gc.IsNil) var result map[string]string err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) c.Assert(result["a"], gc.Equals, "c") exists, err = s.ES.HasDocument(s.TestIndex, "testtype", "a") c.Assert(err, gc.IsNil) c.Assert(exists, gc.Equals, true) } func (s *Suite) TestPutVersionWithTypeUpdateLaterDocumentVersion(c *gc.C) { doc := map[string]string{ "a": "b", } // Show that no document with this id exists. 
exists, err := s.ES.HasDocument(s.TestIndex, "testtype", "a") c.Assert(err, gc.IsNil) c.Assert(exists, gc.Equals, false) err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 1, es.ExternalGTE, doc) c.Assert(err, gc.IsNil) doc["a"] = "c" err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 3, es.ExternalGTE, doc) c.Assert(err, gc.IsNil) var result map[string]string err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) c.Assert(result["a"], gc.Equals, "c") exists, err = s.ES.HasDocument(s.TestIndex, "testtype", "a") c.Assert(err, gc.IsNil) c.Assert(exists, gc.Equals, true) } func (s *Suite) TestPutVersionWithTypeUpdateEarlierDocumentVersion(c *gc.C) { doc := map[string]string{ "a": "b", } // Show that no document with this id exists. exists, err := s.ES.HasDocument(s.TestIndex, "testtype", "a") c.Assert(err, gc.IsNil) c.Assert(exists, gc.Equals, false) err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 3, es.ExternalGTE, doc) c.Assert(err, gc.IsNil) doc["a"] = "c" err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 1, es.ExternalGTE, doc) c.Assert(err, gc.Equals, es.ErrConflict) var result map[string]string err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) c.Assert(result["a"], gc.Equals, "b") exists, err = s.ES.HasDocument(s.TestIndex, "testtype", "a") c.Assert(err, gc.IsNil) c.Assert(exists, gc.Equals, true) } func (s *Suite) TestDelete(c *gc.C) { doc := map[string]string{ "a": "b", } _, err := s.ES.PostDocument(s.TestIndex, "testtype", doc) c.Assert(err, gc.IsNil) err = s.ES.DeleteIndex(s.TestIndex) c.Assert(err, gc.IsNil) } func (s *Suite) TestDeleteErrorOnNonExistingIndex(c *gc.C) { err := s.ES.DeleteIndex("nope") c.Assert(err, gc.NotNil) c.Assert(err.Error(), gc.Equals, "elasticsearch document not found") } func (s *Suite) TestIndexesCreatedAutomatically(c *gc.C) { doc := map[string]string{"a": "b"} _, err := s.ES.PostDocument(s.TestIndex, "testtype", doc) c.Assert(err, gc.IsNil) 
indexes, err := s.ES.ListAllIndexes() c.Assert(err, gc.IsNil) c.Assert(indexes, gc.Not(gc.HasLen), 0) found := false for _, index2 := range indexes { if index2 == s.TestIndex { found = true } } c.Assert(found, gc.Equals, true) } func (s *Suite) TestHealthIsWorking(c *gc.C) { result, err := s.ES.Health() c.Assert(err, gc.IsNil) c.Assert(result.ClusterName, gc.NotNil) c.Assert(result.ActivePrimaryShards, gc.NotNil) c.Assert(result.ActiveShards, gc.NotNil) c.Assert(result.InitializingShards, gc.NotNil) c.Assert(result.NumberOfDataNodes, gc.NotNil) c.Assert(result.NumberOfNodes, gc.NotNil) c.Assert(result.RelocatingShards, gc.NotNil) c.Assert(result.Status, gc.NotNil) c.Assert(result.TimedOut, gc.NotNil) c.Assert(result.UnassignedShards, gc.NotNil) } func (s *Suite) TestSearch(c *gc.C) { doc := map[string]string{"foo": "bar"} _, err := s.ES.PostDocument(s.TestIndex, "testtype", doc) c.Assert(err, gc.IsNil) doc["foo"] = "baz" id2, err := s.ES.PostDocument(s.TestIndex, "testtype", doc) c.Assert(err, gc.IsNil) s.ES.RefreshIndex(s.TestIndex) q := es.QueryDSL{ Query: es.TermQuery{Field: "foo", Value: "baz"}, Fields: []string{"foo"}, } results, err := s.ES.Search(s.TestIndex, "testtype", q) c.Assert(err, gc.IsNil) c.Assert(results.Hits.Total, gc.Equals, 1) c.Assert(results.Hits.Hits[0].ID, gc.Equals, id2) c.Assert(results.Hits.Hits[0].Fields.GetString("foo"), gc.Equals, "baz") } func (s *Suite) TestPutMapping(c *gc.C) { var mapping = map[string]interface{}{ "testtype": map[string]interface{}{ "properties": map[string]interface{}{ "foo": map[string]interface{}{ "type": "string", }, }, }, } err := s.ES.PutMapping(s.TestIndex, "testtype", mapping) c.Assert(err, gc.IsNil) } func (s *Suite) TestEscapeRegexp(c *gc.C) { var tests = []struct { about string original string expected string }{{ about: `plain string`, original: `foo`, expected: `foo`, }, { about: `escape .`, original: `foo.bar`, expected: `foo\.bar`, }, { about: `escape ?`, original: `foo?bar`, expected: `foo\?bar`, }, 
{ about: `escape +`, original: `foo+bar`, expected: `foo\+bar`, }, { about: `escape *`, original: `foo*bar`, expected: `foo\*bar`, }, { about: `escape |`, original: `foo|bar`, expected: `foo\|bar`, }, { about: `escape {`, original: `foo{bar`, expected: `foo\{bar`, }, { about: `escape }`, original: `foo}bar`, expected: `foo\}bar`, }, { about: `escape [`, original: `foo[bar`, expected: `foo\[bar`, }, { about: `escape ]`, original: `foo]bar`, expected: `foo\]bar`, }, { about: `escape (`, original: `foo(bar`, expected: `foo\(bar`, }, { about: `escape )`, original: `foo)bar`, expected: `foo\)bar`, }, { about: `escape "`, original: `foo"bar`, expected: `foo\"bar`, }, { about: `escape \`, original: `foo\bar`, expected: `foo\\bar`, }, { about: `escape #`, original: `foo#bar`, expected: `foo\#bar`, }, { about: `escape @`, original: `foo@bar`, expected: `foo\@bar`, }, { about: `escape &`, original: `foo&bar`, expected: `foo\&bar`, }, { about: `escape <`, original: `foo`, original: `foo>bar`, expected: `foo\>bar`, }, { about: `escape ~`, original: `foo~bar`, expected: `foo\~bar`, }, { about: `escape start`, original: `*foo`, expected: `\*foo`, }, { about: `escape end`, original: `foo\`, expected: `foo\\`, }, { about: `escape many`, original: `\"*\`, expected: `\\\"\*\\`, }} for i, test := range tests { c.Logf("%d: %s", i, test.about) c.Assert(es.EscapeRegexp(test.original), gc.Equals, test.expected) } } func (s *Suite) TestAlias(c *gc.C) { uuid, err := utils.NewUUID() c.Assert(err, gc.Equals, nil) alias := uuid.String() index1 := alias + "-1" index2 := alias + "-2" // Create first index err = s.ES.PutIndex(index1, struct{}{}) c.Assert(err, gc.Equals, nil) defer s.ES.DeleteIndex(index1) // Create second index err = s.ES.PutIndex(index2, struct{}{}) c.Assert(err, gc.Equals, nil) defer s.ES.DeleteIndex(index2) // Check alias is not aliased to anything indexes, err := s.ES.ListIndexesForAlias(alias) c.Assert(err, gc.Equals, nil) c.Assert(indexes, gc.HasLen, 0) // Associate alias 
with index 1 err = s.ES.Alias(index1, alias) c.Assert(err, gc.Equals, nil) indexes, err = s.ES.ListIndexesForAlias(alias) c.Assert(err, gc.Equals, nil) c.Assert(indexes, gc.HasLen, 1) c.Assert(indexes[0], gc.Equals, index1) // Associate alias with index 2, removing it from index 1 err = s.ES.Alias(index2, alias) c.Assert(err, gc.Equals, nil) indexes, err = s.ES.ListIndexesForAlias(alias) c.Assert(err, gc.Equals, nil) c.Assert(indexes, gc.HasLen, 1) c.Assert(indexes[0], gc.Equals, index2) } func (S *Suite) TestDecodingHealthStatus(c *gc.C) { const health_message = `{ "cluster_name":"elasticsearch", "status": "green", "timed_out": true, "number_of_nodes": 2, "number_of_data_nodes": 2, "active_primary_shards": 14, "active_shards": 28, "relocating_shards": 2, "initializing_shards": 2, "unassigned_shards": 2 }` var h es.ClusterHealth err := json.Unmarshal([]byte(health_message), &h) c.Assert(err, gc.IsNil) c.Assert(h.ClusterName, gc.Equals, "elasticsearch") c.Assert(h.Status, gc.Equals, "green") c.Assert(h.TimedOut, gc.Equals, true) c.Assert(h.NumberOfNodes, gc.Equals, int64(2)) c.Assert(h.NumberOfDataNodes, gc.Equals, int64(2)) c.Assert(h.ActivePrimaryShards, gc.Equals, int64(14)) c.Assert(h.ActiveShards, gc.Equals, int64(28)) c.Assert(h.RelocatingShards, gc.Equals, int64(2)) c.Assert(h.InitializingShards, gc.Equals, int64(2)) c.Assert(h.UnassignedShards, gc.Equals, int64(2)) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/query_test.go0000664000175000017500000001061412672604603026743 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package elasticsearch_test // import "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" import ( jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" . 
"gopkg.in/juju/charmstore.v5-unstable/elasticsearch" ) type QuerySuite struct{} var _ = gc.Suite(&QuerySuite{}) func (s *QuerySuite) TestJSONEncodings(c *gc.C) { var tests = []struct { about string query interface{} json string }{{ about: "term query", query: TermQuery{Field: "foo", Value: "bar"}, json: `{"term": {"foo": "bar"}}`, }, { about: "match all query", query: MatchAllQuery{}, json: `{"match_all": {}}`, }, { about: "match query", query: MatchQuery{Field: "foo", Query: "bar"}, json: `{"match": {"foo": {"query": "bar"}}}`, }, { about: "match query with type", query: MatchQuery{Field: "foo", Query: "bar", Type: "baz"}, json: `{"match": {"foo": {"query": "bar", "type": "baz"}}}`, }, { about: "multi match query", query: MultiMatchQuery{Query: "foo", Fields: []string{BoostField("bar", 2), "baz"}}, json: `{"multi_match": {"query": "foo", "fields": ["bar^2.000000", "baz"]}}`, }, { about: "filtered query", query: FilteredQuery{ Query: TermQuery{Field: "foo", Value: "bar"}, Filter: TermFilter{Field: "baz", Value: "quz"}}, json: `{"filtered": {"query": {"term": {"foo": "bar"}}, "filter": {"term": {"baz": "quz"}}}}`, }, { about: "function score query", query: FunctionScoreQuery{ Query: TermQuery{Field: "foo", Value: "bar"}, Functions: []Function{ DecayFunction{ Function: "baz", Field: "foo", Scale: "quz", }, }, }, json: `{"function_score": {"query": {"term": {"foo": "bar"}}, "functions": [{"baz": {"foo":{"scale": "quz"}}}]}}`, }, { about: "term filter", query: TermFilter{Field: "foo", Value: "bar"}, json: `{"term": {"foo": "bar"}}`, }, { about: "and filter", query: AndFilter{ TermFilter{Field: "foo", Value: "bar"}, TermFilter{Field: "baz", Value: "quz"}, }, json: `{"and": {"filters": [{"term": {"foo": "bar"}}, {"term": {"baz": "quz"}}]}}`, }, { about: "or filter", query: OrFilter{ TermFilter{Field: "foo", Value: "bar"}, TermFilter{Field: "baz", Value: "quz"}, }, json: `{"or": {"filters": [{"term": {"foo": "bar"}}, {"term": {"baz": "quz"}}]}}`, }, { about: "not filter", 
query: NotFilter{TermFilter{Field: "foo", Value: "bar"}}, json: `{"not": {"term": {"foo": "bar"}}}`, }, { about: "query filter", query: QueryFilter{Query: TermQuery{Field: "foo", Value: "bar"}}, json: `{"query": {"term": {"foo": "bar"}}}`, }, { about: "regexp filter", query: RegexpFilter{Field: "foo", Regexp: ".*"}, json: `{"regexp": {"foo": ".*"}}`, }, { about: "query dsl", query: QueryDSL{ Fields: []string{"foo", "bar"}, Size: 10, Query: TermQuery{Field: "baz", Value: "quz"}, Sort: []Sort{{Field: "foo", Order: Order{"desc"}}}, }, json: `{"fields": ["foo", "bar"], "size": 10, "query": {"term": {"baz": "quz"}}, "sort": [{"foo": { "order": "desc"}}]}`, }, { about: "decay function", query: DecayFunction{ Function: "baz", Field: "foo", Scale: "quz", }, json: `{"baz": {"foo":{"scale": "quz"}}}`, }, { about: "boost_factor function", query: BoostFactorFunction{ BoostFactor: 1.5, }, json: `{"boost_factor": 1.5}`, }, { about: "boost_factor function with filter", query: BoostFactorFunction{ BoostFactor: 1.5, Filter: TermFilter{ Field: "foo", Value: "bar", }, }, json: `{"filter": {"term": {"foo": "bar"}}, "boost_factor": 1.5}`, }, { about: "paginated query", query: QueryDSL{ Fields: []string{"foo", "bar"}, Size: 10, Query: TermQuery{Field: "baz", Value: "quz"}, Sort: []Sort{{Field: "foo", Order: Order{"desc"}}}, From: 10, }, json: `{"fields": ["foo", "bar"], "size": 10, "query": {"term": {"baz": "quz"}}, "sort": [{"foo": { "order": "desc"}}], "from": 10}`, }, { about: "field value factor", query: FieldValueFactorFunction{ Field: "foo", Factor: 1.2, Modifier: "bar", }, json: `{"field_value_factor": {"field": "foo", "factor": 1.2, "modifier": "bar"}}`, }} for i, test := range tests { c.Logf("%d: %s", i, test.about) // Note JSONEquals is being used a bit backwards here, this is fine // but any error results may be a little confusing. 
c.Assert(test.json, jc.JSONEquals, test.query) } } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/elasticsearch.go0000664000175000017500000004253312672604603027356 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. // elasticsearch package api attempts to name methods to match the // corresponding elasticsearch endpoint. Methods names like CatIndices are // named as such because they correspond to /_cat/indices elasticsearch // endpoint. // There is no reason to use different vocabulary from that of elasticsearch. // Use the elasticsearch terminology and avoid mapping names of things. package elasticsearch // import "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" import ( "bytes" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" "path" "strings" "github.com/juju/loggo" "gopkg.in/errgo.v1" ) const ( // Internal provides elasticsearche's "internal" versioning system, as described in // http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types Internal = "internal" // External provides elasticsearche's "external" versioning system, as described in // http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types External = "external" // ExternalGTE provides elasticsearche's "external_gte" versioning system, as described in // http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types ExternalGTE = "external_gte" ) var log = loggo.GetLogger("charmstore.elasticsearch") var ErrConflict = errgo.New("elasticsearch document conflict") var ErrNotFound = errgo.New("elasticsearch document not found") type ElasticSearchError struct { Err string `json:"error"` Status int `json:"status"` } func (e ElasticSearchError) Error() string { return e.Err } type Database struct { Addr string } // Document represents a document in the elasticsearch database. 
type Document struct {
	Found   bool            `json:"found"`
	Id      string          `json:"_id"`
	Index   string          `json:"_index"`
	Type    string          `json:"_type"`
	Version int64           `json:"_version"`
	Source  json.RawMessage `json:"_source"`
}

// ClusterHealth represents the response from _cluster/health on elastic search
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-health.html
type ClusterHealth struct {
	ClusterName         string `json:"cluster_name"`
	Status              string `json:"status"`
	TimedOut            bool   `json:"timed_out"`
	NumberOfNodes       int64  `json:"number_of_nodes"`
	NumberOfDataNodes   int64  `json:"number_of_data_nodes"`
	ActivePrimaryShards int64  `json:"active_primary_shards"`
	ActiveShards        int64  `json:"active_shards"`
	RelocatingShards    int64  `json:"relocating_shards"`
	InitializingShards  int64  `json:"initializing_shards"`
	UnassignedShards    int64  `json:"unassigned_shards"`
}

// String implements fmt.Stringer, rendering the cluster health as a single
// human-readable line of "key: value" pairs.
// Note: the "unassigned_shards" entry previously lacked the space after the
// colon that every other entry has; the format is now consistent throughout.
func (h *ClusterHealth) String() string {
	return fmt.Sprintf("cluster_name: %s, status: %s, timed_out: %t"+
		", number_of_nodes: %d, number_of_data_nodes: %d"+
		", active_primary_shards: %d, active_shards: %d"+
		", relocating_shards: %d, initializing_shards: %d"+
		", unassigned_shards: %d",
		h.ClusterName, h.Status, h.TimedOut,
		h.NumberOfNodes, h.NumberOfDataNodes,
		h.ActivePrimaryShards, h.ActiveShards,
		h.RelocatingShards, h.InitializingShards,
		h.UnassignedShards)
}

// Alias creates or updates an index alias. An alias a is created,
// or modified if it already exists, to point to i. See
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-aliases.html#indices-aliases
// for further details.
func (db *Database) Alias(i, a string) error {
	indexes, err := db.ListIndexesForAlias(a)
	if err != nil {
		return errgo.Notef(err, "cannot retrieve current aliases")
	}
	var actions struct {
		Actions []action `json:"actions"`
	}
	// Remove the alias from every index it currently points to.
	// NOTE(review): the loop variable deliberately shadows the parameter i;
	// the shadow is scoped to the loop body, so the check below still sees
	// the parameter.
	for _, i := range indexes {
		actions.Actions = append(actions.Actions, action{Remove: &alias{Index: i, Alias: a}})
	}
	// An empty index name means "remove the alias only"; otherwise point the
	// alias at the requested index.
	if i != "" {
		actions.Actions = append(actions.Actions, action{Add: &alias{Index: i, Alias: a}})
	}
	// Nothing to remove and nothing to add: skip the round trip entirely.
	if len(actions.Actions) == 0 {
		return nil
	}
	if err := db.post(db.url("_aliases"), actions, nil); err != nil {
		return errgo.Notef(err, "error updating aliases")
	}
	return nil
}

// CreateDocument attempts to create a new document at index/type_/id with the
// contents in doc. If the document already exists then CreateDocument will return
// ErrConflict, and it returns a non-nil error if any other error occurs.
// See http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/create-doc.html#create-doc
// for further details.
func (db *Database) CreateDocument(index, type_, id string, doc interface{}) error {
	if err := db.put(db.url(index, type_, id, "_create"), doc, nil); err != nil {
		return getError(err)
	}
	return nil
}

// DeleteDocument deletes the document at index/type_/id from the elasticsearch
// database. See http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/delete-doc.html#delete-doc
// for further details.
func (db *Database) DeleteDocument(index, type_, id string) error {
	if err := db.delete(db.url(index, type_, id), nil, nil); err != nil {
		return getError(err)
	}
	return nil
}

// DeleteIndex deletes the index with the given name from the database.
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-delete-index.html
// If the index does not exist or if the database cannot be
// reached, then an error is returned.
func (db *Database) DeleteIndex(index string) error {
	if err := db.delete(db.url(index), nil, nil); err != nil {
		return getError(err)
	}
	return nil
}

// GetDocument retrieves the document with the given index, type_ and id and
// unmarshals the json response into v. GetDocument returns ErrNotFound if the
// requested document is not present, and returns a non-nil error if any other error
// occurs.
func (db *Database) GetDocument(index, type_, id string, v interface{}) error {
	d, err := db.GetESDocument(index, type_, id)
	if err != nil {
		return getError(err)
	}
	if !d.Found {
		return ErrNotFound
	}
	if err := json.Unmarshal([]byte(d.Source), &v); err != nil {
		return errgo.Mask(err)
	}
	return nil
}

// GetESDocument returns elasticsearch's view of the document stored at
// index/type_/id. It is not an error if this document does not exist, in that case
// the Found field of the returned Document will be false.
func (db *Database) GetESDocument(index, type_, id string) (Document, error) {
	var d Document
	if err := db.get(db.url(index, type_, id), nil, &d); err != nil {
		return Document{}, getError(err)
	}
	return d, nil
}

// HasDocument tests to see a document of the given index, type_, and id exists
// in the elasticsearch database. A non-nil error is returned if there is an error
// communicating with the elasticsearch database.
func (db *Database) HasDocument(index, type_, id string) (bool, error) {
	var d Document
	// _source=false: only the "found" flag is needed, not the document body.
	if err := db.get(db.url(index, type_, id)+"?_source=false", nil, &d); err != nil {
		return false, getError(err)
	}
	return d.Found, nil
}

// Health checks the health status of Elastic search and retrieves general data
// from it, calling get on /_cluster/health to retrieve the status.
func (db *Database) Health() (ClusterHealth, error) {
	var result ClusterHealth
	if err := db.get(db.url("_cluster", "health"), nil, &result); err != nil {
		return ClusterHealth{}, getError(err)
	}
	return result, nil
}

// ListAllIndexes retrieves the list of all user indexes in the elasticsearch database.
// indexes that are generated to support plugins are filtered out of the list that
// is returned.
func (db *Database) ListAllIndexes() ([]string, error) {
	var result map[string]interface{}
	if err := db.get(db.url("_aliases"), nil, &result); err != nil {
		return nil, getError(err)
	}
	var indexes []string
	for key := range result {
		// Some ElasticSearch plugins create indexes (e.g. ".marvel...") for their
		// use. Ignore any that start with a dot.
		if !strings.HasPrefix(key, ".") {
			indexes = append(indexes, key)
		}
	}
	return indexes, nil
}

// ListIndexesForAlias retrieves the list of all indexes in the elasticsearch database
// that have the alias a.
func (db *Database) ListIndexesForAlias(a string) ([]string, error) {
	var result map[string]struct{}
	if err := db.get(db.url("*", "_alias", a), nil, &result); err != nil {
		return nil, getError(err)
	}
	var indexes []string
	for key := range result {
		indexes = append(indexes, key)
	}
	return indexes, nil
}

// PostDocument creates a new auto id document with the given index and _type
// and returns the generated id of the document. The type_ parameter controls how
// the document will be mapped in the index. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html
// for more details.
func (db *Database) PostDocument(index, type_ string, doc interface{}) (string, error) {
	var resp struct {
		ID string `json:"_id"`
	}
	if err := db.post(db.url(index, type_), doc, &resp); err != nil {
		return "", getError(err)
	}
	return resp.ID, nil
}

// PutDocument creates or updates the document with the given index, type_ and
// id. The type_ parameter controls how the document will be mapped in the index.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html
// for more details.
func (db *Database) PutDocument(index, type_, id string, doc interface{}) error { if err := db.put(db.url(index, type_, id), doc, nil); err != nil { return getError(err) } return nil } // PutDocumentVersion creates or updates the document in the given index if the version // parameter is the same as the currently stored version. The type_ parameter // controls how the document will be indexed. PutDocumentVersion returns // ErrConflict if the data cannot be stored due to a version mismatch, and a non-nil error if // any other error occurs. // See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#index-versioning // for more information. func (db *Database) PutDocumentVersion(index, type_, id string, version int64, doc interface{}) error { return db.PutDocumentVersionWithType(index, type_, id, version, "internal", doc) } // PutDocumentVersion creates or updates the document in the given index if the version // parameter is the same as the currently stored version. The type_ parameter // controls how the document will be indexed. PutDocumentVersionWithType returns // ErrConflict if the data cannot be stored due to a version mismatch, and a non-nil error if // any other error occurs. // // The constants Internal, External and ExternalGTE represent some of the available // version types. Other version types may also be available, plese check the elasticsearch // documentation. // // See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#index-versioning // and http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types for more information. 
func (db *Database) PutDocumentVersionWithType(
	index, type_, id string,
	version int64,
	versionType string,
	doc interface{}) error {
	// The version and version_type query parameters make elasticsearch reject
	// the write with a conflict unless the stored version matches.
	url := fmt.Sprintf("%s?version=%d&version_type=%s", db.url(index, type_, id), version, versionType)
	if err := db.put(url, doc, nil); err != nil {
		return getError(err)
	}
	return nil
}

// PutIndex creates the index with the given configuration.
func (db *Database) PutIndex(index string, config interface{}) error {
	if err := db.put(db.url(index), config, nil); err != nil {
		return getError(err)
	}
	return nil
}

// PutMapping creates or updates the mapping with the given configuration.
func (db *Database) PutMapping(index, type_ string, config interface{}) error {
	if err := db.put(db.url(index, "_mapping", type_), config, nil); err != nil {
		return getError(err)
	}
	return nil
}

// RefreshIndex posts a _refresh to the index in the database.
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-refresh.html
func (db *Database) RefreshIndex(index string) error {
	if err := db.post(db.url(index, "_refresh"), nil, nil); err != nil {
		return getError(err)
	}
	return nil
}

// Search performs the query specified in q on the values in index/type_ and returns a
// SearchResult.
func (db *Database) Search(index, type_ string, q QueryDSL) (SearchResult, error) {
	var sr SearchResult
	if err := db.get(db.url(index, type_, "_search"), q, &sr); err != nil {
		return SearchResult{}, errgo.Notef(getError(err), "search failed")
	}
	return sr, nil
}

// do performs a request on the elasticsearch server. If body is not nil it will be
// marshaled as a json object and sent with the request. If v is non nil the response
// body will be unmarshalled into the value it points to.
func (db *Database) do(method, url string, body, v interface{}) error { log.Debugf(">>> %s %s", method, url) var r io.Reader if body != nil { b, err := json.Marshal(body) if err != nil { return errgo.Notef(err, "can not marshaling body") } log.Debugf(">>> %s", b) r = bytes.NewReader(b) } req, err := http.NewRequest(method, url, r) if err != nil { log.Debugf("*** %s", err) return errgo.Notef(err, "cannot create request") } if body != nil { req.Header.Add("Content-Type", "application/json") } resp, err := http.DefaultClient.Do(req) if err != nil { log.Debugf("*** %s", err) return errgo.Mask(err) } defer resp.Body.Close() b, err := ioutil.ReadAll(resp.Body) if err != nil { log.Debugf("*** %s", err) return errgo.Notef(err, "cannot read response") } log.Debugf("<<< %s", resp.Status) log.Debugf("<<< %s", b) var eserr *ElasticSearchError // TODO(mhilton) don't try to parse every response as an error. if err = json.Unmarshal(b, &eserr); err != nil { log.Debugf("*** %s", err) } if eserr.Status != 0 { return eserr } if v != nil { if err = json.Unmarshal(b, v); err != nil { log.Debugf("*** %s", err) return errgo.Notef(err, "cannot unmarshal response") } } return nil } // delete makes a DELETE request to the database url. A non-nil body will be // sent with the request and if v is not nill then the response will be unmarshaled // into tha value it points to. func (db *Database) delete(url string, body, v interface{}) error { return db.do("DELETE", url, body, v) } // get makes a GET request to the database url. A non-nil body will be // sent with the request and if v is not nill then the response will be unmarshaled // into tha value it points to. func (db *Database) get(url string, body, v interface{}) error { return db.do("GET", url, body, v) } // post makes a POST request to the database url. A non-nil body will be // sent with the request and if v is not nill then the response will be unmarshaled // into tha value it points to. 
func (db *Database) post(url string, body, v interface{}) error {
	return db.do("POST", url, body, v)
}

// put makes a PUT request to the database url. A non-nil body will be
// sent with the request and if v is not nil then the response will be unmarshaled
// into the value it points to.
func (db *Database) put(url string, body, v interface{}) error {
	return db.do("PUT", url, body, v)
}

// url constructs the URL for accessing the database.
// NOTE(review): the locals deliberately shadow the path and url packages;
// each package is fully used before its shadow takes effect.
func (db *Database) url(pathParts ...string) string {
	path := path.Join(pathParts...)
	url := &url.URL{
		Scheme: "http",
		Host:   db.Addr,
		Path:   path,
	}
	return url.String()
}

// SearchResult is the result returned after performing a search in elasticsearch
type SearchResult struct {
	Hits struct {
		Total    int     `json:"total"`
		MaxScore float64 `json:"max_score"`
		Hits     []Hit   `json:"hits"`
	} `json:"hits"`
	Took     int  `json:"took"`
	TimedOut bool `json:"timed_out"`
}

// Hit represents an individual search hit returned from elasticsearch
type Hit struct {
	Index  string          `json:"_index"`
	Type   string          `json:"_type"`
	ID     string          `json:"_id"`
	Score  float64         `json:"_score"`
	Source json.RawMessage `json:"_source"`
	Fields Fields          `json:"fields"`
}

// Fields maps each requested field name to the list of values returned
// for it in a search hit.
type Fields map[string][]interface{}

// Get retrieves the first value of key in the fields map. If no such value
// exists then it will return nil.
func (f Fields) Get(key string) interface{} {
	if len(f[key]) < 1 {
		return nil
	}
	return f[key][0]
}

// GetString retrieves the first value of key in the fields map, and coerces it into a
// string. If no such value exists or the value is not a string, then "" will be returned.
func (f Fields) GetString(key string) string {
	s, ok := f.Get(key).(string)
	if !ok {
		return ""
	}
	return s
}

// EscapeRegexp returns the supplied string with any special characters escaped.
// A regular expression match on the returned string will match exactly the characters
// in the supplied string.
func EscapeRegexp(s string) string {
	return regexpReplacer.Replace(s)
}

// regexpReplacer backslash-escapes every character that is special in an
// elasticsearch regular expression.
var regexpReplacer = strings.NewReplacer(
	`.`, `\.`,
	`?`, `\?`,
	`+`, `\+`,
	`*`, `\*`,
	`|`, `\|`,
	`{`, `\{`,
	`}`, `\}`,
	`[`, `\[`,
	`]`, `\]`,
	`(`, `\(`,
	`)`, `\)`,
	`"`, `\"`,
	`\`, `\\`,
	`#`, `\#`,
	`@`, `\@`,
	`&`, `\&`,
	`<`, `\<`,
	`>`, `\>`,
	`~`, `\~`,
)

// alias describes an alias in elasticsearch.
type alias struct {
	Index string `json:"index"`
	Alias string `json:"alias"`
}

// action is an action that can be performed on an alias.
type action struct {
	Remove *alias `json:"remove,omitempty"`
	Add    *alias `json:"add,omitempty"`
}

// getError derives an error from the underlying error returned
// by elasticsearch.
func getError(err error) error {
	if eserr, ok := err.(*ElasticSearchError); ok {
		switch eserr.Status {
		case http.StatusNotFound:
			return ErrNotFound
		case http.StatusConflict:
			return ErrConflict
		default:
			return err
		}
	}
	return err
}
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/query.go0000664000175000017500000001450512672604603025707 0ustar marcomarco// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package elasticsearch // import "gopkg.in/juju/charmstore.v5-unstable/elasticsearch"

import (
	"encoding/json"
	"fmt"
)

// Query DSL - Queries

// Query represents a query in the elasticsearch DSL.
type Query interface {
	json.Marshaler
}

// Filter represents a filter in the elasticsearch DSL.
type Filter interface {
	json.Marshaler
}

// Function is a function definition for use with a FunctionScoreQuery.
type Function interface{}

// BoostField creates a string which represents a field name with a boost value.
func BoostField(field string, boost float64) string {
	return fmt.Sprintf("%s^%f", field, boost)
}

// MatchAllQuery provides a query that matches all
// documents in the index.
type MatchAllQuery struct {
}

// MarshalJSON implements json.Marshaler by emitting {"match_all": {}}.
func (m MatchAllQuery) MarshalJSON() ([]byte, error) {
	return marshalNamedObject("match_all", struct{}{})
}

// MatchQuery provides a query that matches against
// a complete field.
type MatchQuery struct {
	Field string
	Query string
	Type  string
}

// MarshalJSON implements json.Marshaler; the type entry is only
// included when Type is non-empty.
func (m MatchQuery) MarshalJSON() ([]byte, error) {
	params := map[string]interface{}{"query": m.Query}
	if m.Type != "" {
		params["type"] = m.Type
	}
	return marshalNamedObject("match", map[string]interface{}{m.Field: params})
}

// MultiMatchQuery provides a query that matches on a number of fields.
type MultiMatchQuery struct {
	Query  string
	Fields []string
}

// MarshalJSON implements json.Marshaler.
func (m MultiMatchQuery) MarshalJSON() ([]byte, error) {
	return marshalNamedObject("multi_match", map[string]interface{}{
		"query":  m.Query,
		"fields": m.Fields,
	})
}

// FilteredQuery provides a query that includes a filter.
type FilteredQuery struct {
	Query  Query
	Filter Filter
}

// MarshalJSON implements json.Marshaler.
func (f FilteredQuery) MarshalJSON() ([]byte, error) {
	return marshalNamedObject("filtered", map[string]interface{}{
		"query":  f.Query,
		"filter": f.Filter,
	})
}

// FunctionScoreQuery provides a query that adjusts the scoring of a
// query by applying functions to it.
type FunctionScoreQuery struct {
	Query     Query
	Functions []Function
}

// MarshalJSON implements json.Marshaler.
func (f FunctionScoreQuery) MarshalJSON() ([]byte, error) {
	return marshalNamedObject("function_score", map[string]interface{}{
		"query":     f.Query,
		"functions": f.Functions,
	})
}

// TermQuery provides a query that matches a term in a field.
type TermQuery struct {
	Field string
	Value string
}

// MarshalJSON implements json.Marshaler.
func (t TermQuery) MarshalJSON() ([]byte, error) {
	return marshalNamedObject("term", map[string]interface{}{
		t.Field: t.Value,
	})
}

// DecayFunction provides a function that boosts depending on
// the difference in values of a certain field. See
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_decay_functions
// for details.
type DecayFunction struct {
	Function string
	Field    string
	Scale    string
}

// MarshalJSON implements json.Marshaler.
func (f DecayFunction) MarshalJSON() ([]byte, error) {
	return marshalNamedObject(f.Function, map[string]interface{}{
		f.Field: map[string]interface{}{
			"scale": f.Scale,
		},
	})
}

// BoostFactorFunction provides a function that boosts results by the specified amount.
type BoostFactorFunction struct {
	Filter      Filter  `json:"filter,omitempty"`
	BoostFactor float64 `json:"boost_factor"`
}

// FieldValueFactorFunction boosts the results by the value of a field in the document.
type FieldValueFactorFunction struct {
	Field    string  `json:"field"`
	Factor   float64 `json:"factor,omitempty"`
	Modifier string  `json:"modifier,omitempty"`
}

// MarshalJSON implements json.Marshaler. The local defined type drops the
// MarshalJSON method, preventing infinite recursion back into this method.
func (f FieldValueFactorFunction) MarshalJSON() ([]byte, error) {
	type ffvf FieldValueFactorFunction
	return marshalNamedObject("field_value_factor", ffvf(f))
}

// AndFilter provides a filter that matches if all of the internal
// filters match.
type AndFilter []Filter

// MarshalJSON implements json.Marshaler.
func (a AndFilter) MarshalJSON() ([]byte, error) {
	return marshalNamedObject("and", map[string]interface{}{
		"filters": []Filter(a),
	})
}

// OrFilter provides a filter that matches if any of the internal
// filters match.
type OrFilter []Filter

// MarshalJSON implements json.Marshaler.
func (o OrFilter) MarshalJSON() ([]byte, error) {
	return marshalNamedObject("or", map[string]interface{}{
		"filters": []Filter(o),
	})
}

// NotFilter provides a filter that matches the opposite of the
// wrapped filter.
type NotFilter struct {
	Filter Filter
}

// MarshalJSON implements json.Marshaler.
func (n NotFilter) MarshalJSON() ([]byte, error) {
	return marshalNamedObject("not", n.Filter)
}

// QueryFilter provides a filter that matches when a query matches
// on a result
type QueryFilter struct {
	Query Query
}

// MarshalJSON implements json.Marshaler.
func (q QueryFilter) MarshalJSON() ([]byte, error) {
	return marshalNamedObject("query", q.Query)
}

// RegexpFilter provides a filter that matches a field against a
// regular expression.
type RegexpFilter struct {
	Field  string
	Regexp string
}

// MarshalJSON implements json.Marshaler.
func (r RegexpFilter) MarshalJSON() ([]byte, error) {
	return marshalNamedObject("regexp", map[string]string{r.Field: r.Regexp})
}

// TermFilter provides a filter that requires a field to match.
type TermFilter struct {
	Field string
	Value string
}

// MarshalJSON implements json.Marshaler.
func (t TermFilter) MarshalJSON() ([]byte, error) {
	return marshalNamedObject("term", map[string]string{t.Field: t.Value})
}

// ExistsFilter provides a filter that requires a field to be present.
type ExistsFilter string

// MarshalJSON implements json.Marshaler.
func (f ExistsFilter) MarshalJSON() ([]byte, error) {
	return marshalNamedObject("exists", map[string]string{"field": string(f)})
}

// QueryDSL provides a structure to put together a query using the
// elasticsearch DSL.
type QueryDSL struct {
	Fields []string `json:"fields"`
	From   int      `json:"from,omitempty"`
	Size   int      `json:"size,omitempty"`
	Query  Query    `json:"query,omitempty"`
	Sort   []Sort   `json:"sort,omitempty"`
}

// Sort specifies a field to sort on and the direction of the sort.
type Sort struct {
	Field string
	Order Order
}

// Order holds a sort direction ("asc" or "desc").
type Order struct {
	Order string `json:"order"`
}

// MarshalJSON implements json.Marshaler.
func (s Sort) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[string]Order{
		s.Field: {s.Order.Order},
	})
}

// Ascending is an Order that orders a sort by ascending through the values.
var Ascending = Order{"asc"}

// Descending is an Order that orders a sort by descending through the values.
var Descending = Order{"desc"}

// marshalNamedObject provides a helper that creates json objects in a form
// often required by the elasticsearch query DSL. The objects created
// take the following form:
//    {
//        name: obj
//    }
func marshalNamedObject(name string, obj interface{}) ([]byte, error) {
	return json.Marshal(map[string]interface{}{name: obj})
}
charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/server.go0000664000175000017500000000734512672604603023240 0ustar marcomarco// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package charmstore // import "gopkg.in/juju/charmstore.v5-unstable" import ( "fmt" "net/http" "sort" "time" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/mgo.v2" "gopkg.in/natefinch/lumberjack.v2" "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/legacy" "gopkg.in/juju/charmstore.v5-unstable/internal/v4" "gopkg.in/juju/charmstore.v5-unstable/internal/v5" ) // Versions of the API that can be served. const ( Legacy = "" V4 = "v4" V5 = "v5" ) var versions = map[string]charmstore.NewAPIHandlerFunc{ Legacy: legacy.NewAPIHandler, V4: v4.NewAPIHandler, V5: v5.NewAPIHandler, } // HTTPCloseHandler represents a HTTP handler that // must be closed after use. type HTTPCloseHandler interface { Close() http.Handler } // Versions returns all known API version strings in alphabetical order. func Versions() []string { vs := make([]string, 0, len(versions)) for v := range versions { vs = append(vs, v) } sort.Strings(vs) return vs } // ServerParams holds configuration for a new API server. type ServerParams struct { // AuthUsername and AuthPassword hold the credentials // used for HTTP basic authentication. AuthUsername string AuthPassword string // IdentityLocation holds the location of the third party authorization // service to use when creating third party caveats, // for example: http://api.jujucharms.com/identity/v1/discharger // If it is empty, IdentityURL+"/v1/discharger" will be used. IdentityLocation string // TermsLocations holds the location of the // terms service, which knows about user agreements to // Terms and Conditions required by the charm. TermsLocation string // PublicKeyLocator holds a public key store. // It may be nil. 
PublicKeyLocator bakery.PublicKeyLocator // IdentityAPIURL holds the URL of the identity manager, // for example http://api.jujucharms.com/identity IdentityAPIURL string // AgentUsername and AgentKey hold the credentials used for agent // authentication. AgentUsername string AgentKey *bakery.KeyPair // StatsCacheMaxAge is the maximum length of time between // refreshes of entities in the stats cache. StatsCacheMaxAge time.Duration // SearchCacheMaxAge is the maximum length of time between // refreshes of entities in the search cache. SearchCacheMaxAge time.Duration // MaxMgoSessions specifies a soft limit on the maximum // number of mongo sessions used. Each concurrent // HTTP request will use one session. MaxMgoSessions int // HTTPRequestWaitDuration holds the amount of time // that an HTTP request will wait for a free connection // when the MaxConcurrentHTTPRequests limit is reached. HTTPRequestWaitDuration time.Duration // AuditLogger optionally holds the logger which will be used to // write audit log entries. AuditLogger *lumberjack.Logger } // NewServer returns a new handler that handles charm store requests and stores // its data in the given database. The handler will serve the specified // versions of the API using the given configuration. func NewServer(db *mgo.Database, es *elasticsearch.Database, idx string, config ServerParams, serveVersions ...string) (HTTPCloseHandler, error) { newAPIs := make(map[string]charmstore.NewAPIHandlerFunc) for _, vers := range serveVersions { newAPI := versions[vers] if newAPI == nil { return nil, fmt.Errorf("unknown version %q", vers) } newAPIs[vers] = newAPI } var si *charmstore.SearchIndex if es != nil { si = &charmstore.SearchIndex{ Database: es, Index: idx, } } return charmstore.NewServer(db, si, charmstore.ServerParams(config), newAPIs) } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/server_test.go0000664000175000017500000000625612672604603024301 0ustar marcomarco// Copyright 2014 Canonical Ltd. 
// Licensed under the AGPLv3, see LICENCE file for details. package charmstore_test // import "gopkg.in/juju/charmstore.v5-unstable" import ( "fmt" "net/http" "testing" jujutesting "github.com/juju/testing" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/charmstore.v5-unstable" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" ) // These tests are copied (almost) verbatim from internal/charmstore/server_test.go func TestPackage(t *testing.T) { jujutesting.MgoTestPackage(t, nil) } type ServerSuite struct { jujutesting.IsolatedMgoSuite config charmstore.ServerParams } var _ = gc.Suite(&ServerSuite{}) func (s *ServerSuite) SetUpSuite(c *gc.C) { s.IsolatedMgoSuite.SetUpSuite(c) s.config = charmstore.ServerParams{ AuthUsername: "test-user", AuthPassword: "test-password", } } func (s *ServerSuite) TestNewServerWithNoVersions(c *gc.C) { h, err := charmstore.NewServer(s.Session.DB("foo"), nil, "", s.config) c.Assert(err, gc.ErrorMatches, `charm store server must serve at least one version of the API`) c.Assert(h, gc.IsNil) } func (s *ServerSuite) TestNewServerWithUnregisteredVersion(c *gc.C) { h, err := charmstore.NewServer(s.Session.DB("foo"), nil, "", s.config, "wrong") c.Assert(err, gc.ErrorMatches, `unknown version "wrong"`) c.Assert(h, gc.IsNil) } type versionResponse struct { Version string Path string } func (s *ServerSuite) TestVersions(c *gc.C) { c.Assert(charmstore.Versions(), gc.DeepEquals, []string{"", "v4", "v5"}) } func (s *ServerSuite) TestNewServerWithVersions(c *gc.C) { db := s.Session.DB("foo") h, err := charmstore.NewServer(db, nil, "", s.config, charmstore.V4) c.Assert(err, gc.IsNil) defer h.Close() httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: h, URL: "/v4/debug", ExpectStatus: http.StatusInternalServerError, ExpectBody: params.Error{ Message: "method not implemented", }, }) assertDoesNotServeVersion(c, h, "v3") } func assertServesVersion(c 
*gc.C, h http.Handler, vers string) { httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: h, URL: "/" + vers + "/some/path", ExpectBody: versionResponse{ Version: vers, Path: "/some/path", }, }) } func assertDoesNotServeVersion(c *gc.C, h http.Handler, vers string) { url := "/" + vers + "/debug" httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: h, URL: url, ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Message: fmt.Sprintf("no handler for %q", url), Code: params.ErrNotFound, }, }) } type ServerESSuite struct { storetesting.IsolatedMgoESSuite config charmstore.ServerParams } var _ = gc.Suite(&ServerESSuite{}) func (s *ServerESSuite) SetUpSuite(c *gc.C) { s.IsolatedMgoESSuite.SetUpSuite(c) s.config = charmstore.ServerParams{ AuthUsername: "test-user", AuthPassword: "test-password", } } func (s *ServerESSuite) TestNewServerWithElasticsearch(c *gc.C) { db := s.Session.DB("foo") srv, err := charmstore.NewServer(db, s.ES, s.TestIndex, s.config, charmstore.V4) c.Assert(err, gc.IsNil) srv.Close() } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/cmd/0000775000175000017500000000000012672604603022137 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/0000775000175000017500000000000012672604603023375 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/config.yaml0000664000175000017500000000163612672604603025534 0ustar marcomarcoaudit-log-file: audit.log mongo-url: localhost:27017 api-addr: localhost:8080 auth-username: admin auth-password: example-passwd #elasticsearch-addr: localhost:9200 # For locally running services. #identity-public-key: CIdWcEUN+0OZnKW9KwruRQnQDY/qqzVdD30CijwiWCk= #identity-location: http://localhost:8081/v1/discharger #identity-api-url: http://localhost:8081 # For production identity manager. 
identity-public-key: hmHaPgCC1UfuhYHUSX5+aihSAZesqpVdjRv0mgfIwjo= identity-location: https://api.jujucharms.com/identity/v1/discharger # Agent credentials. #agent-username: charmstore@admin@idm #agent-key: # private: 85ZQqTnqiNdEggFVy7TGjRDGMulJHHz8UKkfVl5tTu8= # public: X3Yj/aThvG20FoBhRAIX+JbFk300r9Roc2D78r/37iw= # Statistics Cache maximum age, default 1 hour #stats-cache-max-age: 1h #request-timeout: 500ms #search-cache-max-age: 0s # Uncomment to test with a terms service running locally #terms-location: localhost:8085 charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/main.go0000664000175000017500000000751712672604603024662 0ustar marcomarco// Copyright 2012, 2013, 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package main // import "gopkg.in/juju/charmstore.v5-unstable/cmd/charmd" import ( "flag" "fmt" "net/http" "os" "path/filepath" "github.com/juju/loggo" "gopkg.in/errgo.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/mgo.v2" "gopkg.in/natefinch/lumberjack.v2" "gopkg.in/juju/charmstore.v5-unstable" "gopkg.in/juju/charmstore.v5-unstable/config" "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" "gopkg.in/juju/charmstore.v5-unstable/internal/debug" ) var ( logger = loggo.GetLogger("charmd") loggingConfig = flag.String("logging-config", "", "specify log levels for modules e.g. 
=TRACE") ) func main() { flag.Usage = func() { fmt.Fprintf(os.Stderr, "usage: %s [options] \n", filepath.Base(os.Args[0])) flag.PrintDefaults() os.Exit(2) } flag.Parse() if flag.NArg() != 1 { flag.Usage() } if *loggingConfig != "" { if err := loggo.ConfigureLoggers(*loggingConfig); err != nil { fmt.Fprintf(os.Stderr, "cannot configure loggers: %v", err) os.Exit(1) } } if err := serve(flag.Arg(0)); err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } } func serve(confPath string) error { logger.Infof("reading configuration") conf, err := config.Read(confPath) if err != nil { return errgo.Notef(err, "cannot read config file %q", confPath) } logger.Infof("connecting to mongo") session, err := mgo.Dial(conf.MongoURL) if err != nil { return errgo.Notef(err, "cannot dial mongo at %q", conf.MongoURL) } defer session.Close() dbName := "juju" if conf.Database != "" { dbName = conf.Database } db := session.DB(dbName) var es *elasticsearch.Database if conf.ESAddr != "" { es = &elasticsearch.Database{ Addr: conf.ESAddr, } } keyring := bakery.NewPublicKeyRing() err = addPublicKey(keyring, conf.IdentityLocation, conf.IdentityPublicKey) if err != nil { return errgo.Mask(err) } if conf.TermsLocation != "" { err = addPublicKey(keyring, conf.TermsLocation, conf.TermsPublicKey) if err != nil { return errgo.Mask(err) } } logger.Infof("setting up the API server") cfg := charmstore.ServerParams{ AuthUsername: conf.AuthUsername, AuthPassword: conf.AuthPassword, IdentityLocation: conf.IdentityLocation, IdentityAPIURL: conf.IdentityAPIURL, TermsLocation: conf.TermsLocation, AgentUsername: conf.AgentUsername, AgentKey: conf.AgentKey, StatsCacheMaxAge: conf.StatsCacheMaxAge.Duration, MaxMgoSessions: conf.MaxMgoSessions, HTTPRequestWaitDuration: conf.RequestTimeout.Duration, SearchCacheMaxAge: conf.SearchCacheMaxAge.Duration, PublicKeyLocator: keyring, } if conf.AuditLogFile != "" { cfg.AuditLogger = &lumberjack.Logger{ Filename: conf.AuditLogFile, MaxSize: conf.AuditLogMaxSize, 
MaxAge: conf.AuditLogMaxAge, } } server, err := charmstore.NewServer(db, es, "cs", cfg, charmstore.Legacy, charmstore.V4, charmstore.V5) if err != nil { return errgo.Notef(err, "cannot create new server at %q", conf.APIAddr) } logger.Infof("starting the API server") return http.ListenAndServe(conf.APIAddr, debug.Handler("", server)) } func addPublicKey(ring *bakery.PublicKeyRing, loc string, key *bakery.PublicKey) error { if key != nil { return ring.AddPublicKeyForLocation(loc, false, key) } pubKey, err := httpbakery.PublicKeyForLocation(http.DefaultClient, loc) if err != nil { return errgo.Mask(err) } return ring.AddPublicKeyForLocation(loc, false, pubKey) } var mgoLogger = loggo.GetLogger("mgo") func init() { mgo.SetLogger(mgoLog{}) } type mgoLog struct{} func (mgoLog) Output(calldepth int, s string) error { mgoLogger.LogCallf(calldepth+1, loggo.DEBUG, "%s", s) return nil } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/cmd/cshash256/0000775000175000017500000000000012672604603023645 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/cmd/cshash256/main.go0000664000175000017500000000646212672604603025130 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. // This command populates the blobhash256 field of all entities. // This command is intended to be run on the production db and then discarded. // The first time this command is executed, all the entities are updated. // Subsequent runs have no effect. 
package main // import "gopkg.in/juju/charmstore.v5-unstable/cmd/cshash256" import ( "crypto/sha256" "flag" "fmt" "io" "os" "path/filepath" "github.com/juju/loggo" "gopkg.in/errgo.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/juju/charmstore.v5-unstable/config" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" ) var ( logger = loggo.GetLogger("cshash256") loggingConfig = flag.String("logging-config", "INFO", "specify log levels for modules e.g. =TRACE") ) func main() { flag.Usage = func() { fmt.Fprintf(os.Stderr, "usage: %s [options] \n", filepath.Base(os.Args[0])) flag.PrintDefaults() os.Exit(2) } flag.Parse() if flag.NArg() != 1 { flag.Usage() } if *loggingConfig != "" { if err := loggo.ConfigureLoggers(*loggingConfig); err != nil { fmt.Fprintf(os.Stderr, "cannot configure loggers: %v", err) os.Exit(1) } } if err := run(flag.Arg(0)); err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } } func run(confPath string) error { logger.Infof("reading configuration") conf, err := config.Read(confPath) if err != nil { return errgo.Notef(err, "cannot read config file %q", confPath) } logger.Infof("connecting to mongo") session, err := mgo.Dial(conf.MongoURL) if err != nil { return errgo.Notef(err, "cannot dial mongo at %q", conf.MongoURL) } defer session.Close() db := session.DB("juju") logger.Infof("instantiating the store") pool, err := charmstore.NewPool(db, nil, nil, charmstore.ServerParams{}) if err != nil { return errgo.Notef(err, "cannot create a new store") } store := pool.Store() defer store.Close() logger.Infof("updating entities") if err := update(store); err != nil { return errgo.Notef(err, "cannot update entities") } logger.Infof("done") return nil } func update(store *charmstore.Store) error { entities := store.DB.Entities() var entity mongodoc.Entity iter := entities.Find(bson.D{{"blobhash256", ""}}).Select(bson.D{{"blobname", 1}}).Iter() defer iter.Close() counter := 0 for 
iter.Next(&entity) { // Retrieve the archive contents. r, _, err := store.BlobStore.Open(entity.BlobName) if err != nil { return errgo.Notef(err, "cannot open archive data for %s", entity.URL) } // Calculate the contents hash. hash := sha256.New() if _, err = io.Copy(hash, r); err != nil { r.Close() return errgo.Notef(err, "cannot calculate archive sha256 for %s", entity.URL) } r.Close() // Update the entity document. if err := entities.UpdateId(entity.URL, bson.D{{ "$set", bson.D{{"blobhash256", fmt.Sprintf("%x", hash.Sum(nil))}}, }}); err != nil { return errgo.Notef(err, "cannot update entity id %s", entity.URL) } counter++ if counter%100 == 0 { logger.Infof("%d entities updated", counter) } } if err := iter.Close(); err != nil { return errgo.Notef(err, "cannot iterate entities") } logger.Infof("%d entities updated", counter) return nil } charm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/cmd/essync/0000775000175000017500000000000012672604603023443 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmstore.v5-unstable/cmd/essync/main.go0000664000175000017500000000432512672604603024722 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package main // import "gopkg.in/juju/charmstore.v5-unstable/cmd/essync" import ( "flag" "fmt" "os" "path/filepath" "github.com/juju/loggo" "gopkg.in/errgo.v1" "gopkg.in/mgo.v2" "gopkg.in/juju/charmstore.v5-unstable/config" "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" ) var logger = loggo.GetLogger("essync") var ( index = flag.String("index", "cs", "Name of index to populate.") loggingConfig = flag.String("logging-config", "", "specify log levels for modules e.g. 
=TRACE") mapping = flag.String("mapping", "", "No longer used.") settings = flag.String("settings", "", "No longer used.") ) func main() { flag.Usage = func() { fmt.Fprintf(os.Stderr, "usage: %s [options] \n", filepath.Base(os.Args[0])) flag.PrintDefaults() os.Exit(2) } flag.Parse() if flag.NArg() != 1 { flag.Usage() } if *loggingConfig != "" { if err := loggo.ConfigureLoggers(*loggingConfig); err != nil { fmt.Fprintf(os.Stderr, "cannot configure loggers: %v", err) os.Exit(1) } } if err := populate(flag.Arg(0)); err != nil { logger.Errorf("cannot populate elasticsearch: %v", err) os.Exit(1) } } func populate(confPath string) error { logger.Debugf("reading config file %q", confPath) conf, err := config.Read(confPath) if err != nil { return errgo.Notef(err, "cannot read config file %q", confPath) } if conf.ESAddr == "" { return errgo.Newf("no elasticsearch-addr specified in config file %q", confPath) } si := &charmstore.SearchIndex{ Database: &elasticsearch.Database{ conf.ESAddr, }, Index: *index, } session, err := mgo.Dial(conf.MongoURL) if err != nil { return errgo.Notef(err, "cannot dial mongo at %q", conf.MongoURL) } defer session.Close() db := session.DB("juju") pool, err := charmstore.NewPool(db, si, nil, charmstore.ServerParams{}) if err != nil { return errgo.Notef(err, "cannot create a new store") } store := pool.Store() defer store.Close() if err := store.SynchroniseElasticsearch(); err != nil { return errgo.Notef(err, "cannot synchronise elasticsearch") } return nil } charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/0000775000175000017500000000000012672604536017110 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/jujusvg.v1/doc.go0000664000175000017500000000046112672604536020205 0ustar marcomarco// Copyright 2014 Canonical, Ltd. // Licensed under the LGPLv3, see LICENCE file for details. // jujusvg generates SVG representations of various Juju artifacts, such as // charm bundles or live environments. 
// // For more information, please refer to the README file in this directory. package jujusvg charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/canvas_test.go0000664000175000017500000002072712672604536021761 0ustar marcomarcopackage jujusvg import ( "bytes" "encoding/xml" "image" "io" "github.com/ajstarks/svgo" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/jujusvg.v1/assets" ) type CanvasSuite struct{} var _ = gc.Suite(&CanvasSuite{}) func (s *CanvasSuite) TestServiceRender(c *gc.C) { // Ensure that the Service's definition and usage methods output the // proper SVG elements. var tests = []struct { about string service service expected string }{ { about: "Service without iconSrc, no def created", service: service{ name: "foo", point: image.Point{ X: 0, Y: 0, }, iconUrl: "foo", }, expected: ` foo foo `, }, { about: "Service with iconSrc", service: service{ name: "bar", charmPath: "bar", point: image.Point{ X: 0, Y: 0, }, iconSrc: []byte("bar"), }, expected: `bar bar bar `, }, { about: "Service with already def'd icon", service: service{ name: "baz", charmPath: "bar", point: image.Point{ X: 0, Y: 0, }, iconSrc: []byte("bar"), }, expected: ` baz baz `, }, } // Maintain our list of rendered icons outside the loop. iconsRendered := make(map[string]bool) iconIds := make(map[string]string) for _, test := range tests { var buf bytes.Buffer svg := svg.New(&buf) test.service.definition(svg, iconsRendered, iconIds) test.service.usage(svg, iconIds) c.Log(test.about) c.Log(buf.String()) c.Assert(buf.String(), gc.Equals, test.expected) } } func (s *CanvasSuite) TestRelationRender(c *gc.C) { // Ensure that the Relation's definition and usage methods output the // proper SVG elements. 
var buf bytes.Buffer svg := svg.New(&buf) relation := serviceRelation{ name: "foo", serviceA: &service{ point: image.Point{ X: 0, Y: 0, }, }, serviceB: &service{ point: image.Point{ X: 100, Y: 100, }, }, } relation.definition(svg) relation.usage(svg) c.Assert(buf.String(), gc.Equals, ` foo `) } func (s *CanvasSuite) TestLayout(c *gc.C) { // Ensure that the SVG is sized exactly around the positioned services. canvas := Canvas{} canvas.addService(&service{ name: "service1", point: image.Point{ X: 0, Y: 0, }, }) canvas.addService(&service{ name: "service2", point: image.Point{ X: 100, Y: 100, }, }) width, height := canvas.layout() c.Assert(width, gc.Equals, 281) c.Assert(height, gc.Equals, 281) canvas.addService(&service{ name: "service3", point: image.Point{ X: -100, Y: -100, }, }) canvas.addService(&service{ name: "service4", point: image.Point{ X: -100, Y: 100, }, }) canvas.addService(&service{ name: "service5", point: image.Point{ X: 200, Y: -100, }, }) width, height = canvas.layout() c.Assert(width, gc.Equals, 481) c.Assert(height, gc.Equals, 381) } func (s *CanvasSuite) TestMarshal(c *gc.C) { // Ensure that the internal representation of the canvas can be marshalled // to SVG. 
var buf bytes.Buffer canvas := Canvas{} serviceA := &service{ name: "service-a", charmPath: "trusty/svc-a", point: image.Point{ X: 0, Y: 0, }, iconSrc: []byte(` `), } serviceB := &service{ name: "service-b", point: image.Point{ X: 100, Y: 100, }, } canvas.addService(serviceA) canvas.addService(serviceB) canvas.addRelation(&serviceRelation{ name: "relation", serviceA: serviceA, serviceB: serviceB, }) canvas.Marshal(&buf) c.Logf("%s", buf.Bytes()) assertXMLEqual(c, buf.Bytes(), []byte(` `+assets.RelationIconHealthy+` relation service-a service-a service-b service-b `)) } func assertXMLEqual(c *gc.C, obtained, expected []byte) { toksObtained := xmlTokens(c, obtained) toksExpected := xmlTokens(c, expected) c.Assert(toksObtained, jc.DeepEquals, toksExpected) } func xmlTokens(c *gc.C, data []byte) []xml.Token { dec := xml.NewDecoder(bytes.NewReader(data)) var toks []xml.Token for { tok, err := dec.Token() if err == io.EOF { return toks } c.Assert(err, gc.IsNil) if cdata, ok := tok.(xml.CharData); ok { // It's char data - trim all white space and ignore it // if it's all blank. cdata = bytes.TrimSpace(cdata) if len(cdata) == 0 { continue } tok = cdata } toks = append(toks, xml.CopyToken(tok)) } } charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/svg.go0000664000175000017500000000455312672604536020245 0ustar marcomarcopackage jujusvg import ( "io" "github.com/juju/xml" "gopkg.in/errgo.v1" ) const svgNamespace = "http://www.w3.org/2000/svg" // Process an icon SVG file from a reader, removing anything surrounding // the tags, which would be invalid in this context (such as // decls, directives, etc), writing out to a writer. In // addition, loosely check that the icon is a valid SVG file. The id // argument provides a unique identifier for the icon SVG so that it can // be referenced within the bundle diagram. If an id attribute on the SVG // tag already exists, it will be replaced with this argument. 
// processIcon streams the icon SVG from r to w, emitting only the
// outermost <svg> element and its contents: any tokens before the
// opening <svg> tag (XML decls, directives, comments, etc.) are
// dropped. The id argument is set as the "id" attribute on the
// opening <svg> tag, replacing any existing one (via setXMLAttr).
// It returns an error if no complete <svg>…</svg> element is found.
func processIcon(r io.Reader, w io.Writer, id string) error {
	dec := xml.NewDecoder(r)
	// Treat un-namespaced elements as belonging to the SVG namespace
	// (DefaultSpace is provided by the github.com/juju/xml fork).
	dec.DefaultSpace = svgNamespace
	enc := xml.NewEncoder(w)
	svgStartFound := false
	svgEndFound := false
	// depth tracks how many nested <svg> elements are currently open.
	depth := 0
	// Phase 1: skip tokens until the opening <svg> start element is
	// seen; only that (rewritten) start tag is encoded to the output.
	for depth < 1 {
		tok, err := dec.Token()
		if err != nil {
			if err == io.EOF {
				// No <svg> found at all; the validity check
				// below reports the error.
				break
			}
			return errgo.Notef(err, "cannot get token")
		}
		tag, ok := tok.(xml.StartElement)
		if ok && tag.Name.Space == svgNamespace && tag.Name.Local == "svg" {
			svgStartFound = true
			depth++
			// Force the id attribute to the caller-supplied value.
			tag.Attr = setXMLAttr(tag.Attr, xml.Name{
				Local: "id",
			}, id)
			if err := enc.EncodeToken(tag); err != nil {
				return errgo.Notef(err, "cannot encode token %#v", tag)
			}
		}
	}
	// Phase 2: copy every token through to the matching </svg>,
	// adjusting depth for nested svg elements so an inner </svg>
	// does not terminate the copy early.
	for depth > 0 {
		tok, err := dec.Token()
		if err != nil {
			if err == io.EOF {
				// Input ended before the closing </svg>; the
				// validity check below reports the error.
				break
			}
			return errgo.Notef(err, "cannot get token")
		}
		switch tag := tok.(type) {
		case xml.StartElement:
			if tag.Name.Space == svgNamespace && tag.Name.Local == "svg" {
				depth++
			}
		case xml.EndElement:
			if tag.Name.Space == svgNamespace && tag.Name.Local == "svg" {
				depth--
				if depth == 0 {
					svgEndFound = true
				}
			}
		}
		// The closing </svg> itself is still encoded here (the loop
		// condition is only re-checked afterwards).
		if err := enc.EncodeToken(tok); err != nil {
			return errgo.Notef(err, "cannot encode token %#v", tok)
		}
	}
	if !svgStartFound || !svgEndFound {
		return errgo.Newf("icon does not appear to be a valid SVG")
	}
	// Flush buffered encoder output so w receives the full element.
	if err := enc.Flush(); err != nil {
		return err
	}
	return nil
}

// setXMLAttr returns the given attributes with the given attribute name set to
// val, adding an attribute if necessary.
func setXMLAttr(attrs []xml.Attr, name xml.Name, val string) []xml.Attr { for i := range attrs { if attrs[i].Name == name { attrs[i].Value = val return attrs } } return append(attrs, xml.Attr{ Name: name, Value: val, }) } charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/jujusvg_test.go0000664000175000017500000004416712672604536022207 0ustar marcomarcopackage jujusvg import ( "bytes" "fmt" "net/http" "net/http/httptest" "strings" "testing" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/jujusvg.v1/assets" ) func Test(t *testing.T) { gc.TestingT(t) } type newSuite struct{} var _ = gc.Suite(&newSuite{}) var bundle = ` services: mongodb: charm: "cs:precise/mongodb-21" num_units: 1 annotations: "gui-x": "940.5" "gui-y": "388.7698359714502" constraints: "mem=2G cpu-cores=1" elasticsearch: charm: "cs:~charming-devs/precise/elasticsearch-2" num_units: 1 annotations: "gui-x": "490.5" "gui-y": "369.7698359714502" constraints: "mem=2G cpu-cores=1" charmworld: charm: "cs:~juju-jitsu/precise/charmworld-58" num_units: 1 expose: true annotations: "gui-x": "813.5" "gui-y": "112.23016402854975" options: charm_import_limit: -1 source: "lp:~bac/charmworld/ingest-local-charms" revno: 511 relations: - - "charmworld:essearch" - "elasticsearch:essearch" - - "charmworld:database" - "mongodb:database" series: precise ` func iconURL(ref *charm.URL) string { return "http://0.1.2.3/" + ref.Path() + ".svg" } type emptyFetcher struct{} func (f *emptyFetcher) FetchIcons(*charm.BundleData) (map[string][]byte, error) { return nil, nil } type errFetcher string func (f *errFetcher) FetchIcons(*charm.BundleData) (map[string][]byte, error) { return nil, fmt.Errorf("%s", *f) } func (s *newSuite) TestNewFromBundle(c *gc.C) { b, err := charm.ReadBundleData(strings.NewReader(bundle)) c.Assert(err, gc.IsNil) err = b.Verify(nil, nil) c.Assert(err, gc.IsNil) cvs, err := NewFromBundle(b, iconURL, nil) c.Assert(err, gc.IsNil) var buf bytes.Buffer cvs.Marshal(&buf) c.Logf("%s", buf.String()) 
assertXMLEqual(c, buf.Bytes(), []byte(` `+assets.RelationIconHealthy+` charmworld:essearch elasticsearch:essearch charmworld:database mongodb:database charmworld charmworld elasticsearch elasticsearch mongodb mongodb `)) } func (s *newSuite) TestNewFromBundleWithUnplacedService(c *gc.C) { b, err := charm.ReadBundleData(strings.NewReader(bundle)) c.Assert(err, gc.IsNil) err = b.Verify(nil, nil) c.Assert(err, gc.IsNil) b.Services["charmworld"].Annotations["gui-x"] = "" b.Services["charmworld"].Annotations["gui-y"] = "" cvs, err := NewFromBundle(b, iconURL, nil) c.Assert(err, gc.IsNil) var buf bytes.Buffer cvs.Marshal(&buf) c.Logf("%s", buf.String()) assertXMLEqual(c, buf.Bytes(), []byte(` `+assets.RelationIconHealthy+` charmworld:essearch elasticsearch:essearch charmworld:database mongodb:database charmworld charmworld elasticsearch elasticsearch mongodb mongodb `)) } func (s *newSuite) TestWithFetcher(c *gc.C) { b, err := charm.ReadBundleData(strings.NewReader(bundle)) c.Assert(err, gc.IsNil) err = b.Verify(nil, nil) c.Assert(err, gc.IsNil) cvs, err := NewFromBundle(b, iconURL, new(emptyFetcher)) c.Assert(err, gc.IsNil) var buf bytes.Buffer cvs.Marshal(&buf) c.Logf("%s", buf.String()) assertXMLEqual(c, buf.Bytes(), []byte(` `+assets.RelationIconHealthy+` charmworld:essearch elasticsearch:essearch charmworld:database mongodb:database charmworld charmworld elasticsearch elasticsearch mongodb mongodb `)) } func (s *newSuite) TestDefaultHTTPFetcher(c *gc.C) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "") })) defer ts.Close() tsIconUrl := func(ref *charm.URL) string { return ts.URL + "/" + ref.Path() + ".svg" } b, err := charm.ReadBundleData(strings.NewReader(bundle)) c.Assert(err, gc.IsNil) err = b.Verify(nil, nil) c.Assert(err, gc.IsNil) cvs, err := NewFromBundle(b, tsIconUrl, &HTTPFetcher{IconURL: tsIconUrl}) c.Assert(err, gc.IsNil) var buf bytes.Buffer cvs.Marshal(&buf) c.Logf("%s", buf.String()) 
assertXMLEqual(c, buf.Bytes(), []byte(` `+assets.RelationIconHealthy+` charmworld:essearch elasticsearch:essearch charmworld:database mongodb:database charmworld charmworld elasticsearch elasticsearch mongodb mongodb `)) } func (s *newSuite) TestFetcherError(c *gc.C) { b, err := charm.ReadBundleData(strings.NewReader(bundle)) c.Assert(err, gc.IsNil) err = b.Verify(nil, nil) c.Assert(err, gc.IsNil) ef := errFetcher("bad-wolf") _, err = NewFromBundle(b, iconURL, &ef) c.Assert(err, gc.ErrorMatches, "bad-wolf") } func (s *newSuite) TestWithBadBundle(c *gc.C) { b, err := charm.ReadBundleData(strings.NewReader(bundle)) c.Assert(err, gc.IsNil) b.Relations[0][0] = "evil-unknown-service" cvs, err := NewFromBundle(b, iconURL, nil) c.Assert(err, gc.ErrorMatches, "cannot verify bundle: .*") c.Assert(cvs, gc.IsNil) } func (s *newSuite) TestWithBadPosition(c *gc.C) { b, err := charm.ReadBundleData(strings.NewReader(bundle)) c.Assert(err, gc.IsNil) b.Services["charmworld"].Annotations["gui-x"] = "bad" cvs, err := NewFromBundle(b, iconURL, nil) c.Assert(err, gc.ErrorMatches, `service "charmworld" does not have a valid position`) c.Assert(cvs, gc.IsNil) b, err = charm.ReadBundleData(strings.NewReader(bundle)) c.Assert(err, gc.IsNil) b.Services["charmworld"].Annotations["gui-y"] = "bad" cvs, err = NewFromBundle(b, iconURL, nil) c.Assert(err, gc.ErrorMatches, `service "charmworld" does not have a valid position`) c.Assert(cvs, gc.IsNil) } charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/Makefile0000664000175000017500000000214212672604536020547 0ustar marcomarcoifndef GOPATH $(warning You need to set up a GOPATH.) endif PROJECT := gopkg.in/juju/jujusvg.v1 PROJECT_DIR := $(shell go list -e -f '{{.Dir}}' $(PROJECT)) help: @echo "Available targets:" @echo " deps - fetch all dependencies" @echo " build - build the project" @echo " check - run tests" @echo " install - install the library in your GOPATH" @echo " clean - clean the project" # Start of GOPATH-dependent targets. 
Some targets only make sense - # and will only work - when this tree is found on the GOPATH. ifeq ($(CURDIR),$(PROJECT_DIR)) deps: go get -v -t $(PROJECT)/... build: go build $(PROJECT)/... check: go test $(PROJECT)/... install: go install $(INSTALL_FLAGS) -v $(PROJECT)/... clean: go clean $(PROJECT)/... else deps: $(error Cannot $@; $(CURDIR) is not on GOPATH) build: $(error Cannot $@; $(CURDIR) is not on GOPATH) check: $(error Cannot $@; $(CURDIR) is not on GOPATH) install: $(error Cannot $@; $(CURDIR) is not on GOPATH) clean: $(error Cannot $@; $(CURDIR) is not on GOPATH) endif # End of GOPATH-dependent targets. .PHONY: help deps build check install clean charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/dependencies.tsv0000664000175000017500000000321512672604536022275 0ustar marcomarcogithub.com/ajstarks/svgo git 89e3ac64b5b3e403a5e7c35ea4f98d45db7b4518 2014-10-04T21:11:59Z github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z github.com/juju/gojsonreference git f0d24ac5ee330baa21721cdff56d45e4ee42628e 2015-02-04T19:46:33Z github.com/juju/gojsonschema git e1ad140384f254c82f89450d9a7c8dd38a632838 2015-03-12T17:00:16Z github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z github.com/juju/names git e287fe4ae0dbda220cace3ed0e35cda4796c1aa3 2015-10-22T17:21:35Z github.com/juju/schema git afe1151cb49d1d7ed3c75592dfc6f38703f2e988 2015-08-07T07:58:08Z github.com/juju/testing git ad6f815f49f8209a27a3b7efb6d44876493e5939 2015-10-12T16:09:06Z github.com/juju/utils git f2db28cef935aba0a7207254fa5dba273e649d0e 2015-11-09T11:51:43Z github.com/juju/xml git eb759a627588d35166bc505fceb51b88500e291e 2015-04-13T13:11:21Z golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z gopkg.in/check.v1 git b3d3430320d4260e5fea99841af984b3badcea63 2015-06-26T10:50:28Z gopkg.in/errgo.v1 git 
66cb46252b94c1f3d65646f54ee8043ab38d766c 2015-10-07T15:31:57Z gopkg.in/juju/charm.v6-unstable git a3d228ef5292531219d17d47679b260580fba1a8 2015-11-19T07:39:58Z gopkg.in/mgo.v2 git 4d04138ffef2791c479c0c8bbffc30b34081b8d9 2015-10-26T16:34:53Z gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z gopkg.in/yaml.v2 git 7ad95dd0798a40da1ccdff6dff35fd177b5edf40 2015-06-24T10:29:02Z launchpad.net/tomb bzr gustavo@niemeyer.net-20140529072043-hzcrlnl3ygvg914q 18 charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/LICENSE0000664000175000017500000002150112672604536020114 0ustar marcomarcoAll files in this repository are licensed as follows. If you contribute to this repository, it is assumed that you license your contribution under the same license unless you state otherwise. All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. This software is licensed under the LGPLv3, included below. As a special exception to the GNU Lesser General Public License version 3 ("LGPL3"), the copyright holders of this Library give you permission to convey to a third party a Combined Work that links statically or dynamically to this Library without providing any Minimal Corresponding Source or Minimal Application Code as set out in 4d or providing the installation information set out in section 4e, provided that you comply with the other provisions of LGPL3 and provided that you meet, for the Application the terms and conditions of the license(s) which apply to the Application. Except as stated in this special exception, the provisions of LGPL3 will continue to comply in full to this Library. If you modify this Library, you may apply this exception to your version of this Library, but you are not obliged to do so. If you do not wish to do so, delete this exception statement from your version. This exception does not (and cannot) modify any license terms which apply to the Application, with which you must still comply. 
GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. 
If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. 
c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. 
You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. 
charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/jujusvg.go0000664000175000017500000000621312672604536021136 0ustar marcomarcopackage jujusvg // import "gopkg.in/juju/jujusvg.v1" import ( "fmt" "image" "math" "sort" "strconv" "strings" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" ) // NewFromBundle returns a new Canvas that can be used // to generate a graphical representation of the given bundle // data. The iconURL function is used to generate a URL // that refers to an SVG for the supplied charm URL. // If fetcher is non-nil, it will be used to fetch icon // contents for any icons embedded within the charm, // allowing the generated bundle to be self-contained. If fetcher // is nil, a default fetcher which refers to icons by their // URLs as svg tags will be used. func NewFromBundle(b *charm.BundleData, iconURL func(*charm.URL) string, fetcher IconFetcher) (*Canvas, error) { if fetcher == nil { fetcher = &LinkFetcher{ IconURL: iconURL, } } iconMap, err := fetcher.FetchIcons(b) if err != nil { return nil, err } var canvas Canvas // Verify the bundle to make sure that all the invariants // that we depend on below actually hold true. if err := b.Verify(nil, nil); err != nil { return nil, errgo.Notef(err, "cannot verify bundle") } // Go through all services in alphabetical order so that // we get consistent results. 
serviceNames := make([]string, 0, len(b.Services)) for name := range b.Services { serviceNames = append(serviceNames, name) } sort.Strings(serviceNames) services := make(map[string]*service) servicesNeedingPlacement := make(map[string]bool) for _, name := range serviceNames { serviceData := b.Services[name] x, xerr := strconv.ParseFloat(serviceData.Annotations["gui-x"], 64) y, yerr := strconv.ParseFloat(serviceData.Annotations["gui-y"], 64) if xerr != nil || yerr != nil { if serviceData.Annotations["gui-x"] == "" && serviceData.Annotations["gui-y"] == "" { servicesNeedingPlacement[name] = true x = 0 y = 0 } else { return nil, errgo.Newf("service %q does not have a valid position", name) } } charmID, err := charm.ParseURL(serviceData.Charm) if err != nil { // cannot actually happen, as we've verified it. return nil, errgo.Notef(err, "cannot parse charm %q", serviceData.Charm) } icon := iconMap[charmID.Path()] svc := &service{ name: name, charmPath: charmID.Path(), point: image.Point{int(x), int(y)}, iconUrl: iconURL(charmID), iconSrc: icon, } services[name] = svc } padding := image.Point{int(math.Floor(serviceBlockSize * 1.5)), int(math.Floor(serviceBlockSize * 0.5))} for name := range servicesNeedingPlacement { vertices := []image.Point{} for n, svc := range services { if !servicesNeedingPlacement[n] { vertices = append(vertices, svc.point) } } services[name].point = getPointOutside(vertices, padding) servicesNeedingPlacement[name] = false } for _, name := range serviceNames { canvas.addService(services[name]) } for _, relation := range b.Relations { canvas.addRelation(&serviceRelation{ name: fmt.Sprintf("%s %s", relation[0], relation[1]), serviceA: services[strings.Split(relation[0], ":")[0]], serviceB: services[strings.Split(relation[1], ":")[0]], }) } return &canvas, nil } charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/hull_test.go0000664000175000017500000000333012672604536021441 0ustar marcomarcopackage jujusvg import ( "image" gc "gopkg.in/check.v1" ) type HullSuite 
struct{} var _ = gc.Suite(&HullSuite{}) func (s *HullSuite) TestGetPointOutside(c *gc.C) { var tests = []struct { about string vertices []image.Point expected image.Point }{ { about: "zero vertices", vertices: []image.Point{}, expected: image.Point{0, 0}, }, { about: "one vertex", vertices: []image.Point{{0, 0}}, expected: image.Point{10, 10}, }, { about: "two vertices", vertices: []image.Point{{0, 0}, {10, 10}}, expected: image.Point{20, 20}, }, { about: "three vertices (convexHull fall through)", vertices: []image.Point{{0, 0}, {0, 10}, {10, 0}}, expected: image.Point{10, 20}, }, { about: "four vertices", vertices: []image.Point{{0, 0}, {0, 10}, {10, 0}, {10, 10}}, expected: image.Point{20, 20}, }, } for _, test := range tests { c.Log(test.about) c.Assert(getPointOutside(test.vertices, image.Point{10, 10}), gc.Equals, test.expected) } } func (s *HullSuite) TestConvexHull(c *gc.C) { // Zero vertices vertices := []image.Point{} c.Assert(convexHull(vertices), gc.DeepEquals, []image.Point{{0, 0}}) // Identities vertices = []image.Point{{1, 1}} c.Assert(convexHull(vertices), gc.DeepEquals, vertices) vertices = []image.Point{{1, 1}, {2, 2}} c.Assert(convexHull(vertices), gc.DeepEquals, vertices) vertices = []image.Point{{1, 1}, {2, 2}, {1, 2}} c.Assert(convexHull(vertices), gc.DeepEquals, vertices) // > 3 vertices vertices = []image.Point{} for i := 0; i < 100; i++ { vertices = append(vertices, image.Point{i / 10, i % 10}) } c.Assert(convexHull(vertices), gc.DeepEquals, []image.Point{ {0, 0}, {9, 0}, {9, 9}, {0, 9}, }) } charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/canvas.go0000664000175000017500000001735112672604536020721 0ustar marcomarcopackage jujusvg import ( "bytes" "fmt" "image" "io" "math" svg "github.com/ajstarks/svgo" "gopkg.in/juju/jujusvg.v1/assets" ) const ( iconSize = 96 serviceBlockSize = 180 healthCircleRadius = 8 relationLineWidth = 1 maxInt = int(^uint(0) >> 1) minInt = -(maxInt - 1) maxHeight = 450 maxWidth = 1000 fontColor = "#505050" relationColor = 
"#a7a7a7" ) // Canvas holds the parsed form of a bundle or environment. type Canvas struct { services []*service relations []*serviceRelation iconsRendered map[string]bool iconIds map[string]string } // service represents a service deployed to an environment and contains the // point of the top-left corner of the icon, icon URL, and additional metadata. type service struct { name string charmPath string iconUrl string iconSrc []byte point image.Point } // serviceRelation represents a relation created between two services. type serviceRelation struct { name string serviceA *service serviceB *service } // line represents a line segment with two endpoints. type line struct { p0, p1 image.Point } // definition creates any necessary defs that can be used later in the SVG. func (s *service) definition(canvas *svg.SVG, iconsRendered map[string]bool, iconIds map[string]string) error { if len(s.iconSrc) == 0 || iconsRendered[s.charmPath] { return nil } iconsRendered[s.charmPath] = true iconIds[s.charmPath] = fmt.Sprintf("icon-%d", len(iconsRendered)) // Temporary solution: iconBuf := bytes.NewBuffer(s.iconSrc) return processIcon(iconBuf, canvas.Writer, iconIds[s.charmPath]) } // usage creates any necessary tags for actually using the service in the SVG. func (s *service) usage(canvas *svg.SVG, iconIds map[string]string) { canvas.Group(fmt.Sprintf(`transform="translate(%d,%d)"`, s.point.X, s.point.Y)) defer canvas.Gend() canvas.Title(s.name) canvas.Circle( serviceBlockSize/2, serviceBlockSize/2, serviceBlockSize/2, `class="service-block" fill="#f5f5f5" stroke="#888" stroke-width="1"`) canvas.Circle( serviceBlockSize/2-iconSize/2+5, // for these two, add an offset to help serviceBlockSize/2-iconSize/2+7, // hide the embossed border. 
serviceBlockSize/4, `id="service-icon-mask-`+s.name+`" fill="none"`) canvas.ClipPath(`id="clip-` + s.name + `"`) canvas.Use( 0, 0, `#service-icon-mask-`+s.name) canvas.ClipEnd() if len(s.iconSrc) > 0 { canvas.Use( 0, 0, "#"+iconIds[s.charmPath], fmt.Sprintf(`transform="translate(%d,%d)" width="%d" height="%d" clip-path="url(#clip-%s)"`, serviceBlockSize/2-iconSize/2, serviceBlockSize/2-iconSize/2, iconSize, iconSize, s.name), ) } else { canvas.Image( serviceBlockSize/2-iconSize/2, serviceBlockSize/2-iconSize/2, iconSize, iconSize, s.iconUrl, `clip-path="url(#clip-`+s.name+`)"`, ) } name := s.name if len(name) > 20 { name = fmt.Sprintf("%s...", name[:17]) } canvas.Rect( 0, serviceBlockSize-45, serviceBlockSize, 32, `rx="2" ry="2" fill="rgba(220, 220, 220, 0.8)"`) canvas.Text( serviceBlockSize/2, serviceBlockSize-23, name, `text-anchor="middle" style="font-weight:200"`) } // definition creates any necessary defs that can be used later in the SVG. func (r *serviceRelation) definition(canvas *svg.SVG) { } // usage creates any necessary tags for actually using the relation in the SVG. 
func (r *serviceRelation) usage(canvas *svg.SVG) { canvas.Group() defer canvas.Gend() canvas.Title(r.name) l := line{ p0: r.serviceA.point.Add(point(serviceBlockSize/2, serviceBlockSize/2)), p1: r.serviceB.point.Add(point(serviceBlockSize/2, serviceBlockSize/2)), } canvas.Line( l.p0.X, l.p0.Y, l.p1.X, l.p1.Y, fmt.Sprintf(`stroke=%q`, relationColor), fmt.Sprintf(`stroke-width="%dpx"`, relationLineWidth), fmt.Sprintf(`stroke-dasharray=%q`, strokeDashArray(l)), ) mid := l.p0.Add(l.p1).Div(2).Sub(point(healthCircleRadius, healthCircleRadius)) canvas.Use(mid.X, mid.Y, "#healthCircle") deg := math.Atan2(float64(l.p0.Y-l.p1.Y), float64(l.p0.X-l.p1.X)) canvas.Circle( int(float64(l.p0.X)-math.Cos(deg)*(serviceBlockSize/2)), int(float64(l.p0.Y)-math.Sin(deg)*(serviceBlockSize/2)), 4, fmt.Sprintf(`fill=%q`, relationColor)) canvas.Circle( int(float64(l.p1.X)+math.Cos(deg)*(serviceBlockSize/2)), int(float64(l.p1.Y)+math.Sin(deg)*(serviceBlockSize/2)), 4, fmt.Sprintf(`fill=%q`, relationColor)) } // strokeDashArray generates the stroke-dasharray attribute content so that // the relation health indicator is placed in an empty space. func strokeDashArray(l line) string { return fmt.Sprintf("%.2f, %d", l.length()/2-healthCircleRadius, healthCircleRadius*2) } // length calculates the length of a line. func (l *line) length() float64 { dp := l.p0.Sub(l.p1) return math.Sqrt(square(float64(dp.X)) + square(float64(dp.Y))) } // addService adds a new service to the canvas. func (c *Canvas) addService(s *service) { c.services = append(c.services, s) } // addRelation adds a new relation to the canvas. func (c *Canvas) addRelation(r *serviceRelation) { c.relations = append(c.relations, r) } // layout adjusts all items so that they are positioned appropriately, // and returns the overall size of the canvas. 
func (c *Canvas) layout() (int, int) { minWidth := maxInt minHeight := maxInt maxWidth := minInt maxHeight := minInt for _, service := range c.services { if service.point.X < minWidth { minWidth = service.point.X } if service.point.Y < minHeight { minHeight = service.point.Y } if service.point.X > maxWidth { maxWidth = service.point.X } if service.point.Y > maxHeight { maxHeight = service.point.Y } } for _, service := range c.services { service.point = service.point.Sub(point(minWidth, minHeight)) } return abs(maxWidth-minWidth) + serviceBlockSize + 1, abs(maxHeight-minHeight) + serviceBlockSize + 1 } func (c *Canvas) definition(canvas *svg.SVG) { canvas.Def() defer canvas.DefEnd() // Relation health circle. canvas.Group(`id="healthCircle"`, `transform="scale(1.1)"`) io.WriteString(canvas.Writer, assets.RelationIconHealthy) canvas.Gend() // Service and relation specific defs. for _, relation := range c.relations { relation.definition(canvas) } for _, service := range c.services { service.definition(canvas, c.iconsRendered, c.iconIds) } } func (c *Canvas) relationsGroup(canvas *svg.SVG) { canvas.Gid("relations") defer canvas.Gend() for _, relation := range c.relations { relation.usage(canvas) } } func (c *Canvas) servicesGroup(canvas *svg.SVG) { canvas.Gid("services") defer canvas.Gend() for _, service := range c.services { service.usage(canvas, c.iconIds) } } // Marshal renders the SVG to the given io.Writer. func (c *Canvas) Marshal(w io.Writer) { // Initialize maps for service icons, which are used both in definition // and use methods for services. c.iconsRendered = make(map[string]bool) c.iconIds = make(map[string]string) // TODO check write errors and return an error from // Marshal if the write fails. The svg package does not // itself check or return write errors; a possible work-around // is to wrap the writer in a custom writer that panics // on error, and catch the panic here. 
width, height := c.layout() canvas := svg.New(w) canvas.Start( width, height, fmt.Sprintf(`style="font-family:Ubuntu, sans-serif;" viewBox="0 0 %d %d"`, width, height), ) defer canvas.End() c.definition(canvas) c.relationsGroup(canvas) c.servicesGroup(canvas) } // abs returns the absolute value of a number. func abs(x int) int { if x < 0 { return -x } else { return x } } // square multiplies a number by itself. func square(x float64) float64 { return x * x } // point generates an image.Point given its coordinates. func point(x, y int) image.Point { return image.Point{x, y} } charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/examples/0000775000175000017500000000000012672604536020726 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/jujusvg.v1/examples/mediawiki-scalable.yaml0000664000175000017500000000545312672604536025330 0ustar marcomarcoservices: haproxy: charm: cs:precise/haproxy-35 num_units: 1 options: default_log: global default_mode: http default_options: httplog, dontlognull default_retries: 3 default_timeouts: queue 20000, client 50000, connect 5000, server 50000 enable_monitoring: false global_debug: false global_group: haproxy global_log: 127.0.0.1 local0, 127.0.0.1 local1 notice global_maxconn: 4096 global_quiet: false global_spread_checks: 0 global_user: haproxy monitoring_allowed_cidr: 127.0.0.1/32 monitoring_password: changeme monitoring_port: 10000 monitoring_stats_refresh: 3 monitoring_username: haproxy nagios_context: juju package_status: install services: "- service_name: haproxy_service\n service_host: \"0.0.0.0\"\n service_port: 80\n service_options: [balance leastconn]\n server_options: maxconn 100\n" sysctl: "" annotations: gui-x: "619" gui-y: "-406" mediawiki: charm: cs:precise/mediawiki-10 num_units: 1 options: debug: false name: Please set name of wiki skin: vector annotations: gui-x: "618" gui-y: "-128" memcached: charm: cs:precise/memcached-7 num_units: 1 options: connection-limit: 1024 disable-auto-cleanup: "no" disable-cas: "no" 
disable-large-pages: "no" extra-options: "" factor: 1.25 min-item-size: -1 nagios_context: juju request-limit: -1 size: 768 slab-page-size: -1 tcp-port: 11211 threads: -1 udp-port: 0 annotations: gui-x: "926" gui-y: "-125" mysql: charm: cs:precise/mysql-28 num_units: 1 options: binlog-format: MIXED block-size: 5 dataset-size: 80% flavor: distro ha-bindiface: eth0 ha-mcastport: 5411 max-connections: -1 preferred-storage-engine: InnoDB query-cache-size: -1 query-cache-type: "OFF" rbd-name: mysql1 tuning-level: safest vip_cidr: 24 vip_iface: eth0 annotations: gui-x: "926" gui-y: "123" mysql-slave: charm: cs:precise/mysql-28 num_units: 1 options: binlog-format: MIXED block-size: 5 dataset-size: 80% flavor: distro ha-bindiface: eth0 ha-mcastport: 5411 max-connections: -1 preferred-storage-engine: InnoDB query-cache-size: -1 query-cache-type: "OFF" rbd-name: mysql1 tuning-level: safest vip_cidr: 24 vip_iface: eth0 annotations: gui-x: "619" gui-y: "124" series: precise relations: - - mediawiki:cache - memcached:cache - - haproxy:reverseproxy - mediawiki:website - - mysql-slave:slave - mysql:master - - mediawiki:slave - mysql-slave:db charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/examples/charmworld-missing-placement.yaml0000664000175000017500000000134412672604536027373 0ustar marcomarcoservices: mongodb: charm: "cs:precise/mongodb-21" num_units: 1 annotations: "gui-x": "940.5" "gui-y": "388.7698359714502" constraints: "mem=2G cpu-cores=1" elasticsearch: charm: "cs:~charming-devs/precise/elasticsearch-2" num_units: 1 constraints: "mem=2G cpu-cores=1" charmworld: charm: "cs:~juju-jitsu/precise/charmworld-58" num_units: 1 expose: true annotations: "gui-x": "813.5" "gui-y": "112.23016402854975" options: charm_import_limit: -1 source: "lp:~bac/charmworld/ingest-local-charms" revno: 511 relations: - - "charmworld:essearch" - "elasticsearch:essearch" - - "charmworld:database" - "mongodb:database" series: precise 
charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/examples/generatesvg.go0000664000175000017500000000337112672604536023573 0ustar marcomarcopackage main // This is a demo application that uses the jujusvg library to build a bundle SVG // from a given bundle.yaml file. import ( "io/ioutil" "log" "os" "strings" "gopkg.in/juju/charm.v6-unstable" // Import the jujusvg library and the juju charm library "gopkg.in/juju/jujusvg.v1" ) // iconURL takes a reference to a charm and returns the URL for that charm's icon. // In this case, we're using the api.jujucharms.com API to provide the icon's URL. func iconURL(ref *charm.URL) string { return "https://api.jujucharms.com/charmstore/v4/" + ref.Path() + "/icon.svg" } func main() { if len(os.Args) != 2 { log.Fatalf("Please provide the name of a bundle file as the first argument") } // First, we need to read our bundle data into a []byte bundle_data, err := ioutil.ReadFile(os.Args[1]) if err != nil { log.Fatalf("Error reading bundle: %s\n", err) } // Next, generate a charm.Bundle from the bytearray by passing it to ReadNewBundleData. // This gives us an in-memory object representation of the bundle that we can pass to jujusvg bundle, err := charm.ReadBundleData(strings.NewReader(string(bundle_data))) if err != nil { log.Fatalf("Error parsing bundle: %s\n", err) } fetcher := &jujusvg.HTTPFetcher{ IconURL: iconURL, } // Next, build a canvas of the bundle. This is a simplified version of a charm.Bundle // that contains just the position information and charm icon URLs necessary to build // the SVG representation of the bundle canvas, err := jujusvg.NewFromBundle(bundle, iconURL, fetcher) if err != nil { log.Fatalf("Error generating canvas: %s\n", err) } // Finally, marshal that canvas as SVG to os.Stdout; this will print the SVG data // required to generate an image of the bundle. 
canvas.Marshal(os.Stdout) } charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/examples/kubernetes-bundle.yaml0000664000175000017500000000175712672604536025242 0ustar marcomarcoservices: "kubernetes-master": charm: cs:~kubernetes/trusty/kubernetes-master-5 annotations: "gui-x": "600" "gui-y": "0" expose: true docker: charm: cs:trusty/docker-2 num_units: 2 annotations: "gui-x": "0" "gui-y": "0" flannel-docker: charm: cs:trusty/flannel-docker-5 annotations: "gui-x": "0" "gui-y": "300" kubernetes: charm: cs:~kubernetes/trusty/kubernetes-5 annotations: "gui-x": "300" "gui-y": "300" etcd: charm: cs:~kubernetes/trusty/etcd-2 annotations: "gui-x": "300" "gui-y": "0" relations: - - "flannel-docker:network" - "docker:network" - - "flannel-docker:docker-host" - "docker:juju-info" - - "flannel-docker:db" - "etcd:client" - - "kubernetes:docker-host" - "docker:juju-info" - - "etcd:client" - "kubernetes:etcd" - - "etcd:client" - "kubernetes-master:etcd" - - "kubernetes-master:minions-api" - "kubernetes:api" series: trusty charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/examples/charmworld-no-placement.yaml0000664000175000017500000000111512672604536026332 0ustar marcomarcoservices: mongodb: charm: "cs:precise/mongodb-21" num_units: 1 constraints: "mem=2G cpu-cores=1" elasticsearch: charm: "cs:~charming-devs/precise/elasticsearch-2" num_units: 1 constraints: "mem=2G cpu-cores=1" charmworld: charm: "cs:~juju-jitsu/precise/charmworld-58" num_units: 1 expose: true options: charm_import_limit: -1 source: "lp:~bac/charmworld/ingest-local-charms" revno: 511 relations: - - "charmworld:essearch" - "elasticsearch:essearch" - - "charmworld:database" - "mongodb:database" series: precise charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/examples/charmworld.yaml0000664000175000017500000000145712672604536023763 0ustar marcomarcoservices: mongodb: charm: "cs:precise/mongodb-21" num_units: 1 annotations: "gui-x": "940.5" "gui-y": "388.7698359714502" constraints: "mem=2G cpu-cores=1" elasticsearch: charm: 
"cs:~charming-devs/precise/elasticsearch-2" num_units: 1 annotations: "gui-x": "490.5" "gui-y": "369.7698359714502" constraints: "mem=2G cpu-cores=1" charmworld: charm: "cs:~juju-jitsu/precise/charmworld-58" num_units: 1 expose: true annotations: "gui-x": "813.5" "gui-y": "112.23016402854975" options: charm_import_limit: -1 source: "lp:~bac/charmworld/ingest-local-charms" revno: 511 relations: - - "charmworld:essearch" - "elasticsearch:essearch" - - "charmworld:database" - "mongodb:database" series: precise charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/examples/openstack.yaml0000664000175000017500000001516412672604536023610 0ustar marcomarcomachines: '0': constraints: arch=amd64 series: trusty '1': constraints: arch=amd64 series: trusty '2': constraints: arch=amd64 series: trusty '3': constraints: arch=amd64 series: trusty relations: - - nova-compute:amqp - rabbitmq-server:amqp - - neutron-gateway:amqp - rabbitmq-server:amqp - - keystone:shared-db - mysql:shared-db - - nova-cloud-controller:identity-service - keystone:identity-service - - glance:identity-service - keystone:identity-service - - neutron-api:identity-service - keystone:identity-service - - neutron-openvswitch:neutron-plugin-api - neutron-api:neutron-plugin-api - - neutron-api:shared-db - mysql:shared-db - - neutron-api:amqp - rabbitmq-server:amqp - - neutron-gateway:neutron-plugin-api - neutron-api:neutron-plugin-api - - glance:shared-db - mysql:shared-db - - glance:amqp - rabbitmq-server:amqp - - nova-cloud-controller:image-service - glance:image-service - - nova-compute:image-service - glance:image-service - - nova-cloud-controller:cloud-compute - nova-compute:cloud-compute - - nova-cloud-controller:amqp - rabbitmq-server:amqp - - nova-cloud-controller:quantum-network-service - neutron-gateway:quantum-network-service - - nova-compute:neutron-plugin - neutron-openvswitch:neutron-plugin - - neutron-openvswitch:amqp - rabbitmq-server:amqp - - openstack-dashboard:identity-service - keystone:identity-service 
- - nova-cloud-controller:shared-db - mysql:shared-db - - nova-cloud-controller:neutron-api - neutron-api:neutron-api - - cinder:image-service - glance:image-service - - cinder:amqp - rabbitmq-server:amqp - - cinder:identity-service - keystone:identity-service - - cinder:cinder-volume-service - nova-cloud-controller:cinder-volume-service - - cinder-ceph:storage-backend - cinder:storage-backend - - ceph:client - nova-compute:ceph - - cinder:shared-db - mysql:shared-db - - ceph:client - cinder-ceph:ceph - - ceph:client - glance:ceph - - ceph-osd:mon - ceph:osd - - ntp:juju-info - nova-compute:juju-info - - ntp:juju-info - neutron-gateway:juju-info - - ceph-radosgw:mon - ceph:radosgw - - ceph-radosgw:identity-service - keystone:identity-service - - ceilometer:amqp - rabbitmq-server:amqp - - ceilometer-agent:ceilometer-service - ceilometer:ceilometer-service - - ceilometer:identity-service - keystone:identity-service - - ceilometer:identity-notifications - keystone:identity-notifications - - ceilometer-agent:nova-ceilometer - nova-compute:nova-ceilometer - - ceilometer:shared-db - mongodb:database series: trusty services: ceilometer: annotations: gui-x: '1288.8744298356794' gui-y: '0.7040786325134718' charm: cs:trusty/ceilometer-15 num_units: 1 options: openstack-origin: cloud:trusty-liberty to: - lxc:2 ceilometer-agent: annotations: gui-x: '1288.9999389648438' gui-y: '503' charm: cs:trusty/ceilometer-agent-11 ceph: annotations: gui-x: '750' gui-y: '500' charm: cs:trusty/ceph-42 num_units: 3 options: fsid: 5a791d94-980b-11e4-b6f6-3c970e8b1cf7 monitor-secret: AQAi5a9UeJXUExAA+By9u+GPhl8/XiUQ4nwI3A== osd-devices: /dev/sdb osd-reformat: 'yes' source: cloud:trusty-liberty to: - '1' - '2' - '3' ceph-osd: annotations: gui-x: '1000' gui-y: '500' charm: cs:trusty/ceph-osd-14 num_units: 1 options: osd-devices: /dev/sdb osd-reformat: 'yes' source: cloud:trusty-liberty to: - '0' ceph-radosgw: annotations: gui-x: '1000' gui-y: '250' charm: cs:trusty/ceph-radosgw-18 num_units: 1 
options: source: cloud:trusty-liberty use-embedded-webserver: true to: - lxc:0 cinder: annotations: gui-x: '750' gui-y: '0' charm: cs:trusty/cinder-31 num_units: 1 options: block-device: None glance-api-version: 2 ha-mcastport: 5401 openstack-origin: cloud:trusty-liberty to: - lxc:1 cinder-ceph: annotations: gui-x: '750' gui-y: '250' charm: cs:trusty/cinder-ceph-14 num_units: 0 glance: annotations: gui-x: '250' gui-y: '0' charm: cs:trusty/glance-28 num_units: 1 options: ha-mcastport: 5402 openstack-origin: cloud:trusty-liberty to: - lxc:2 keystone: annotations: gui-x: '500' gui-y: '0' charm: cs:trusty/keystone-31 num_units: 1 options: admin-password: openstack ha-mcastport: 5403 openstack-origin: cloud:trusty-liberty to: - lxc:3 mongodb: annotations: gui-x: '1287.9999389648438' gui-y: '251.24996948242188' charm: cs:trusty/mongodb-28 num_units: 1 to: - lxc:1 mysql: annotations: gui-x: '0' gui-y: '250' charm: cs:trusty/percona-cluster-31 num_units: 1 options: max-connections: 20000 to: - lxc:0 neutron-api: annotations: gui-x: '500' gui-y: '500' charm: cs:trusty/neutron-api-21 num_units: 1 options: neutron-security-groups: true openstack-origin: cloud:trusty-liberty to: - lxc:1 neutron-gateway: annotations: gui-x: '0' gui-y: '0' charm: cs:trusty/neutron-gateway-7 num_units: 1 options: ext-port: eth1 openstack-origin: cloud:trusty-liberty to: - '0' neutron-openvswitch: annotations: gui-x: '250' gui-y: '500' charm: cs:trusty/neutron-openvswitch-13 num_units: 0 nova-cloud-controller: annotations: gui-x: '0' gui-y: '500' charm: cs:trusty/nova-cloud-controller-64 num_units: 1 options: network-manager: Neutron openstack-origin: cloud:trusty-liberty quantum-security-groups: 'yes' to: - lxc:2 nova-compute: annotations: gui-x: '250' gui-y: '250' charm: cs:trusty/nova-compute-33 num_units: 3 options: enable-live-migration: true enable-resize: true manage-neutron-plugin-legacy-mode: false migration-auth-type: ssh openstack-origin: cloud:trusty-liberty to: - '1' - '2' - '3' ntp: 
annotations: gui-x: '1000' gui-y: '0' charm: cs:trusty/ntp-14 num_units: 0 openstack-dashboard: annotations: gui-x: '500' gui-y: '-250' charm: cs:trusty/openstack-dashboard-19 num_units: 1 options: openstack-origin: cloud:trusty-liberty to: - lxc:3 rabbitmq-server: annotations: gui-x: '500' gui-y: '250' charm: cs:trusty/rabbitmq-server-42 num_units: 1 options: source: cloud:trusty-liberty to: - lxc:0 charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/README.md0000664000175000017500000000342612672604536020374 0ustar marcomarcojujusvg ======= A library for generating SVGs from Juju bundles and environments. Installation ------------ To start using jujusvg, first ensure you have a valid Go environment, then run the following: go get gopkg.in/juju/jujusvg.v1 Dependencies ------------ The project uses godeps (https://launchpad.net/godeps) to manage Go dependencies. To install this, run: go get launchpad.net/godeps After installing it, you can update the dependencies to the revision specified in the `dependencies.tsv` file with the following: make deps Use `make create-deps` to update the dependencies file. Usage ----- Given a Juju bundle, you can convert this to an SVG programatically. This generates a simple SVG representation of a bundle or bundles that can then be included in a webpage as a visualization. For an example of how to use this library, please see `examples/generatesvg.go`. You can run this example like: go run generatesvg.go bundle.yaml > bundle.svg The examples directory also includes three sample bundles that you can play around with, or you can use the [Juju GUI](https://demo.jujucharms.com) to generate your own bundles. Design-related assets --------------------- Some assets are specified based on assets provided by the design team. These assets are specified in the defs section of the generated SVG, and can thus be found in the Canvas.definition() method. 
These assets are, except where indicated, embedded in a go file assigned to an exported variable, so that they may be used like so: ```go import ( "io" "gopkg.in/juju/jujusvg.v1/assets" ) // ... io.WriteString(canvas.Writer, assets.AssetToWrite) ``` Current assets in use: * ~~The service block~~ *the service block has been deprecated and is now handled with SVGo* * The relation health indicator charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/assets/0000775000175000017500000000000012672604536020412 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/jujusvg.v1/assets/service_module.svg0000664000175000017500000000672112672604536024146 0ustar marcomarco charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/assets/service_module.go0000664000175000017500000000665712672604536023764 0ustar marcomarcopackage assets // This is the SVG for the service module block used in the bundle diagram. // Note that there MUST NOT be anything (processing instructions, xml // declarations, or directives) before the tag. var ServiceModule = ` ` ���������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/assets/relation-icon-healthy.svg���������������������������0000664�0001750�0001750�00000001305�12672604536�025331� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������ 
���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/assets/relation-icon-healthy.go����������������������������0000664�0001750�0001750�00000001274�12672604536�025144� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������package assets var RelationIconHealthy = ` ` ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/hull.go����������������������������������������������������0000664�0001750�0001750�00000005722�12672604536�020411� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������package jujusvg import ( "image" "math" "sort" ) // getPointOutside returns a point that is outside the hull of existing placed // vertices so that an object can be placed on the canvas without overlapping // others. 
func getPointOutside(vertices []image.Point, padding image.Point) image.Point { // Shortcut some easy solutions. switch len(vertices) { case 0: return image.Point{0, 0} case 1: return image.Point{ vertices[0].X + padding.X, vertices[0].Y + padding.Y, } case 2: return image.Point{ int(math.Max(float64(vertices[0].X), float64(vertices[1].X))) + padding.X, int(math.Max(float64(vertices[0].Y), float64(vertices[1].Y))) + padding.Y, } } hull := convexHull(vertices) // Find point that is the furthest to the right on the hull. var rightmost image.Point maxDistance := 0.0 for _, vertex := range hull { fromOrigin := line{p0: vertex, p1: image.Point{0, 0}} distance := fromOrigin.length() if math.Abs(distance) > maxDistance { maxDistance = math.Abs(distance) rightmost = vertex } } return image.Point{ rightmost.X + padding.X, rightmost.Y + padding.Y, } } // vertexSet implements sort.Interface for image.Point, sorting first by X, then // by Y type vertexSet []image.Point func (vs vertexSet) Len() int { return len(vs) } func (vs vertexSet) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } func (vs vertexSet) Less(i, j int) bool { if vs[i].X == vs[j].X { return vs[i].Y < vs[j].Y } return vs[i].X < vs[j].X } // convexHull takes a list of vertices and returns the set of vertices which // make up the convex hull encapsulating all vertices on a plane. func convexHull(vertices []image.Point) []image.Point { // Simple cases can be shortcutted. if len(vertices) == 0 { return []image.Point{ {0, 0}, } } // For our purposes, we can assume that three vertices form a hull. 
if len(vertices) < 4 { return vertices } sort.Sort(vertexSet(vertices)) var lower, upper []image.Point for _, vertex := range vertices { for len(lower) >= 2 && cross(lower[len(lower)-2], lower[len(lower)-1], vertex) <= 0 { lower = lower[:len(lower)-1] } lower = append(lower, vertex) } for _, vertex := range reverse(vertices) { for len(upper) >= 2 && cross(upper[len(upper)-2], upper[len(upper)-1], vertex) <= 0 { upper = upper[:len(upper)-1] } upper = append(upper, vertex) } return append(lower[:len(lower)-1], upper[:len(upper)-1]...) } // cross finds the 2D cross-product of OA and OB vectors. // Returns a positive value if OAB makes a counter-clockwise turn, a negative // value if OAB makes a clockwise turn, and zero if the points are collinear. func cross(o, a, b image.Point) int { return (a.X-o.X)*(b.Y-o.Y) - (a.Y-o.Y)*(b.X-o.X) } // reverse reverses a slice of Points for use in finding the upper hull. func reverse(vertices []image.Point) []image.Point { for i := 0; i < len(vertices)/2; i++ { opp := len(vertices) - (i + 1) vertices[i], vertices[opp] = vertices[opp], vertices[i] } return vertices } ����������������������������������������������charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/iconfetcher_test.go����������������������������������������0000664�0001750�0001750�00000007427�12672604536�023001� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������package jujusvg import ( "fmt" "net/http" "net/http/httptest" "strings" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" ) type IconFetcherSuite struct{} var _ = gc.Suite(&IconFetcherSuite{}) func (s *IconFetcherSuite) TestLinkFetchIcons(c *gc.C) { tests := map[string][]byte{ "~charming-devs/precise/elasticsearch-2": 
[]byte(` `), "~juju-jitsu/precise/charmworld-58": []byte(` `), "precise/mongodb-21": []byte(` `), } iconURL := func(ref *charm.URL) string { return "/" + ref.Path() + ".svg" } b, err := charm.ReadBundleData(strings.NewReader(bundle)) c.Assert(err, gc.IsNil) err = b.Verify(nil, nil) c.Assert(err, gc.IsNil) fetcher := LinkFetcher{ IconURL: iconURL, } iconMap, err := fetcher.FetchIcons(b) c.Assert(err, gc.IsNil) for charm, link := range tests { assertXMLEqual(c, []byte(iconMap[charm]), []byte(link)) } } func (s *IconFetcherSuite) TestHTTPFetchIcons(c *gc.C) { fetchCount := 0 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fetchCount++ fmt.Fprintln(w, fmt.Sprintf("%s", r.URL.Path)) })) defer ts.Close() tsIconURL := func(ref *charm.URL) string { return ts.URL + "/" + ref.Path() + ".svg" } b, err := charm.ReadBundleData(strings.NewReader(bundle)) c.Assert(err, gc.IsNil) err = b.Verify(nil, nil) c.Assert(err, gc.IsNil) // Only one copy of precise/mongodb-21 b.Services["duplicateService"] = &charm.ServiceSpec{ Charm: "cs:precise/mongodb-21", NumUnits: 1, } fetcher := HTTPFetcher{ Concurrency: 1, IconURL: tsIconURL, } iconMap, err := fetcher.FetchIcons(b) c.Assert(err, gc.IsNil) c.Assert(iconMap, gc.DeepEquals, map[string][]byte{ "~charming-devs/precise/elasticsearch-2": []byte("/~charming-devs/precise/elasticsearch-2.svg\n"), "~juju-jitsu/precise/charmworld-58": []byte("/~juju-jitsu/precise/charmworld-58.svg\n"), "precise/mongodb-21": []byte("/precise/mongodb-21.svg\n"), }) fetcher.Concurrency = 10 iconMap, err = fetcher.FetchIcons(b) c.Assert(err, gc.IsNil) c.Assert(iconMap, gc.DeepEquals, map[string][]byte{ "~charming-devs/precise/elasticsearch-2": []byte("/~charming-devs/precise/elasticsearch-2.svg\n"), "~juju-jitsu/precise/charmworld-58": []byte("/~juju-jitsu/precise/charmworld-58.svg\n"), "precise/mongodb-21": []byte("/precise/mongodb-21.svg\n"), }) c.Assert(fetchCount, gc.Equals, 6) } func (s *IconFetcherSuite) 
TestHTTPBadIconURL(c *gc.C) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.Error(w, "bad-wolf", http.StatusForbidden) return })) defer ts.Close() tsIconURL := func(ref *charm.URL) string { return ts.URL + "/" + ref.Path() + ".svg" } b, err := charm.ReadBundleData(strings.NewReader(bundle)) c.Assert(err, gc.IsNil) err = b.Verify(nil, nil) c.Assert(err, gc.IsNil) fetcher := HTTPFetcher{ Concurrency: 1, IconURL: tsIconURL, } iconMap, err := fetcher.FetchIcons(b) c.Assert(err, gc.ErrorMatches, fmt.Sprintf("cannot retrieve icon from %s.+\\.svg: 403 Forbidden.*", ts.URL)) c.Assert(iconMap, gc.IsNil) fetcher.Concurrency = 10 iconMap, err = fetcher.FetchIcons(b) c.Assert(err, gc.ErrorMatches, fmt.Sprintf("cannot retrieve icon from %s.+\\.svg: 403 Forbidden.*", ts.URL)) c.Assert(iconMap, gc.IsNil) } �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/svg_test.go������������������������������������������������0000664�0001750�0001750�00000010436�12672604536�021301� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������package jujusvg import ( "bytes" "fmt" "github.com/juju/xml" gc "gopkg.in/check.v1" ) type SVGSuite struct{} var _ = gc.Suite(&SVGSuite{}) func (s *SVGSuite) TestProcessIcon(c *gc.C) { tests := []struct { about string icon string expected string err string }{ { about: "Nothing stripped", icon: ` `, expected: ` `, }, { about: "SVG inside an SVG", icon: ` `, expected: ` `, }, { about: "ProcInst at start 
stripped", icon: ` `, expected: ` `, }, { about: "Directive at start stripped", icon: ` `, expected: ` `, }, { about: "ProcInst at end stripped", icon: ` `, expected: ` `, }, { about: "Directive at end stripped", icon: ` `, expected: ` `, }, { about: "ProcInsts/Directives inside svg left in place", icon: ` `, expected: ` `, }, { about: "Not an SVG", icon: ` bad-wolf `, err: "icon does not appear to be a valid SVG", }, } for i, test := range tests { in := bytes.NewBuffer([]byte(test.icon)) out := bytes.Buffer{} err := processIcon(in, &out, fmt.Sprintf("test-%d", i)) if test.err != "" { c.Assert(err, gc.ErrorMatches, test.err) } else { c.Assert(err, gc.IsNil) assertXMLEqual(c, out.Bytes(), []byte(test.expected)) } } } func (s *SVGSuite) TestSetXMLAttr(c *gc.C) { // Attribute is added. expected := []xml.Attr{ { Name: xml.Name{ Local: "id", }, Value: "foo", }, } result := setXMLAttr([]xml.Attr{}, xml.Name{ Local: "id", }, "foo") c.Assert(result, gc.DeepEquals, expected) // Attribute is changed. result = setXMLAttr([]xml.Attr{ { Name: xml.Name{ Local: "id", }, Value: "bar", }, }, xml.Name{ Local: "id", }, "foo") c.Assert(result, gc.DeepEquals, expected) // Attribute is changed, existing attributes unchanged. expected = []xml.Attr{ { Name: xml.Name{ Local: "class", }, Value: "bar", }, { Name: xml.Name{ Local: "id", }, Value: "foo", }, } result = setXMLAttr([]xml.Attr{ { Name: xml.Name{ Local: "class", }, Value: "bar", }, { Name: xml.Name{ Local: "id", }, Value: "bar", }, }, xml.Name{ Local: "id", }, "foo") c.Assert(result, gc.DeepEquals, expected) // Attribute is added, existing attributes unchanged. 
result = setXMLAttr([]xml.Attr{ { Name: xml.Name{ Local: "class", }, Value: "bar", }, }, xml.Name{ Local: "id", }, "foo") c.Assert(result, gc.DeepEquals, expected) } charm-2.1.1/src/gopkg.in/juju/jujusvg.v1/iconfetcher.go0000664000175000017500000001012512672604536021727 0ustar marcomarcopackage jujusvg import ( "bytes" "fmt" "io/ioutil" "net/http" "sync" "github.com/juju/utils/parallel" "github.com/juju/xml" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" ) // An IconFetcher provides functionality for retrieving icons for the charms // within a given bundle. The FetchIcons function accepts a bundle, and // returns a map from charm paths to icon data. type IconFetcher interface { FetchIcons(*charm.BundleData) (map[string][]byte, error) } // LinkFetcher fetches icons as links so that they are included within the SVG // as remote resources using SVG tags. type LinkFetcher struct { // IconURL returns the URL of the entity for embedding IconURL func(*charm.URL) string } // FetchIcons generates the svg image tags given an appropriate URL, generating // tags only for unique icons. func (l *LinkFetcher) FetchIcons(b *charm.BundleData) (map[string][]byte, error) { // Maintain a list of icons that have already been fetched. alreadyFetched := make(map[string]bool) // Build the map of icons. icons := make(map[string][]byte) for _, serviceData := range b.Services { charmId, err := charm.ParseURL(serviceData.Charm) if err != nil { return nil, errgo.Notef(err, "cannot parse charm %q", serviceData.Charm) } path := charmId.Path() // Don't duplicate icons in the map. if !alreadyFetched[path] { alreadyFetched[path] = true icons[path] = []byte(fmt.Sprintf(` `, escapeString(l.IconURL(charmId)))) } } return icons, nil } // Wrap around xml.EscapeText to make it more string-friendly. 
// escapeString is a string-friendly wrapper around xml.EscapeText: it
// XML-escapes s and returns the escaped text as a string.
func escapeString(s string) string {
	var escaped bytes.Buffer
	xml.EscapeText(&escaped, []byte(s))
	return escaped.String()
}
// SeriesForCharm takes a requested series and a list of series supported by a
// charm and returns the series which is relevant.
// If the requested series is empty, then the first supported series is used,
// otherwise the requested series is validated against the supported series.
func SeriesForCharm(requestedSeries string, supportedSeries []string) (string, error) {
	if len(supportedSeries) == 0 {
		// Legacy charm that does not declare any series.
		if requestedSeries == "" {
			return "", missingSeriesError
		}
		return requestedSeries, nil
	}
	if requestedSeries == "" {
		// Fall back to the charm's default, i.e. the first listed series.
		return supportedSeries[0], nil
	}
	for _, supported := range supportedSeries {
		if supported == requestedSeries {
			return requestedSeries, nil
		}
	}
	return "", &unsupportedSeriesError{requestedSeries, supportedSeries}
}

// missingSeriesError is used to denote that SeriesForCharm could not determine
// a series because a legacy charm did not declare any.
var missingSeriesError = fmt.Errorf("series not specified and charm does not define any")

// IsMissingSeriesError returns true if err is an missingSeriesError.
func IsMissingSeriesError(err error) bool {
	return err == missingSeriesError
}

// unsupportedSeriesError represents an error indicating that the requested
// series is not supported by the charm.
type unsupportedSeriesError struct {
	requestedSeries string
	supportedSeries []string
}

// Error implements the error interface.
func (e *unsupportedSeriesError) Error() string {
	return fmt.Sprintf(
		"series %q not supported by charm, supported series are: %s",
		e.requestedSeries, strings.Join(e.supportedSeries, ","),
	)
}

// NewUnsupportedSeriesError returns an error indicating that the requested series
// is not supported by a charm.
func NewUnsupportedSeriesError(requestedSeries string, supportedSeries []string) error {
	return &unsupportedSeriesError{requestedSeries, supportedSeries}
}

// IsUnsupportedSeriesError returns true if err is an UnsupportedSeriesError.
func IsUnsupportedSeriesError(err error) bool {
	_, ok := err.(*unsupportedSeriesError)
	return ok
}
package charm_test import ( "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" ) var _ = gc.Suite(&BundleSuite{}) type BundleSuite struct { testing.IsolationSuite } func (*BundleSuite) TestReadBundleDir(c *gc.C) { path := bundleDirPath(c, "wordpress-simple") b, err := charm.ReadBundle(path) c.Assert(err, gc.IsNil) c.Assert(b, gc.FitsTypeOf, (*charm.BundleDir)(nil)) checkWordpressBundle(c, b, path) } func (*BundleSuite) TestReadBundleArchive(c *gc.C) { path := bundleDirPath(c, "wordpress-simple") b, err := charm.ReadBundle(path) c.Assert(err, gc.IsNil) c.Assert(b, gc.FitsTypeOf, (*charm.BundleDir)(nil)) checkWordpressBundle(c, b, path) } func checkWordpressBundle(c *gc.C, b charm.Bundle, path string) { // Load the charms required by the bundle. wordpressCharm := readCharmDir(c, "wordpress") mysqlCharm := readCharmDir(c, "mysql") bd := b.Data() c.Assert(bd.RequiredCharms(), jc.DeepEquals, []string{"mysql", "wordpress"}) charms := map[string]charm.Charm{ "wordpress": wordpressCharm, "mysql": mysqlCharm, } err := bd.VerifyWithCharms(verifyOk, nil, charms) c.Assert(err, gc.IsNil) c.Assert(bd.Services, jc.DeepEquals, map[string]*charm.ServiceSpec{ "wordpress": { Charm: "wordpress", }, "mysql": { Charm: "mysql", NumUnits: 1, }, }) c.Assert(bd.Relations, jc.DeepEquals, [][]string{ {"wordpress:db", "mysql:server"}, }) c.Assert(b.ReadMe(), gc.Equals, "A dummy bundle\n") switch b := b.(type) { case *charm.BundleArchive: c.Assert(b.Path, gc.Equals, path) case *charm.BundleDir: c.Assert(b.Path, gc.Equals, path) } } func verifyOk(string) error { return nil } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/actions_test.go0000664000175000017500000006262412672604527023365 0ustar marcomarco// Copyright 2011-2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. 
package charm import ( "bytes" "encoding/json" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" ) type ActionsSuite struct{} var _ = gc.Suite(&ActionsSuite{}) func (s *ActionsSuite) TestNewActions(c *gc.C) { emptyAction := NewActions() c.Assert(emptyAction, jc.DeepEquals, &Actions{}) } func (s *ActionsSuite) TestValidateOk(c *gc.C) { for i, test := range []struct { description string actionSpec *ActionSpec objectToValidate map[string]interface{} }{{ description: "Validation of an empty object is ok.", actionSpec: &ActionSpec{ Description: "Take a snapshot of the database.", Params: map[string]interface{}{ "title": "snapshot", "description": "Take a snapshot of the database.", "type": "object", "properties": map[string]interface{}{ "outfile": map[string]interface{}{ "description": "The file to write out to.", "type": "string"}}}}, objectToValidate: nil, }, { description: "Validation of one required value.", actionSpec: &ActionSpec{ Description: "Take a snapshot of the database.", Params: map[string]interface{}{ "title": "snapshot", "description": "Take a snapshot of the database.", "type": "object", "properties": map[string]interface{}{ "outfile": map[string]interface{}{ "description": "The file to write out to.", "type": "string"}}, "required": []interface{}{"outfile"}}}, objectToValidate: map[string]interface{}{ "outfile": "out-2014-06-12.bz2", }, }, { description: "Validation of one required and one optional value.", actionSpec: &ActionSpec{ Description: "Take a snapshot of the database.", Params: map[string]interface{}{ "title": "snapshot", "description": "Take a snapshot of the database.", "type": "object", "properties": map[string]interface{}{ "outfile": map[string]interface{}{ "description": "The file to write out to.", "type": "string"}, "quality": map[string]interface{}{ "description": "Compression quality", "type": "integer", "minimum": 0, "maximum": 9}}, "required": []interface{}{"outfile"}}}, objectToValidate: map[string]interface{}{ 
"outfile": "out-2014-06-12.bz2", }, }, { description: "Validation of an optional, range limited value.", actionSpec: &ActionSpec{ Description: "Take a snapshot of the database.", Params: map[string]interface{}{ "title": "snapshot", "description": "Take a snapshot of the database.", "type": "object", "properties": map[string]interface{}{ "outfile": map[string]interface{}{ "description": "The file to write out to.", "type": "string"}, "quality": map[string]interface{}{ "description": "Compression quality", "type": "integer", "minimum": 0, "maximum": 9}}, "required": []interface{}{"outfile"}}}, objectToValidate: map[string]interface{}{ "outfile": "out-2014-06-12.bz2", "quality": 5, }, }} { c.Logf("test %d: %s", i, test.description) err := test.actionSpec.ValidateParams(test.objectToValidate) c.Assert(err, jc.ErrorIsNil) } } func (s *ActionsSuite) TestValidateFail(c *gc.C) { var validActionTests = []struct { description string actionSpec *ActionSpec badActionJson string expectedError string }{{ description: "Validation of one required value.", actionSpec: &ActionSpec{ Description: "Take a snapshot of the database.", Params: map[string]interface{}{ "title": "snapshot", "description": "Take a snapshot of the database.", "type": "object", "properties": map[string]interface{}{ "outfile": map[string]interface{}{ "description": "The file to write out to.", "type": "string"}}, "required": []interface{}{"outfile"}}}, badActionJson: `{"outfile": 5}`, expectedError: "validation failed: (root).outfile : must be of type string, given 5", }, { description: "Restrict to only one property", actionSpec: &ActionSpec{ Description: "Take a snapshot of the database.", Params: map[string]interface{}{ "title": "snapshot", "description": "Take a snapshot of the database.", "type": "object", "properties": map[string]interface{}{ "outfile": map[string]interface{}{ "description": "The file to write out to.", "type": "string"}}, "required": []interface{}{"outfile"}, "additionalProperties": 
false}}, badActionJson: `{"outfile": "foo.bz", "bar": "foo"}`, expectedError: "validation failed: (root) : additional property \"bar\" is not allowed, given {\"bar\":\"foo\",\"outfile\":\"foo.bz\"}", }, { description: "Validation of one required and one optional value.", actionSpec: &ActionSpec{ Description: "Take a snapshot of the database.", Params: map[string]interface{}{ "title": "snapshot", "description": "Take a snapshot of the database.", "type": "object", "properties": map[string]interface{}{ "outfile": map[string]interface{}{ "description": "The file to write out to.", "type": "string"}, "quality": map[string]interface{}{ "description": "Compression quality", "type": "integer", "minimum": 0, "maximum": 9}}, "required": []interface{}{"outfile"}}}, badActionJson: `{"quality": 5}`, expectedError: "validation failed: (root) : \"outfile\" property is missing and required, given {\"quality\":5}", }, { description: "Validation of an optional, range limited value.", actionSpec: &ActionSpec{ Description: "Take a snapshot of the database.", Params: map[string]interface{}{ "title": "snapshot", "description": "Take a snapshot of the database.", "type": "object", "properties": map[string]interface{}{ "outfile": map[string]interface{}{ "description": "The file to write out to.", "type": "string"}, "quality": map[string]interface{}{ "description": "Compression quality", "type": "integer", "minimum": 0, "maximum": 9}}, "required": []interface{}{"outfile"}}}, badActionJson: ` { "outfile": "out-2014-06-12.bz2", "quality": "two" }`, expectedError: "validation failed: (root).quality : must be of type integer, given \"two\"", }} for i, test := range validActionTests { c.Logf("test %d: %s", i, test.description) var params map[string]interface{} jsonBytes := []byte(test.badActionJson) err := json.Unmarshal(jsonBytes, ¶ms) c.Assert(err, gc.IsNil) err = test.actionSpec.ValidateParams(params) c.Assert(err.Error(), gc.Equals, test.expectedError) } } func (s *ActionsSuite) 
TestCleanseOk(c *gc.C) { var goodInterfaceTests = []struct { description string acceptableInterface map[string]interface{} expectedInterface map[string]interface{} }{{ description: "An interface requiring no changes.", acceptableInterface: map[string]interface{}{ "key1": "value1", "key2": "value2", "key3": map[string]interface{}{ "foo1": "val1", "foo2": "val2"}}, expectedInterface: map[string]interface{}{ "key1": "value1", "key2": "value2", "key3": map[string]interface{}{ "foo1": "val1", "foo2": "val2"}}, }, { description: "Substitute a single inner map[i]i.", acceptableInterface: map[string]interface{}{ "key1": "value1", "key2": "value2", "key3": map[interface{}]interface{}{ "foo1": "val1", "foo2": "val2"}}, expectedInterface: map[string]interface{}{ "key1": "value1", "key2": "value2", "key3": map[string]interface{}{ "foo1": "val1", "foo2": "val2"}}, }, { description: "Substitute nested inner map[i]i.", acceptableInterface: map[string]interface{}{ "key1a": "val1a", "key2a": "val2a", "key3a": map[interface{}]interface{}{ "key1b": "val1b", "key2b": map[interface{}]interface{}{ "key1c": "val1c"}}}, expectedInterface: map[string]interface{}{ "key1a": "val1a", "key2a": "val2a", "key3a": map[string]interface{}{ "key1b": "val1b", "key2b": map[string]interface{}{ "key1c": "val1c"}}}, }, { description: "Substitute nested map[i]i within []i.", acceptableInterface: map[string]interface{}{ "key1a": "val1a", "key2a": []interface{}{5, "foo", map[string]interface{}{ "key1b": "val1b", "key2b": map[interface{}]interface{}{ "key1c": "val1c"}}}}, expectedInterface: map[string]interface{}{ "key1a": "val1a", "key2a": []interface{}{5, "foo", map[string]interface{}{ "key1b": "val1b", "key2b": map[string]interface{}{ "key1c": "val1c"}}}}, }} for i, test := range goodInterfaceTests { c.Logf("test %d: %s", i, test.description) cleansedInterfaceMap, err := cleanse(test.acceptableInterface) c.Assert(err, gc.IsNil) c.Assert(cleansedInterfaceMap, jc.DeepEquals, test.expectedInterface) } } func 
(s *ActionsSuite) TestCleanseFail(c *gc.C) { var badInterfaceTests = []struct { description string failInterface map[string]interface{} expectedError string }{{ description: "An inner map[interface{}]interface{} with an int key.", failInterface: map[string]interface{}{ "key1": "value1", "key2": "value2", "key3": map[interface{}]interface{}{ "foo1": "val1", 5: "val2"}}, expectedError: "map keyed with non-string value", }, { description: "An inner []interface{} containing a map[i]i with an int key.", failInterface: map[string]interface{}{ "key1a": "val1b", "key2a": "val2b", "key3a": []interface{}{"foo1", 5, map[interface{}]interface{}{ "key1b": "val1b", "key2b": map[interface{}]interface{}{ "key1c": "val1c", 5: "val2c"}}}}, expectedError: "map keyed with non-string value", }} for i, test := range badInterfaceTests { c.Logf("test %d: %s", i, test.description) _, err := cleanse(test.failInterface) c.Assert(err, gc.NotNil) c.Assert(err.Error(), gc.Equals, test.expectedError) } } func (s *ActionsSuite) TestReadGoodActionsYaml(c *gc.C) { var goodActionsYamlTests = []struct { description string yaml string expectedActions *Actions }{{ description: "A simple snapshot actions YAML with one parameter.", yaml: ` snapshot: description: Take a snapshot of the database. params: outfile: description: "The file to write out to." 
type: string required: ["outfile"] `, expectedActions: &Actions{map[string]ActionSpec{ "snapshot": { Description: "Take a snapshot of the database.", Params: map[string]interface{}{ "title": "snapshot", "description": "Take a snapshot of the database.", "type": "object", "properties": map[string]interface{}{ "outfile": map[string]interface{}{ "description": "The file to write out to.", "type": "string"}}, "required": []interface{}{"outfile"}}}}}, }, { description: "An empty Actions definition.", yaml: "", expectedActions: &Actions{ ActionSpecs: map[string]ActionSpec{}, }, }, { description: "A more complex schema with hyphenated names and multiple parameters.", yaml: ` snapshot: description: "Take a snapshot of the database." params: outfile: description: "The file to write out to." type: "string" compression-quality: description: "The compression quality." type: "integer" minimum: 0 maximum: 9 exclusiveMaximum: false remote-sync: description: "Sync a file to a remote host." params: file: description: "The file to send out." type: "string" format: "uri" remote-uri: description: "The host to sync to." 
type: "string" format: "uri" util: description: "The util to perform the sync (rsync or scp.)" type: "string" enum: ["rsync", "scp"] required: ["file", "remote-uri"] `, expectedActions: &Actions{map[string]ActionSpec{ "snapshot": { Description: "Take a snapshot of the database.", Params: map[string]interface{}{ "title": "snapshot", "description": "Take a snapshot of the database.", "type": "object", "properties": map[string]interface{}{ "outfile": map[string]interface{}{ "description": "The file to write out to.", "type": "string"}, "compression-quality": map[string]interface{}{ "description": "The compression quality.", "type": "integer", "minimum": 0, "maximum": 9, "exclusiveMaximum": false}}}}, "remote-sync": { Description: "Sync a file to a remote host.", Params: map[string]interface{}{ "title": "remote-sync", "description": "Sync a file to a remote host.", "type": "object", "properties": map[string]interface{}{ "file": map[string]interface{}{ "description": "The file to send out.", "type": "string", "format": "uri"}, "remote-uri": map[string]interface{}{ "description": "The host to sync to.", "type": "string", "format": "uri"}, "util": map[string]interface{}{ "description": "The util to perform the sync (rsync or scp.)", "type": "string", "enum": []interface{}{"rsync", "scp"}}}, "required": []interface{}{"file", "remote-uri"}}}}}, }, { description: "A schema with other keys, e.g. \"definitions\"", yaml: ` snapshot: description: "Take a snapshot of the database." params: outfile: description: "The file to write out to." type: "string" compression-quality: description: "The compression quality." 
type: "integer" minimum: 0 maximum: 9 exclusiveMaximum: false definitions: diskdevice: {} something-else: {} `, expectedActions: &Actions{map[string]ActionSpec{ "snapshot": { Description: "Take a snapshot of the database.", Params: map[string]interface{}{ "title": "snapshot", "description": "Take a snapshot of the database.", "type": "object", "properties": map[string]interface{}{ "outfile": map[string]interface{}{ "description": "The file to write out to.", "type": "string", }, "compression-quality": map[string]interface{}{ "description": "The compression quality.", "type": "integer", "minimum": 0, "maximum": 9, "exclusiveMaximum": false, }, }, "definitions": map[string]interface{}{ "diskdevice": map[string]interface{}{}, "something-else": map[string]interface{}{}, }, }, }, }}, }, { description: "A schema with no \"params\" key, implying no options.", yaml: ` snapshot: description: Take a snapshot of the database. `, expectedActions: &Actions{map[string]ActionSpec{ "snapshot": { Description: "Take a snapshot of the database.", Params: map[string]interface{}{ "description": "Take a snapshot of the database.", "title": "snapshot", "type": "object", "properties": map[string]interface{}{}, }}}}, }, { description: "A schema with no values at all, implying no options.", yaml: ` snapshot: `, expectedActions: &Actions{map[string]ActionSpec{ "snapshot": { Description: "No description", Params: map[string]interface{}{ "description": "No description", "title": "snapshot", "type": "object", "properties": map[string]interface{}{}, }}}}, }} // Beginning of testing loop for i, test := range goodActionsYamlTests { c.Logf("test %d: %s", i, test.description) reader := bytes.NewReader([]byte(test.yaml)) loadedAction, err := ReadActionsYaml(reader) c.Assert(err, gc.IsNil) c.Check(loadedAction, jc.DeepEquals, test.expectedActions) } } func (s *ActionsSuite) TestReadBadActionsYaml(c *gc.C) { var badActionsYamlTests = []struct { description string yaml string expectedError string }{{ 
description: "Reject JSON-Schema containing references.", yaml: ` snapshot: description: Take a snapshot of the database. params: $schema: "http://json-schema.org/draft-03/schema#" `, expectedError: "schema key \"$schema\" not compatible with this version of juju", }, { description: "Reject JSON-Schema containing references.", yaml: ` snapshot: description: Take a snapshot of the database. params: outfile: { $ref: "http://json-schema.org/draft-03/schema#" } `, expectedError: "schema key \"$ref\" not compatible with this version of juju", }, { description: "Malformed YAML: missing key in \"outfile\".", yaml: ` snapshot: description: Take a snapshot of the database. params: outfile: The file to write out to. type: string default: foo.bz2 `, expectedError: "YAML error: line 6: mapping values are not allowed in this context", }, { description: "Malformed JSON-Schema: $schema element misplaced.", yaml: ` snapshot: description: Take a snapshot of the database. params: outfile: $schema: http://json-schema.org/draft-03/schema# description: The file to write out to. type: string default: foo.bz2 `, expectedError: "YAML error: line 3: mapping values are not allowed in this context", }, { description: "Malformed Actions: hyphen at beginning of action name.", yaml: ` -snapshot: description: Take a snapshot of the database. `, expectedError: "bad action name -snapshot", }, { description: "Malformed Actions: hyphen after action name.", yaml: ` snapshot-: description: Take a snapshot of the database. `, expectedError: "bad action name snapshot-", }, { description: "Malformed Actions: caps in action name.", yaml: ` Snapshot: description: Take a snapshot of the database. 
`, expectedError: "bad action name Snapshot", }, { description: "A non-string description fails to parse", yaml: ` snapshot: description: ["Take a snapshot of the database."] `, expectedError: "value for schema key \"description\" must be a string", }, { description: "A non-list \"required\" key", yaml: ` snapshot: description: Take a snapshot of the database. params: outfile: description: "The file to write out to." type: string required: "outfile" `, expectedError: "value for schema key \"required\" must be a YAML list", }, { description: "A schema with an empty \"params\" key fails to parse", yaml: ` snapshot: description: Take a snapshot of the database. params: `, expectedError: "params failed to parse as a map", }, { description: "A schema with a non-map \"params\" value fails to parse", yaml: ` snapshot: description: Take a snapshot of the database. params: ["a", "b"] `, expectedError: "params failed to parse as a map", }, { description: "\"definitions\" goes against JSON-Schema definition", yaml: ` snapshot: description: "Take a snapshot of the database." params: outfile: description: "The file to write out to." type: "string" definitions: diskdevice: ["a"] something-else: {"a": "b"} `, expectedError: "invalid params schema for action schema snapshot: definitions must be of type array of schemas", }, { description: "excess keys not in the JSON-Schema spec will be rejected", yaml: ` snapshot: description: "Take a snapshot of the database." params: outfile: description: "The file to write out to." type: "string" compression-quality: description: "The compression quality." 
type: "integer" minimum: 0 maximum: 9 exclusiveMaximum: false definitions: diskdevice: {} something-else: {} other-key: ["some", "values"], `, expectedError: "YAML error: line 16: did not find expected key", }} for i, test := range badActionsYamlTests { c.Logf("test %d: %s", i, test.description) reader := bytes.NewReader([]byte(test.yaml)) _, err := ReadActionsYaml(reader) c.Assert(err, gc.NotNil) c.Check(err.Error(), gc.Equals, test.expectedError) } } func (s *ActionsSuite) TestRecurseMapOnKeys(c *gc.C) { tests := []struct { should string givenKeys []string givenMap map[string]interface{} expected interface{} shouldFail bool }{{ should: "fail if the specified key was not in the map", givenKeys: []string{"key", "key2"}, givenMap: map[string]interface{}{ "key": map[string]interface{}{ "key": "value", }, }, shouldFail: true, }, { should: "fail if a key was not a string", givenKeys: []string{"key", "key2"}, givenMap: map[string]interface{}{ "key": map[interface{}]interface{}{ 5: "value", }, }, shouldFail: true, }, { should: "fail if we have more keys but not a recursable val", givenKeys: []string{"key", "key2"}, givenMap: map[string]interface{}{ "key": []string{"a", "b", "c"}, }, shouldFail: true, }, { should: "retrieve a good value", givenKeys: []string{"key", "key2"}, givenMap: map[string]interface{}{ "key": map[string]interface{}{ "key2": "value", }, }, expected: "value", }, { should: "retrieve a map", givenKeys: []string{"key"}, givenMap: map[string]interface{}{ "key": map[string]interface{}{ "key": "value", }, }, expected: map[string]interface{}{ "key": "value", }, }, { should: "retrieve a slice", givenKeys: []string{"key"}, givenMap: map[string]interface{}{ "key": []string{"a", "b", "c"}, }, expected: []string{"a", "b", "c"}, }} for i, t := range tests { c.Logf("test %d: should %s\n map: %#v\n keys: %#v", i, t.should, t.givenMap, t.givenKeys) obtained, failed := recurseMapOnKeys(t.givenKeys, t.givenMap) c.Assert(!failed, gc.Equals, t.shouldFail) if !t.shouldFail 
{ c.Check(obtained, jc.DeepEquals, t.expected) } } } func (s *ActionsSuite) TestInsertDefaultValues(c *gc.C) { schemas := map[string]string{ "simple": ` act: params: val: type: string default: somestr `[1:], "complicated": ` act: params: val: type: object properties: foo: type: string bar: type: object properties: baz: type: string default: boz `[1:], "default-object": ` act: params: val: type: object default: foo: bar bar: baz: woz `[1:], "none": ` act: params: val: type: object properties: var: type: object properties: x: type: string `[1:]} for i, t := range []struct { should string schema string withParams map[string]interface{} expectedResult map[string]interface{} expectedError string }{{ should: "error with no schema", expectedError: "schema must be of type object", }, { should: "create a map if handed nil", schema: schemas["none"], withParams: nil, expectedResult: map[string]interface{}{}, }, { should: "create and fill target if handed nil", schema: schemas["simple"], withParams: nil, expectedResult: map[string]interface{}{"val": "somestr"}, }, { should: "create a simple default value", schema: schemas["simple"], withParams: map[string]interface{}{}, expectedResult: map[string]interface{}{"val": "somestr"}, }, { should: "do nothing for no default value", schema: schemas["none"], withParams: map[string]interface{}{}, expectedResult: map[string]interface{}{}, }, { should: "insert a default value within a nested map", schema: schemas["complicated"], withParams: map[string]interface{}{}, expectedResult: map[string]interface{}{ "val": map[string]interface{}{ "bar": map[string]interface{}{ "baz": "boz", }}}, }, { should: "create a default value which is an object", schema: schemas["default-object"], withParams: map[string]interface{}{}, expectedResult: map[string]interface{}{ "val": map[string]interface{}{ "foo": "bar", "bar": map[string]interface{}{ "baz": "woz", }}}, }, { should: "not overwrite existing values with default objects", schema: 
schemas["default-object"], withParams: map[string]interface{}{"val": 5}, expectedResult: map[string]interface{}{"val": 5}, }, { should: "interleave defaults into existing objects", schema: schemas["complicated"], withParams: map[string]interface{}{ "val": map[string]interface{}{ "foo": "bar", "bar": map[string]interface{}{ "faz": "foz", }}}, expectedResult: map[string]interface{}{ "val": map[string]interface{}{ "foo": "bar", "bar": map[string]interface{}{ "baz": "boz", "faz": "foz", }}}, }} { c.Logf("test %d: should %s", i, t.should) schema := getSchemaForAction(c, t.schema) // Testing this method result, err := schema.InsertDefaults(t.withParams) if t.expectedError != "" { c.Check(err, gc.ErrorMatches, t.expectedError) continue } c.Assert(err, jc.ErrorIsNil) c.Check(result, jc.DeepEquals, t.expectedResult) } } func getSchemaForAction(c *gc.C, wholeSchema string) ActionSpec { // Load up the YAML schema definition. reader := bytes.NewReader([]byte(wholeSchema)) loadedActions, err := ReadActionsYaml(reader) c.Assert(err, gc.IsNil) // Same action name for all tests, "act". return loadedActions.ActionSpecs["act"] } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/hooks/0000775000175000017500000000000012672604527021450 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/hooks/hooks.go0000664000175000017500000000610612672604527023125 0ustar marcomarco// Copyright 2013 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. // hooks provides types and constants that define the hooks known to Juju. package hooks // Kind enumerates the different kinds of hooks that exist. type Kind string const ( // None of these hooks are ever associated with a relation; each of them // represents a change to the state of the unit as a whole. The values // themselves are all valid hook names. 
Install Kind = "install" Start Kind = "start" ConfigChanged Kind = "config-changed" UpgradeCharm Kind = "upgrade-charm" Stop Kind = "stop" Action Kind = "action" CollectMetrics Kind = "collect-metrics" MeterStatusChanged Kind = "meter-status-changed" LeaderElected Kind = "leader-elected" LeaderDeposed Kind = "leader-deposed" LeaderSettingsChanged Kind = "leader-settings-changed" UpdateStatus Kind = "update-status" // These hooks require an associated relation, and the name of the relation // unit whose change triggered the hook. The hook file names that these // kinds represent will be prefixed by the relation name; for example, // "db-relation-joined". RelationJoined Kind = "relation-joined" RelationChanged Kind = "relation-changed" RelationDeparted Kind = "relation-departed" // This hook requires an associated relation. The represented hook file name // will be prefixed by the relation name, just like the other Relation* Kind // values. RelationBroken Kind = "relation-broken" // These hooks require an associated storage. The hook file names that these // kinds represent will be prefixed by the storage name; for example, // "shared-fs-storage-attached". StorageAttached Kind = "storage-attached" StorageDetaching Kind = "storage-detaching" ) var unitHooks = []Kind{ Install, Start, ConfigChanged, UpgradeCharm, Stop, CollectMetrics, MeterStatusChanged, LeaderElected, LeaderDeposed, LeaderSettingsChanged, UpdateStatus, } // UnitHooks returns all known unit hook kinds. func UnitHooks() []Kind { hooks := make([]Kind, len(unitHooks)) copy(hooks, unitHooks) return hooks } var relationHooks = []Kind{ RelationJoined, RelationChanged, RelationDeparted, RelationBroken, } // RelationHooks returns all known relation hook kinds. func RelationHooks() []Kind { hooks := make([]Kind, len(relationHooks)) copy(hooks, relationHooks) return hooks } var storageHooks = []Kind{ StorageAttached, StorageDetaching, } // StorageHooks returns all known storage hook kinds. 
func StorageHooks() []Kind { hooks := make([]Kind, len(storageHooks)) copy(hooks, storageHooks) return hooks } // IsRelation returns whether the Kind represents a relation hook. func (kind Kind) IsRelation() bool { switch kind { case RelationJoined, RelationChanged, RelationDeparted, RelationBroken: return true } return false } // IsStorage returns whether the Kind represents a storage hook. func (kind Kind) IsStorage() bool { switch kind { case StorageAttached, StorageDetaching: return true } return false } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/.reviewboardrc0000664000175000017500000000017512672604527023167 0ustar marcomarcoREVIEWBOARD_URL = "https://reviews.vapour.ws/" REPOSITORY = "juju-charm" BRANCH = "master" TRACKING_BRANCH = "origin/master" charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/export_test.go0000664000175000017500000000074712672604527023244 0ustar marcomarco// Copyright 2011-2016 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package charm // Export meaningful bits for tests only. var ( IfaceExpander = ifaceExpander ValidateValue = validateValue ParsePayloadClass = parsePayloadClass ResourceSchema = resourceSchema ExtraBindingsSchema = extraBindingsSchema ValidateMetaExtraBindings = validateMetaExtraBindings ) func MissingSeriesError() error { return missingSeriesError } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/charm_test.go0000664000175000017500000001327712672604527023017 0ustar marcomarco// Copyright 2011, 2012, 2013 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. 
package charm_test import ( "bytes" "fmt" "io" "io/ioutil" "os" "path/filepath" stdtesting "testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils/fs" gc "gopkg.in/check.v1" "gopkg.in/yaml.v1" "gopkg.in/juju/charm.v6-unstable" ) func Test(t *stdtesting.T) { gc.TestingT(t) } type CharmSuite struct{} var _ = gc.Suite(&CharmSuite{}) func (s *CharmSuite) TestReadCharm(c *gc.C) { ch, err := charm.ReadCharm(charmDirPath(c, "dummy")) c.Assert(err, gc.IsNil) c.Assert(ch.Meta().Name, gc.Equals, "dummy") bPath := archivePath(c, readCharmDir(c, "dummy")) ch, err = charm.ReadCharm(bPath) c.Assert(err, gc.IsNil) c.Assert(ch.Meta().Name, gc.Equals, "dummy") } func (s *CharmSuite) TestReadCharmDirError(c *gc.C) { ch, err := charm.ReadCharm(c.MkDir()) c.Assert(err, gc.NotNil) c.Assert(ch, gc.Equals, nil) } func (s *CharmSuite) TestReadCharmArchiveError(c *gc.C) { path := filepath.Join(c.MkDir(), "path") err := ioutil.WriteFile(path, []byte("foo"), 0644) c.Assert(err, gc.IsNil) ch, err := charm.ReadCharm(path) c.Assert(err, gc.NotNil) c.Assert(ch, gc.Equals, nil) } func (s *CharmSuite) TestSeriesToUse(c *gc.C) { tests := []struct { series string supportedSeries []string seriesToUse string err string }{{ series: "", err: "series not specified and charm does not define any", }, { series: "trusty", seriesToUse: "trusty", }, { series: "trusty", supportedSeries: []string{"precise", "trusty"}, seriesToUse: "trusty", }, { series: "", supportedSeries: []string{"precise", "trusty"}, seriesToUse: "precise", }, { series: "wily", supportedSeries: []string{"precise", "trusty"}, err: `series "wily" not supported by charm.*`, }} for _, test := range tests { series, err := charm.SeriesForCharm(test.series, test.supportedSeries) if test.err != "" { c.Assert(err, gc.ErrorMatches, test.err) continue } c.Assert(err, jc.ErrorIsNil) c.Assert(series, jc.DeepEquals, test.seriesToUse) } } func (s *CharmSuite) IsUnsupportedSeriesError(c *gc.C) { err := 
charm.NewUnsupportedSeriesError("series", []string{"supported"}) c.Assert(charm.IsUnsupportedSeriesError(err), jc.IsTrue) c.Assert(charm.IsUnsupportedSeriesError(fmt.Errorf("foo")), jc.IsFalse) } func (s *CharmSuite) IsMissingSeriesError(c *gc.C) { err := charm.MissingSeriesError() c.Assert(charm.IsMissingSeriesError(err), jc.IsTrue) c.Assert(charm.IsMissingSeriesError(fmt.Errorf("foo")), jc.IsFalse) } func checkDummy(c *gc.C, f charm.Charm, path string) { c.Assert(f.Revision(), gc.Equals, 1) c.Assert(f.Meta().Name, gc.Equals, "dummy") c.Assert(f.Config().Options["title"].Default, gc.Equals, "My Title") c.Assert(f.Actions(), jc.DeepEquals, &charm.Actions{ map[string]charm.ActionSpec{ "snapshot": { Description: "Take a snapshot of the database.", Params: map[string]interface{}{ "type": "object", "description": "Take a snapshot of the database.", "title": "snapshot", "properties": map[string]interface{}{ "outfile": map[string]interface{}{ "description": "The file to write out to.", "type": "string", "default": "foo.bz2", }}}}}}) switch f := f.(type) { case *charm.CharmArchive: c.Assert(f.Path, gc.Equals, path) case *charm.CharmDir: c.Assert(f.Path, gc.Equals, path) } } type YamlHacker map[interface{}]interface{} func ReadYaml(r io.Reader) YamlHacker { data, err := ioutil.ReadAll(r) if err != nil { panic(err) } m := make(map[interface{}]interface{}) err = yaml.Unmarshal(data, m) if err != nil { panic(err) } return YamlHacker(m) } func (yh YamlHacker) Reader() io.Reader { data, err := yaml.Marshal(yh) if err != nil { panic(err) } return bytes.NewBuffer(data) } // charmDirPath returns the path to the charm with the // given name in the testing repository. func charmDirPath(c *gc.C, name string) string { path := filepath.Join("internal/test-charm-repo/quantal", name) assertIsDir(c, path) return path } // bundleDirPath returns the path to the bundle with the // given name in the testing repository. 
func bundleDirPath(c *gc.C, name string) string { path := filepath.Join("internal/test-charm-repo/bundle", name) assertIsDir(c, path) return path } func assertIsDir(c *gc.C, path string) { info, err := os.Stat(path) c.Assert(err, gc.IsNil) c.Assert(info.IsDir(), gc.Equals, true) } // readCharmDir returns the charm with the given // name from the testing repository. func readCharmDir(c *gc.C, name string) *charm.CharmDir { path := charmDirPath(c, name) ch, err := charm.ReadCharmDir(path) c.Assert(err, gc.IsNil) return ch } // readBundleDir returns the bundle with the // given name from the testing repository. func readBundleDir(c *gc.C, name string) *charm.BundleDir { path := bundleDirPath(c, name) ch, err := charm.ReadBundleDir(path) c.Assert(err, gc.IsNil) return ch } type ArchiverTo interface { ArchiveTo(w io.Writer) error } // archivePath archives the given charm or bundle // to a newly created file and returns the path to the // file. func archivePath(c *gc.C, a ArchiverTo) string { dir := c.MkDir() path := filepath.Join(dir, "archive") file, err := os.Create(path) c.Assert(err, gc.IsNil) defer file.Close() err = a.ArchiveTo(file) c.Assert(err, gc.IsNil) return path } // cloneDir recursively copies the path directory // into a new directory and returns the path // to it. func cloneDir(c *gc.C, path string) string { newPath := filepath.Join(c.MkDir(), filepath.Base(path)) err := fs.Copy(path, newPath) c.Assert(err, gc.IsNil) return newPath } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/config_test.go0000664000175000017500000003203112672604527023157 0ustar marcomarco// Copyright 2011, 2012, 2013 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. 
package charm_test import ( "bytes" "fmt" "strings" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/yaml.v1" "gopkg.in/juju/charm.v6-unstable" ) type ConfigSuite struct { config *charm.Config } var _ = gc.Suite(&ConfigSuite{}) func (s *ConfigSuite) SetUpSuite(c *gc.C) { // Just use a single shared config for the whole suite. There's no use case // for mutating a config, we assume that nobody will do so here. var err error s.config, err = charm.ReadConfig(bytes.NewBuffer([]byte(` options: title: default: My Title description: A descriptive title used for the service. type: string subtitle: default: "" description: An optional subtitle used for the service. outlook: description: No default outlook. # type defaults to string in python username: default: admin001 description: The name of the initial account (given admin permissions). type: string skill-level: description: A number indicating skill. type: int agility-ratio: description: A number from 0 to 1 indicating agility. type: float reticulate-splines: description: Whether to reticulate splines on launch, or not. 
type: boolean `))) c.Assert(err, gc.IsNil) } func (s *ConfigSuite) TestReadSample(c *gc.C) { c.Assert(s.config.Options, jc.DeepEquals, map[string]charm.Option{ "title": { Default: "My Title", Description: "A descriptive title used for the service.", Type: "string", }, "subtitle": { Default: "", Description: "An optional subtitle used for the service.", Type: "string", }, "username": { Default: "admin001", Description: "The name of the initial account (given admin permissions).", Type: "string", }, "outlook": { Description: "No default outlook.", Type: "string", }, "skill-level": { Description: "A number indicating skill.", Type: "int", }, "agility-ratio": { Description: "A number from 0 to 1 indicating agility.", Type: "float", }, "reticulate-splines": { Description: "Whether to reticulate splines on launch, or not.", Type: "boolean", }, }) } func (s *ConfigSuite) TestDefaultSettings(c *gc.C) { c.Assert(s.config.DefaultSettings(), jc.DeepEquals, charm.Settings{ "title": "My Title", "subtitle": "", "username": "admin001", "outlook": nil, "skill-level": nil, "agility-ratio": nil, "reticulate-splines": nil, }) } func (s *ConfigSuite) TestFilterSettings(c *gc.C) { settings := s.config.FilterSettings(charm.Settings{ "title": "something valid", "username": nil, "unknown": "whatever", "outlook": "", "skill-level": 5.5, "agility-ratio": true, "reticulate-splines": "hullo", }) c.Assert(settings, jc.DeepEquals, charm.Settings{ "title": "something valid", "username": nil, "outlook": "", }) } func (s *ConfigSuite) TestValidateSettings(c *gc.C) { for i, test := range []struct { info string input charm.Settings expect charm.Settings err string }{{ info: "nil settings are valid", expect: charm.Settings{}, }, { info: "empty settings are valid", input: charm.Settings{}, }, { info: "unknown keys are not valid", input: charm.Settings{"foo": nil}, err: `unknown option "foo"`, }, { info: "nil is valid for every value type", input: charm.Settings{ "outlook": nil, "skill-level": nil, 
"agility-ratio": nil, "reticulate-splines": nil, }, }, { info: "correctly-typed values are valid", input: charm.Settings{ "outlook": "stormy", "skill-level": int64(123), "agility-ratio": 0.5, "reticulate-splines": true, }, }, { info: "empty string-typed values stay empty", input: charm.Settings{"outlook": ""}, expect: charm.Settings{"outlook": ""}, }, { info: "almost-correctly-typed values are valid", input: charm.Settings{ "skill-level": 123, "agility-ratio": float32(0.5), }, expect: charm.Settings{ "skill-level": int64(123), "agility-ratio": 0.5, }, }, { info: "bad string", input: charm.Settings{"outlook": false}, err: `option "outlook" expected string, got false`, }, { info: "bad int", input: charm.Settings{"skill-level": 123.4}, err: `option "skill-level" expected int, got 123.4`, }, { info: "bad float", input: charm.Settings{"agility-ratio": "cheese"}, err: `option "agility-ratio" expected float, got "cheese"`, }, { info: "bad boolean", input: charm.Settings{"reticulate-splines": 101}, err: `option "reticulate-splines" expected boolean, got 101`, }} { c.Logf("test %d: %s", i, test.info) result, err := s.config.ValidateSettings(test.input) if test.err != "" { c.Check(err, gc.ErrorMatches, test.err) } else { c.Check(err, gc.IsNil) if test.expect == nil { c.Check(result, jc.DeepEquals, test.input) } else { c.Check(result, jc.DeepEquals, test.expect) } } } } var settingsWithNils = charm.Settings{ "outlook": nil, "skill-level": nil, "agility-ratio": nil, "reticulate-splines": nil, } var settingsWithValues = charm.Settings{ "outlook": "whatever", "skill-level": int64(123), "agility-ratio": 2.22, "reticulate-splines": true, } func (s *ConfigSuite) TestParseSettingsYAML(c *gc.C) { for i, test := range []struct { info string yaml string key string expect charm.Settings err string }{{ info: "bad structure", yaml: "`", err: `cannot parse settings data: .*`, }, { info: "bad key", yaml: "{}", key: "blah", err: `no settings found for "blah"`, }, { info: "bad settings key", 
yaml: "blah:\n ping: pong", key: "blah", err: `unknown option "ping"`, }, { info: "bad type for string", yaml: "blah:\n outlook: 123", key: "blah", err: `option "outlook" expected string, got 123`, }, { info: "bad type for int", yaml: "blah:\n skill-level: 12.345", key: "blah", err: `option "skill-level" expected int, got 12.345`, }, { info: "bad type for float", yaml: "blah:\n agility-ratio: blob", key: "blah", err: `option "agility-ratio" expected float, got "blob"`, }, { info: "bad type for boolean", yaml: "blah:\n reticulate-splines: 123", key: "blah", err: `option "reticulate-splines" expected boolean, got 123`, }, { info: "bad string for int", yaml: "blah:\n skill-level: cheese", key: "blah", err: `option "skill-level" expected int, got "cheese"`, }, { info: "bad string for float", yaml: "blah:\n agility-ratio: blob", key: "blah", err: `option "agility-ratio" expected float, got "blob"`, }, { info: "bad string for boolean", yaml: "blah:\n reticulate-splines: cannonball", key: "blah", err: `option "reticulate-splines" expected boolean, got "cannonball"`, }, { info: "empty dict is valid", yaml: "blah: {}", key: "blah", expect: charm.Settings{}, }, { info: "nil values are valid", yaml: `blah: outlook: null skill-level: null agility-ratio: null reticulate-splines: null`, key: "blah", expect: settingsWithNils, }, { info: "empty strings for bool options are not accepted", yaml: `blah: outlook: "" skill-level: 123 agility-ratio: 12.0 reticulate-splines: ""`, key: "blah", err: `option "reticulate-splines" expected boolean, got ""`, }, { info: "empty strings for int options are not accepted", yaml: `blah: outlook: "" skill-level: "" agility-ratio: 12.0 reticulate-splines: false`, key: "blah", err: `option "skill-level" expected int, got ""`, }, { info: "empty strings for float options are not accepted", yaml: `blah: outlook: "" skill-level: 123 agility-ratio: "" reticulate-splines: false`, key: "blah", err: `option "agility-ratio" expected float, got ""`, }, { info: 
"appropriate strings are valid", yaml: `blah: outlook: whatever skill-level: "123" agility-ratio: "2.22" reticulate-splines: "true"`, key: "blah", expect: settingsWithValues, }, { info: "appropriate types are valid", yaml: `blah: outlook: whatever skill-level: 123 agility-ratio: 2.22 reticulate-splines: y`, key: "blah", expect: settingsWithValues, }} { c.Logf("test %d: %s", i, test.info) result, err := s.config.ParseSettingsYAML([]byte(test.yaml), test.key) if test.err != "" { c.Check(err, gc.ErrorMatches, test.err) } else { c.Check(err, gc.IsNil) c.Check(result, jc.DeepEquals, test.expect) } } } func (s *ConfigSuite) TestParseSettingsStrings(c *gc.C) { for i, test := range []struct { info string input map[string]string expect charm.Settings err string }{{ info: "nil map is valid", expect: charm.Settings{}, }, { info: "empty map is valid", input: map[string]string{}, expect: charm.Settings{}, }, { info: "empty strings for string options are valid", input: map[string]string{"outlook": ""}, expect: charm.Settings{"outlook": ""}, }, { info: "empty strings for non-string options are invalid", input: map[string]string{"skill-level": ""}, err: `option "skill-level" expected int, got ""`, }, { info: "strings are converted", input: map[string]string{ "outlook": "whatever", "skill-level": "123", "agility-ratio": "2.22", "reticulate-splines": "true", }, expect: settingsWithValues, }, { info: "bad string for int", input: map[string]string{"skill-level": "cheese"}, err: `option "skill-level" expected int, got "cheese"`, }, { info: "bad string for float", input: map[string]string{"agility-ratio": "blob"}, err: `option "agility-ratio" expected float, got "blob"`, }, { info: "bad string for boolean", input: map[string]string{"reticulate-splines": "cannonball"}, err: `option "reticulate-splines" expected boolean, got "cannonball"`, }} { c.Logf("test %d: %s", i, test.info) result, err := s.config.ParseSettingsStrings(test.input) if test.err != "" { c.Check(err, gc.ErrorMatches, 
test.err) } else { c.Check(err, gc.IsNil) c.Check(result, jc.DeepEquals, test.expect) } } } func (s *ConfigSuite) TestConfigError(c *gc.C) { _, err := charm.ReadConfig(bytes.NewBuffer([]byte(`options: {t: {type: foo}}`))) c.Assert(err, gc.ErrorMatches, `invalid config: option "t" has unknown type "foo"`) } func (s *ConfigSuite) TestConfigWithNoOptions(c *gc.C) { _, err := charm.ReadConfig(strings.NewReader("other:\n")) c.Assert(err, gc.ErrorMatches, "invalid config: empty configuration") _, err = charm.ReadConfig(strings.NewReader("\n")) c.Assert(err, gc.ErrorMatches, "invalid config: empty configuration") _, err = charm.ReadConfig(strings.NewReader("null\n")) c.Assert(err, gc.ErrorMatches, "invalid config: empty configuration") _, err = charm.ReadConfig(strings.NewReader("options:\n")) c.Assert(err, gc.IsNil) } func (s *ConfigSuite) TestDefaultType(c *gc.C) { assertDefault := func(type_ string, value string, expected interface{}) { config := fmt.Sprintf(`options: {t: {type: %s, default: %s}}`, type_, value) result, err := charm.ReadConfig(bytes.NewBuffer([]byte(config))) c.Assert(err, gc.IsNil) c.Assert(result.Options["t"].Default, gc.Equals, expected) } assertDefault("boolean", "true", true) assertDefault("string", "golden grahams", "golden grahams") assertDefault("string", `""`, "") assertDefault("float", "2.2e11", 2.2e11) assertDefault("int", "99", int64(99)) assertTypeError := func(type_, str, value string) { config := fmt.Sprintf(`options: {t: {type: %s, default: %s}}`, type_, str) _, err := charm.ReadConfig(bytes.NewBuffer([]byte(config))) expected := fmt.Sprintf(`invalid config default: option "t" expected %s, got %s`, type_, value) c.Assert(err, gc.ErrorMatches, expected) } assertTypeError("boolean", "henry", `"henry"`) assertTypeError("string", "2.5", "2.5") assertTypeError("float", "123", "123") assertTypeError("int", "true", "true") } // When an empty config is supplied an error should be returned func (s *ConfigSuite) TestEmptyConfigReturnsError(c 
*gc.C) { config := "" result, err := charm.ReadConfig(bytes.NewBuffer([]byte(config))) c.Assert(result, gc.IsNil) c.Assert(err, gc.ErrorMatches, "invalid config: empty configuration") } func (s *ConfigSuite) TestYAMLMarshal(c *gc.C) { cfg, err := charm.ReadConfig(strings.NewReader(` options: minimal: type: string withdescription: type: int description: d withdefault: type: boolean description: d default: true `)) c.Assert(err, gc.IsNil) c.Assert(cfg.Options, gc.HasLen, 3) newYAML, err := yaml.Marshal(cfg) c.Assert(err, gc.IsNil) newCfg, err := charm.ReadConfig(bytes.NewReader(newYAML)) c.Assert(err, gc.IsNil) c.Assert(newCfg, jc.DeepEquals, cfg) } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/url.go0000664000175000017500000002435312672604527021465 0ustar marcomarco// Copyright 2011, 2012, 2013 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package charm import ( "encoding/json" "fmt" gourl "net/url" "regexp" "strconv" "strings" "github.com/juju/names" "gopkg.in/mgo.v2/bson" ) // Location represents a charm location, which must declare a path component // and a string representaion. type Location interface { Path() string String() string } // URL represents a charm or bundle location: // // cs:~joe/oneiric/wordpress // cs:oneiric/wordpress-42 // local:oneiric/wordpress // cs:~joe/wordpress // cs:wordpress // cs:precise/wordpress-20 // cs:development/precise/wordpress-20 // cs:~joe/development/wordpress // type URL struct { Schema string // "cs" or "local". User string // "joe". Name string // "wordpress". Revision int // -1 if unset, N otherwise. Series string // "precise" or "" if unset; "bundle" if it's a bundle. Channel Channel // "development" or "" if no channel. } var ErrUnresolvedUrl error = fmt.Errorf("charm or bundle url series is not resolved") // Channel represents different stages in the development of a charm or bundle. 
type Channel string const ( // DevelopmentChannel is the channel used for charms or bundles under // development. DevelopmentChannel Channel = "development" ) var ( validSeries = regexp.MustCompile("^[a-z]+([a-z0-9]+)?$") validName = regexp.MustCompile("^[a-z][a-z0-9]*(-[a-z0-9]*[a-z][a-z0-9]*)*$") ) // IsValidSeries reports whether series is a valid series in charm or bundle // URLs. func IsValidSeries(series string) bool { return validSeries.MatchString(series) } // IsValidChannel reports whether channel is a valid channel in charm or bundle // URLs. func IsValidChannel(channel Channel) bool { return channel == DevelopmentChannel } // IsValidName reports whether name is a valid charm or bundle name. func IsValidName(name string) bool { return validName.MatchString(name) } // WithRevision returns a URL equivalent to url but with Revision set // to revision. func (url *URL) WithRevision(revision int) *URL { urlCopy := *url urlCopy.Revision = revision return &urlCopy } // WithChannel returns a URL equivalent to url but with the given channel. func (url *URL) WithChannel(channel Channel) *URL { urlCopy := *url urlCopy.Channel = channel return &urlCopy } // MustParseURL works like ParseURL, but panics in case of errors. func MustParseURL(url string) *URL { u, err := ParseURL(url) if err != nil { panic(err) } return u } // ParseURL parses the provided charm URL string into its respective // structure. 
//
// Additionally, fully-qualified charmstore URLs are supported; note that this
// currently assumes that they will map to jujucharms.com (that is,
// fully-qualified URLs currently map to the 'cs' schema):
//
//	https://jujucharms.com/name
//	https://jujucharms.com/name/series
//	https://jujucharms.com/name/revision
//	https://jujucharms.com/name/series/revision
//	https://jujucharms.com/u/user/name
//	https://jujucharms.com/u/user/name/series
//	https://jujucharms.com/u/user/name/revision
//	https://jujucharms.com/u/user/name/series/revision
//	https://jujucharms.com/channel/name
//	https://jujucharms.com/channel/name/series
//	https://jujucharms.com/channel/name/revision
//	https://jujucharms.com/channel/name/series/revision
//	https://jujucharms.com/u/user/channel/name
//	https://jujucharms.com/u/user/channel/name/series
//	https://jujucharms.com/u/user/channel/name/revision
//	https://jujucharms.com/u/user/channel/name/series/revision
//
// A missing schema is assumed to be 'cs'.
func ParseURL(url string) (*URL, error) {
	// Check if we're dealing with a v1 or v2 URL.
	u, err := gourl.Parse(url)
	if err != nil {
		return nil, fmt.Errorf("cannot parse charm or bundle URL: %q", url)
	}
	if u.RawQuery != "" || u.Fragment != "" || u.User != nil {
		return nil, fmt.Errorf("charm or bundle URL %q has unrecognized parts", url)
	}
	var curl *URL
	switch {
	case u.Opaque != "":
		// Shortcut old-style URLs: "cs:series/name" parses with everything
		// after the colon in Opaque, so treat that as the path.
		u.Path = u.Opaque
		curl, err = parseV1URL(u, url)
	case u.Scheme == "http" || u.Scheme == "https":
		// Shortcut new-style URLs.
		curl, err = parseV2URL(u)
	default:
		// TODO: for now, fall through to parsing v1 references; this will be
		// expanded to be more robust in the future.
		curl, err = parseV1URL(u, url)
	}
	if err != nil {
		return nil, err
	}
	// A missing schema defaults to the charm store.
	if curl.Schema == "" {
		curl.Schema = "cs"
	}
	return curl, nil
}

// parseV1URL parses an old-style (v1) reference of the form
// [schema:][~user/][channel/][series/]name[-revision]. originalURL is the
// raw input string, used only for error messages.
func parseV1URL(url *gourl.URL, originalURL string) (*URL, error) {
	var r URL
	if url.Scheme != "" {
		r.Schema = url.Scheme
		if r.Schema != "cs" && r.Schema != "local" {
			return nil, fmt.Errorf("charm or bundle URL has invalid schema: %q", originalURL)
		}
	}
	// NOTE(review): i is always 0 here; url.Path[i:] is just url.Path.
	i := 0
	parts := strings.Split(url.Path[i:], "/")
	if len(parts) < 1 || len(parts) > 4 {
		return nil, fmt.Errorf("charm or bundle URL has invalid form: %q", originalURL)
	}

	// Optional ~user component; not allowed for local charms.
	if strings.HasPrefix(parts[0], "~") {
		if r.Schema == "local" {
			return nil, fmt.Errorf("local charm or bundle URL with user name: %q", originalURL)
		}
		r.User, parts = parts[0][1:], parts[1:]
	}

	// Optional channel component; not allowed for local charms.
	if len(parts) > 1 {
		if IsValidChannel(Channel(parts[0])) {
			if r.Schema == "local" {
				return nil, fmt.Errorf("local charm or bundle URL with channel: %q", originalURL)
			}
			r.Channel, parts = Channel(parts[0]), parts[1:]
		}
	}
	if len(parts) > 2 {
		return nil, fmt.Errorf("charm or bundle URL has invalid form: %q", originalURL)
	}

	// Optional series component.
	if len(parts) == 2 {
		r.Series, parts = parts[0], parts[1:]
		if !IsValidSeries(r.Series) {
			return nil, fmt.Errorf("charm or bundle URL has invalid series: %q", originalURL)
		}
	}
	if len(parts) < 1 {
		return nil, fmt.Errorf("URL without charm or bundle name: %q", originalURL)
	}

	// name[-revision]: scan backwards over trailing digits; a '-' directly
	// before them (and not at the very end) marks a revision suffix.
	r.Name = parts[0]
	r.Revision = -1
	for i := len(r.Name) - 1; i > 0; i-- {
		c := r.Name[i]
		if c >= '0' && c <= '9' {
			continue
		}
		if c == '-' && i != len(r.Name)-1 {
			var err error
			r.Revision, err = strconv.Atoi(r.Name[i+1:])
			if err != nil {
				panic(err) // We just checked it was right.
			}
			r.Name = r.Name[:i]
		}
		break
	}
	if r.User != "" {
		if !names.IsValidUser(r.User) {
			return nil, fmt.Errorf("charm or bundle URL has invalid user name: %q", originalURL)
		}
	}
	if !IsValidName(r.Name) {
		return nil, fmt.Errorf("URL has invalid charm or bundle name: %q", originalURL)
	}
	return &r, nil
}

// parseV2URL parses a fully-qualified (v2) jujucharms.com URL of the form
// /[u/user/][channel/]name[/series][/revision]; the schema is always "cs".
func parseV2URL(url *gourl.URL) (*URL, error) {
	var r URL
	r.Schema = "cs"
	parts := strings.Split(strings.Trim(url.Path, "/"), "/")
	if parts[0] == "u" {
		if len(parts) < 3 {
			return nil, fmt.Errorf(`charm or bundle URL %q malformed, expected "/u//"`, url)
		}
		r.User, parts = parts[1], parts[2:]
	}
	if len(parts) > 1 && IsValidChannel(Channel(parts[0])) {
		r.Channel, parts = Channel(parts[0]), parts[1:]
	}
	r.Name, parts = parts[0], parts[1:]
	r.Revision = -1
	if len(parts) > 0 {
		// The next component is either a bare revision or a series
		// optionally followed by a revision.
		revision, err := strconv.Atoi(parts[0])
		if err == nil {
			r.Revision = revision
		} else {
			r.Series = parts[0]
			if !IsValidSeries(r.Series) {
				return nil, fmt.Errorf("charm or bundle URL has invalid series: %q", url)
			}
			parts = parts[1:]
			if len(parts) == 1 {
				r.Revision, err = strconv.Atoi(parts[0])
				if err != nil {
					return nil, fmt.Errorf("charm or bundle URL has malformed revision: %q in %q", parts[0], url)
				}
			} else {
				if len(parts) != 0 {
					return nil, fmt.Errorf("charm or bundle URL has invalid form: %q", url)
				}
			}
		}
	}
	if r.User != "" {
		if !names.IsValidUser(r.User) {
			return nil, fmt.Errorf("charm or bundle URL has invalid user name: %q", url)
		}
	}
	if !IsValidName(r.Name) {
		return nil, fmt.Errorf("URL has invalid charm or bundle name: %q", url)
	}
	return &r, nil
}

// path builds the slash-separated path form of the URL:
// [~user/][channel/][series/]name[-revision].
func (r *URL) path() string {
	var parts []string
	if r.User != "" {
		parts = append(parts, fmt.Sprintf("~%s", r.User))
	}
	if r.Channel != "" {
		parts = append(parts, string(r.Channel))
	}
	if r.Series != "" {
		parts = append(parts, r.Series)
	}
	if r.Revision >= 0 {
		parts = append(parts, fmt.Sprintf("%s-%d", r.Name, r.Revision))
	} else {
		parts = append(parts, r.Name)
	}
	return strings.Join(parts, "/")
}

// Path returns the path component of the URL, without the schema.
func (r URL) Path() string { return r.path() }

// InferURL parses src
// as a reference, fills out the series in the
// returned URL using defaultSeries if necessary.
//
// This function is deprecated. New code should use ParseURL instead.
func InferURL(src, defaultSeries string) (*URL, error) {
	u, err := ParseURL(src)
	if err != nil {
		return nil, err
	}
	if u.Series == "" {
		if defaultSeries == "" {
			return nil, fmt.Errorf("cannot infer charm or bundle URL for %q: charm or bundle url series is not resolved", src)
		}
		u.Series = defaultSeries
	}
	return u, nil
}

// String returns the full "schema:path" form of the URL.
func (u URL) String() string {
	return fmt.Sprintf("%s:%s", u.Schema, u.Path())
}

// GetBSON turns u into a bson.Getter so it can be saved directly
// on a MongoDB database with mgo.
func (u *URL) GetBSON() (interface{}, error) {
	if u == nil {
		return nil, nil
	}
	return u.String(), nil
}

// SetBSON turns u into a bson.Setter so it can be loaded directly
// from a MongoDB database with mgo.
func (u *URL) SetBSON(raw bson.Raw) error {
	// Kind 10 is the BSON null element: leave u as its zero value.
	if raw.Kind == 10 {
		return bson.SetZero
	}
	var s string
	err := raw.Unmarshal(&s)
	if err != nil {
		return err
	}
	url, err := ParseURL(s)
	if err != nil {
		return err
	}
	*u = *url
	return nil
}

// MarshalJSON serializes the URL as its string form. It panics on a nil
// receiver, since that is a programming error rather than bad data.
func (u *URL) MarshalJSON() ([]byte, error) {
	if u == nil {
		panic("cannot marshal nil *charm.URL")
	}
	return json.Marshal(u.String())
}

// UnmarshalJSON parses a JSON string back into a URL.
func (u *URL) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	url, err := ParseURL(s)
	if err != nil {
		return err
	}
	*u = *url
	return nil
}

// Quote translates a charm url string into one which can be safely used
// in a file path. ASCII letters, ASCII digits, dot and dash stay the
// same; other characters are translated to their hex representation
// surrounded by underscores.
func Quote(unsafe string) string {
	// Worst case every byte expands to "_xx_" (4 bytes).
	safe := make([]byte, 0, len(unsafe)*4)
	for i := 0; i < len(unsafe); i++ {
		b := unsafe[i]
		switch {
		case b >= 'a' && b <= 'z',
			b >= 'A' && b <= 'Z',
			b >= '0' && b <= '9',
			b == '.',
			b == '-':
			safe = append(safe, b)
		default:
			safe = append(safe, fmt.Sprintf("_%02x_", b)...)
} } return string(safe) } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/dependencies.tsv0000664000175000017500000000246112672604527023514 0ustar marcomarcogithub.com/juju/errors git 4567a5e69fd3130ca0d89f69478e7ac025b67452 2015-03-27T19:24:31Z github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z github.com/juju/gojsonreference git f0d24ac5ee330baa21721cdff56d45e4ee42628e 2015-02-04T19:46:33Z github.com/juju/gojsonschema git e1ad140384f254c82f89450d9a7c8dd38a632838 2015-03-12T17:00:16Z github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z github.com/juju/names git a6a253b0a94cc79e99a68d284b970ffce2a11ecd 2015-07-09T13:59:32Z github.com/juju/schema git 1e25943f8c6fd6815282d6f1ac87091d21e14e19 2016-03-01T11:16:46Z github.com/juju/testing git ee18040b46bb1f8c93438383bd51ec77eb8c02ab 2016-01-12T21:04:04Z github.com/juju/utils git ef8480bcaabae506777530725c81d83a4de2fb06 2016-01-12T23:14:21Z github.com/juju/version git 102b12db83e38cb2ce7003544092ea7b0ca59e92 2015-11-07T04:32:11Z golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z gopkg.in/check.v1 git b3d3430320d4260e5fea99841af984b3badcea63 2015-06-26T10:50:28Z gopkg.in/mgo.v2 git f4923a569136442e900b8cf5c1a706c0a8b0883c 2015-08-21T15:31:23Z gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z gopkg.in/yaml.v2 git 53feefa2559fb8dfa8d81baad31be332c97d6c77 2015-09-24T14:23:14Z charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/actions.go0000664000175000017500000001753312672604527022325 0ustar marcomarco// Copyright 2011-2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. 
package charm

import (
	"fmt"
	"io"
	"io/ioutil"
	"regexp"
	"strings"

	"github.com/juju/errors"
	gjs "github.com/juju/gojsonschema"
	"gopkg.in/yaml.v1"
)

// prohibitedSchemaKeys are JSON-Schema keys rejected by cleanse because this
// version of juju cannot handle schema references.
var prohibitedSchemaKeys = map[string]bool{"$ref": true, "$schema": true}

// actionNameRule matches a lowercase, hyphen-separated action name that
// starts and ends with a letter.
var actionNameRule = regexp.MustCompile("^[a-z](?:[a-z-]*[a-z])?$")

// Actions defines the available actions for the charm. Additional params
// may be added as metadata at a future time (e.g. version.)
type Actions struct {
	ActionSpecs map[string]ActionSpec `yaml:"actions,omitempty" bson:",omitempty"`
}

// NewActions returns an empty Actions value.
// Build this out further if it becomes necessary.
func NewActions() *Actions {
	return &Actions{}
}

// ActionSpec is a definition of the parameters and traits of an Action.
// The Params map is expected to conform to JSON-Schema Draft 4 as defined at
// http://json-schema.org/draft-04/schema# (see http://json-schema.org/latest/json-schema-core.html)
type ActionSpec struct {
	Description string
	Params      map[string]interface{}
}

// ValidateParams validates the passed params map against the given ActionSpec
// and returns any error encountered.
// Usage:
//
//	err := ch.Actions().ActionSpecs["snapshot"].ValidateParams(someMap)
func (spec *ActionSpec) ValidateParams(params map[string]interface{}) error {
	// Load the schema from the Charm.
	specLoader := gjs.NewGoLoader(spec.Params)
	schema, err := gjs.NewSchema(specLoader)
	if err != nil {
		return err
	}
	// Load the params as a document to validate.
	// If an empty map was passed, we need an empty map to validate against.
	p := map[string]interface{}{}
	if len(params) > 0 {
		p = params
	}
	docLoader := gjs.NewGoLoader(p)
	results, err := schema.Validate(docLoader)
	if err != nil {
		return err
	}
	if results.Valid() {
		return nil
	}
	// Handle any errors generated by the Validate().
	// Collect every individual validation failure into one message.
	var errorStrings []string
	for _, validationError := range results.Errors() {
		errorStrings = append(errorStrings, validationError.String())
	}
	return errors.Errorf("validation failed: %s", strings.Join(errorStrings, "; "))
}

// InsertDefaults inserts the schema's default values in target using
// github.com/juju/gojsonschema. If a nil target is received, an empty map
// will be created as the target. The target is then mutated to include the
// defaults.
//
// The returned map will be the transformed or created target map.
func (spec *ActionSpec) InsertDefaults(target map[string]interface{}) (map[string]interface{}, error) {
	specLoader := gjs.NewGoLoader(spec.Params)
	schema, err := gjs.NewSchema(specLoader)
	if err != nil {
		return target, err
	}
	return schema.InsertDefaults(target)
}

// ReadActionsYaml builds an Actions spec from a charm's actions.yaml.
func ReadActionsYaml(r io.Reader) (*Actions, error) {
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	result := &Actions{
		ActionSpecs: map[string]ActionSpec{},
	}
	var unmarshaledActions map[string]map[string]interface{}
	if err := yaml.Unmarshal(data, &unmarshaledActions); err != nil {
		return nil, err
	}
	for name, actionSpec := range unmarshaledActions {
		if valid := actionNameRule.MatchString(name); !valid {
			return nil, fmt.Errorf("bad action name %s", name)
		}
		// Build the JSON-Schema document for this action, starting from a
		// skeleton that individual keys below may override.
		desc := "No description"
		thisActionSchema := map[string]interface{}{
			"description": desc,
			"type":        "object",
			"title":       name,
			"properties":  map[string]interface{}{},
		}
		for key, value := range actionSpec {
			switch key {
			case "description":
				// These fields must be strings.
				typed, ok := value.(string)
				if !ok {
					return nil, errors.Errorf("value for schema key %q must be a string", key)
				}
				thisActionSchema[key] = typed
				desc = typed
			case "title":
				// These fields must be strings.
				typed, ok := value.(string)
				if !ok {
					return nil, errors.Errorf("value for schema key %q must be a string", key)
				}
				thisActionSchema[key] = typed
			case "required":
				// "required" must be a YAML list of parameter names.
				typed, ok := value.([]interface{})
				if !ok {
					return nil, errors.Errorf("value for schema key %q must be a YAML list", key)
				}
				thisActionSchema[key] = typed
			case "params":
				// Clean any map[interface{}]interface{}s out so they don't
				// cause problems with BSON serialization later.
				cleansedParams, err := cleanse(value)
				if err != nil {
					return nil, err
				}
				// JSON-Schema must be a map
				typed, ok := cleansedParams.(map[string]interface{})
				if !ok {
					return nil, errors.New("params failed to parse as a map")
				}
				thisActionSchema["properties"] = typed
			default:
				// In case this has nested maps, we must clean them out.
				typed, err := cleanse(value)
				if err != nil {
					return nil, err
				}
				thisActionSchema[key] = typed
			}
		}
		// Make sure the new Params doc conforms to JSON-Schema
		// Draft 4 (http://json-schema.org/latest/json-schema-core.html)
		schemaLoader := gjs.NewGoLoader(thisActionSchema)
		_, err := gjs.NewSchema(schemaLoader)
		if err != nil {
			return nil, errors.Annotatef(err, "invalid params schema for action schema %s", name)
		}
		// Now assign the resulting schema to the final entry for the result.
		result.ActionSpecs[name] = ActionSpec{
			Description: desc,
			Params:      thisActionSchema,
		}
	}
	return result, nil
}

// cleanse rejects schemas containing references or maps keyed with non-
// strings, and coerces acceptable maps to contain only maps with string keys.
func cleanse(input interface{}) (interface{}, error) {
	switch typedInput := input.(type) {

	// In this case, recurse in.
	case map[string]interface{}:
		newMap := make(map[string]interface{})
		for key, value := range typedInput {
			if prohibitedSchemaKeys[key] {
				return nil, fmt.Errorf("schema key %q not compatible with this version of juju", key)
			}
			newValue, err := cleanse(value)
			if err != nil {
				return nil, err
			}
			newMap[key] = newValue
		}
		return newMap, nil

	// Coerce keys to strings and error out if there's a problem; then recurse.
	case map[interface{}]interface{}:
		newMap := make(map[string]interface{})
		for key, value := range typedInput {
			typedKey, ok := key.(string)
			if !ok {
				return nil, errors.New("map keyed with non-string value")
			}
			newMap[typedKey] = value
		}
		return cleanse(newMap)

	// Recurse
	case []interface{}:
		newSlice := make([]interface{}, 0)
		for _, sliceValue := range typedInput {
			newSliceValue, err := cleanse(sliceValue)
			if err != nil {
				// NOTE(review): this discards the underlying error and
				// substitutes the map-key message — presumably a copy/paste
				// slip; confirm before changing behavior.
				return nil, errors.New("map keyed with non-string value")
			}
			newSlice = append(newSlice, newSliceValue)
		}
		return newSlice, nil

	// Other kinds of values are OK.
	default:
		return input, nil
	}
}

// recurseMapOnKeys returns the value of a map keyed recursively by the
// strings given in "keys". Thus, recurseMapOnKeys({a,b}, {a:{b:{c:d}}})
// would return {c:d}.
func recurseMapOnKeys(keys []string, params map[string]interface{}) (interface{}, bool) {
	key, rest := keys[0], keys[1:]
	answer, ok := params[key]
	// If we're out of keys, we have our answer.
	if len(rest) == 0 {
		return answer, ok
	}
	// If we're not out of keys, but we tried a key that wasn't in the
	// map, there's no answer.
	if !ok {
		return nil, false
	}
	switch typed := answer.(type) {
	// If our value is a map[s]i{}, we can keep recursing.
	case map[string]interface{}:
		return recurseMapOnKeys(keys[1:], typed)
	// If it's a map[i{}]i{}, we need to check whether it's a map[s]i{}.
	case map[interface{}]interface{}:
		m := make(map[string]interface{})
		for k, v := range typed {
			if tK, ok := k.(string); ok {
				m[tK] = v
			} else {
				// If it's not, we don't have something we
				// can work with.
				return nil, false
			}
		}
		// If it is, recurse into it.
		return recurseMapOnKeys(keys[1:], m)
	// Otherwise, we're trying to recurse into something we don't know
	// how to deal with, so our answer is that we don't have an answer.
	default:
		return nil, false
	}
}
charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/payloads.go0000664000175000017500000000252012672604527022467 0ustar  marcomarco// Copyright 2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

package charm

import (
	"fmt"

	"github.com/juju/schema"
)

// payloadClassSchema validates the metadata entry for a payload class:
// a map with a single required string field "type".
var payloadClassSchema = schema.FieldMap(
	schema.Fields{
		"type": schema.String(),
	},
	schema.Defaults{},
)

// PayloadClass holds the information about a payload class, as stored
// in a charm's metadata.
type PayloadClass struct {
	// Name identifies the payload class.
	Name string

	// Type identifies the type of payload (e.g. kvm, docker).
	Type string
}

// parsePayloadClasses builds the name->PayloadClass map from raw metadata
// data; a nil input yields a nil map.
func parsePayloadClasses(data interface{}) map[string]PayloadClass {
	if data == nil {
		return nil
	}

	result := make(map[string]PayloadClass)
	for name, val := range data.(map[string]interface{}) {
		result[name] = parsePayloadClass(name, val)
	}

	return result
}

// parsePayloadClass builds a single PayloadClass from its raw metadata
// value; a nil value yields a class with only the name set.
func parsePayloadClass(name string, data interface{}) PayloadClass {
	payloadClass := PayloadClass{
		Name: name,
	}
	if data == nil {
		return payloadClass
	}
	pcMap := data.(map[string]interface{})

	if val := pcMap["type"]; val != nil {
		payloadClass.Type = val.(string)
	}

	return payloadClass
}

// Validate checks the payload class to ensure its data is valid.
func (pc PayloadClass) Validate() error {
	if pc.Name == "" {
		return fmt.Errorf("payload class missing name")
	}

	if pc.Type == "" {
		return fmt.Errorf("payload class missing type")
	}

	return nil
}
charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/resources.go0000664000175000017500000000214612672604527022671 0ustar  marcomarco// Copyright 2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm

import (
	"fmt"

	"github.com/juju/schema"

	"gopkg.in/juju/charm.v6-unstable/resource"
)

// resourceSchema validates a resource's metadata entry: "type" defaults to
// "file", "filename" is required, "description" defaults to empty.
var resourceSchema = schema.FieldMap(
	schema.Fields{
		"type":        schema.String(),
		"filename":    schema.String(), // TODO(ericsnow) Change to "path"?
		"description": schema.String(),
	},
	schema.Defaults{
		"type":        resource.TypeFile.String(),
		"description": "",
	},
)

// parseMetaResources builds the name->resource.Meta map from raw metadata
// data; a nil input yields nil, nil.
func parseMetaResources(data interface{}) (map[string]resource.Meta, error) {
	if data == nil {
		return nil, nil
	}

	result := make(map[string]resource.Meta)
	for name, val := range data.(map[string]interface{}) {
		meta, err := resource.ParseMeta(name, val)
		if err != nil {
			return nil, err
		}
		result[name] = meta
	}

	return result, nil
}

// validateMetaResources checks that every resource's map key matches its
// declared name and that each resource validates.
func validateMetaResources(resources map[string]resource.Meta) error {
	for name, res := range resources {
		if res.Name != name {
			return fmt.Errorf("mismatch on resource name (%q != %q)", res.Name, name)
		}
		if err := res.Validate(); err != nil {
			return err
		}
	}
	return nil
}
charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/charmarchive.go0000664000175000017500000001672112672604527023317 0ustar  marcomarco// Copyright 2011, 2012, 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

package charm

import (
	"archive/zip"
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"

	"github.com/juju/utils/set"
	ziputil "github.com/juju/utils/zip"
)

// The CharmArchive type encapsulates access to data and operations
// on a charm archive.
type CharmArchive struct {
	zopen zipOpener

	Path     string // May be empty if CharmArchive wasn't read from a file
	meta     *Meta
	config   *Config
	metrics  *Metrics
	actions  *Actions
	revision int
}

// Trick to ensure *CharmArchive implements the Charm interface.
var _ Charm = (*CharmArchive)(nil)

// ReadCharmArchive returns a CharmArchive for the charm in path.
func ReadCharmArchive(path string) (*CharmArchive, error) {
	a, err := readCharmArchive(newZipOpenerFromPath(path))
	if err != nil {
		return nil, err
	}
	a.Path = path
	return a, nil
}

// ReadCharmArchiveBytes returns a CharmArchive read from the given data.
// Make sure the archive fits in memory before using this.
func ReadCharmArchiveBytes(data []byte) (archive *CharmArchive, err error) {
	zopener := newZipOpenerFromReader(bytes.NewReader(data), int64(len(data)))
	return readCharmArchive(zopener)
}

// ReadCharmArchiveFromReader returns a CharmArchive that uses
// r to read the charm. The given size must hold the number
// of available bytes in the file.
//
// Note that the caller is responsible for closing r - methods on
// the returned CharmArchive may fail after that.
func ReadCharmArchiveFromReader(r io.ReaderAt, size int64) (archive *CharmArchive, err error) {
	return readCharmArchive(newZipOpenerFromReader(r, size))
}

// readCharmArchive loads the charm's metadata, config, metrics, actions and
// revision from the zip provided by zopen. metadata.yaml is mandatory; the
// other files are optional and fall back to sensible defaults when absent.
func readCharmArchive(zopen zipOpener) (archive *CharmArchive, err error) {
	b := &CharmArchive{
		zopen: zopen,
	}
	zipr, err := zopen.openZip()
	if err != nil {
		return nil, err
	}
	defer zipr.Close()
	reader, err := zipOpenFile(zipr, "metadata.yaml")
	if err != nil {
		return nil, err
	}
	b.meta, err = ReadMeta(reader)
	reader.Close()
	if err != nil {
		return nil, err
	}

	// config.yaml is optional: a missing file means an empty config.
	reader, err = zipOpenFile(zipr, "config.yaml")
	if _, ok := err.(*noCharmArchiveFile); ok {
		b.config = NewConfig()
	} else if err != nil {
		return nil, err
	} else {
		b.config, err = ReadConfig(reader)
		reader.Close()
		if err != nil {
			return nil, err
		}
	}

	// metrics.yaml is optional: only a real read failure is an error.
	reader, err = zipOpenFile(zipr, "metrics.yaml")
	if err == nil {
		b.metrics, err = ReadMetrics(reader)
		reader.Close()
		if err != nil {
			return nil, err
		}
	} else if _, ok := err.(*noCharmArchiveFile); !ok {
		return nil, err
	}

	// actions.yaml is optional: a missing file means no actions.
	reader, err = zipOpenFile(zipr, "actions.yaml")
	if _, ok := err.(*noCharmArchiveFile); ok {
		b.actions = NewActions()
	} else if err != nil {
		return nil, err
	} else {
		b.actions, err = ReadActionsYaml(reader)
		reader.Close()
		if err != nil {
			return nil, err
		}
	}

	// The revision file is optional; fall back to the metadata's
	// old-style revision when it is absent.
	reader, err = zipOpenFile(zipr, "revision")
	if err != nil {
		if _, ok := err.(*noCharmArchiveFile); !ok {
			return nil, err
		}
		b.revision = b.meta.OldRevision
	} else {
		_, err = fmt.Fscan(reader, &b.revision)
		if err != nil {
			return nil, errors.New("invalid revision file")
		}
	}

	return b, nil
}

// zipOpenFile returns a reader for the named entry in the archive, or a
// *noCharmArchiveFile error if no entry has that exact name.
func zipOpenFile(zipr *zipReadCloser, path string) (rc io.ReadCloser, err error) {
	for _, fh := range zipr.File {
		if fh.Name == path {
			return fh.Open()
		}
	}
	return nil, &noCharmArchiveFile{path}
}

// noCharmArchiveFile is the error reported when a requested archive entry
// does not exist; callers type-assert on it to treat the file as optional.
type noCharmArchiveFile struct {
	path string
}

func (err noCharmArchiveFile) Error() string {
	return fmt.Sprintf("archive file %q not found", err.path)
}

// Revision returns the revision number for the charm
// expanded in dir.
func (a *CharmArchive) Revision() int {
	return a.revision
}

// SetRevision changes the charm revision number. This affects the
// revision reported by Revision and the revision of the charm
// directory created by ExpandTo.
func (a *CharmArchive) SetRevision(revision int) {
	a.revision = revision
}

// Meta returns the Meta representing the metadata.yaml file from archive.
func (a *CharmArchive) Meta() *Meta {
	return a.meta
}

// Config returns the Config representing the config.yaml file
// for the charm archive.
func (a *CharmArchive) Config() *Config {
	return a.config
}

// Metrics returns the Metrics representing the metrics.yaml file
// for the charm archive.
func (a *CharmArchive) Metrics() *Metrics {
	return a.metrics
}

// Actions returns the Actions map for the actions.yaml file for the charm
// archive.
func (a *CharmArchive) Actions() *Actions {
	return a.actions
}

// zipReadCloser couples a zip.Reader with the Closer that owns its
// underlying storage.
type zipReadCloser struct {
	io.Closer
	*zip.Reader
}

// zipOpener holds the information needed to open a zip
// file.
type zipOpener interface {
	openZip() (*zipReadCloser, error)
}

// newZipOpenerFromPath returns a zipOpener that can be
// used to read the archive from the given path.
func newZipOpenerFromPath(path string) zipOpener {
	return &zipPathOpener{path: path}
}

// newZipOpenerFromReader returns a zipOpener that can be
// used to read the archive from the given ReaderAt
// holding the given number of bytes.
func newZipOpenerFromReader(r io.ReaderAt, size int64) zipOpener {
	return &zipReaderOpener{
		r:    r,
		size: size,
	}
}

// zipPathOpener opens the archive from a file on disk.
type zipPathOpener struct {
	path string
}

func (zo *zipPathOpener) openZip() (*zipReadCloser, error) {
	f, err := os.Open(zo.path)
	if err != nil {
		return nil, err
	}
	fi, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, err
	}
	r, err := zip.NewReader(f, fi.Size())
	if err != nil {
		f.Close()
		return nil, err
	}
	// The file is owned by the returned value and closed via its Closer.
	return &zipReadCloser{Closer: f, Reader: r}, nil
}

// zipReaderOpener opens the archive from an in-memory/caller-owned ReaderAt.
type zipReaderOpener struct {
	r    io.ReaderAt
	size int64
}

func (zo *zipReaderOpener) openZip() (*zipReadCloser, error) {
	r, err := zip.NewReader(zo.r, zo.size)
	if err != nil {
		return nil, err
	}
	// Nothing to close here: the caller owns the underlying reader.
	return &zipReadCloser{Closer: ioutil.NopCloser(nil), Reader: r}, nil
}

// Manifest returns a set of the charm's contents.
func (a *CharmArchive) Manifest() (set.Strings, error) {
	zipr, err := a.zopen.openZip()
	if err != nil {
		return set.NewStrings(), err
	}
	defer zipr.Close()
	paths, err := ziputil.Find(zipr.Reader, "*")
	if err != nil {
		return set.NewStrings(), err
	}
	manifest := set.NewStrings(paths...)
	// We always write out a revision file, even if there isn't one in the
	// archive; and we always strip ".", because that's sometimes not present.
	manifest.Add("revision")
	manifest.Remove(".")
	return manifest, nil
}

// ExpandTo expands the charm archive into dir, creating it if necessary.
// If any errors occur during the expansion procedure, the process will
// abort.
func (a *CharmArchive) ExpandTo(dir string) error {
	zipr, err := a.zopen.openZip()
	if err != nil {
		return err
	}
	defer zipr.Close()
	if err := ziputil.ExtractAll(zipr.Reader, dir); err != nil {
		return err
	}
	hooksDir := filepath.Join(dir, "hooks")
	fixHook := fixHookFunc(hooksDir, a.meta.Hooks())
	// A charm without hooks has no hooks dir; that is not an error.
	if err := filepath.Walk(hooksDir, fixHook); err != nil {
		if !os.IsNotExist(err) {
			return err
		}
	}
	// Always write out the (possibly overridden) revision.
	revFile, err := os.Create(filepath.Join(dir, "revision"))
	if err != nil {
		return err
	}
	_, err = revFile.Write([]byte(strconv.Itoa(a.revision)))
	revFile.Close()
	return err
}

// fixHookFunc returns a WalkFunc that makes sure hooks are owner-executable.
func fixHookFunc(hooksDir string, hookNames map[string]bool) filepath.WalkFunc {
	return func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		mode := info.Mode()
		// Do not descend into subdirectories of the hooks dir.
		if path != hooksDir && mode.IsDir() {
			return filepath.SkipDir
		}
		if name := filepath.Base(path); hookNames[name] {
			if mode&0100 == 0 {
				return os.Chmod(path, mode|0100)
			}
		}
		return nil
	}
}
charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/charmdir.go0000664000175000017500000001645312672604527022456 0ustar  marcomarco// Copyright 2011, 2012, 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

package charm

import (
	"archive/zip"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"syscall"
)

// The CharmDir type encapsulates access to data and operations
// on a charm directory.
type CharmDir struct {
	Path     string
	meta     *Meta
	config   *Config
	metrics  *Metrics
	actions  *Actions
	revision int
}

// Trick to ensure *CharmDir implements the Charm interface.
var _ Charm = (*CharmDir)(nil)

// ReadCharmDir returns a CharmDir representing an expanded charm directory.
func ReadCharmDir(path string) (dir *CharmDir, err error) {
	dir = &CharmDir{Path: path}
	// metadata.yaml is mandatory.
	file, err := os.Open(dir.join("metadata.yaml"))
	if err != nil {
		return nil, err
	}
	dir.meta, err = ReadMeta(file)
	file.Close()
	if err != nil {
		return nil, err
	}

	// config.yaml is optional: a path error means an empty config.
	file, err = os.Open(dir.join("config.yaml"))
	if _, ok := err.(*os.PathError); ok {
		dir.config = NewConfig()
	} else if err != nil {
		return nil, err
	} else {
		dir.config, err = ReadConfig(file)
		file.Close()
		if err != nil {
			return nil, err
		}
	}

	// metrics.yaml is optional.
	file, err = os.Open(dir.join("metrics.yaml"))
	if err == nil {
		dir.metrics, err = ReadMetrics(file)
		file.Close()
		if err != nil {
			return nil, err
		}
	} else if !os.IsNotExist(err) {
		return nil, err
	}

	// actions.yaml is optional: a path error means no actions.
	file, err = os.Open(dir.join("actions.yaml"))
	if _, ok := err.(*os.PathError); ok {
		dir.actions = NewActions()
	} else if err != nil {
		return nil, err
	} else {
		dir.actions, err = ReadActionsYaml(file)
		file.Close()
		if err != nil {
			return nil, err
		}
	}

	// The revision file is optional; fall back to the metadata's
	// old-style revision when it is absent.
	if file, err = os.Open(dir.join("revision")); err == nil {
		_, err = fmt.Fscan(file, &dir.revision)
		file.Close()
		if err != nil {
			return nil, errors.New("invalid revision file")
		}
	} else {
		dir.revision = dir.meta.OldRevision
	}

	return dir, nil
}

// join builds a path rooted at the charm's expanded directory
// path and the extra path components provided.
func (dir *CharmDir) join(parts ...string) string {
	parts = append([]string{dir.Path}, parts...)
	return filepath.Join(parts...)
}

// Revision returns the revision number for the charm
// expanded in dir.
func (dir *CharmDir) Revision() int {
	return dir.revision
}

// Meta returns the Meta representing the metadata.yaml file
// for the charm expanded in dir.
func (dir *CharmDir) Meta() *Meta {
	return dir.meta
}

// Config returns the Config representing the config.yaml file
// for the charm expanded in dir.
func (dir *CharmDir) Config() *Config {
	return dir.config
}

// Metrics returns the Metrics representing the metrics.yaml file
// for the charm expanded in dir.
func (dir *CharmDir) Metrics() *Metrics { return dir.metrics } // Actions returns the Actions representing the actions.yaml file // for the charm expanded in dir. func (dir *CharmDir) Actions() *Actions { return dir.actions } // SetRevision changes the charm revision number. This affects // the revision reported by Revision and the revision of the // charm archived by ArchiveTo. // The revision file in the charm directory is not modified. func (dir *CharmDir) SetRevision(revision int) { dir.revision = revision } // SetDiskRevision does the same as SetRevision but also changes // the revision file in the charm directory. func (dir *CharmDir) SetDiskRevision(revision int) error { dir.SetRevision(revision) file, err := os.OpenFile(dir.join("revision"), os.O_WRONLY|os.O_CREATE, 0644) if err != nil { return err } _, err = file.Write([]byte(strconv.Itoa(revision))) file.Close() return err } // resolveSymlinkedRoot returns the target destination of a // charm root directory if the root directory is a symlink. func resolveSymlinkedRoot(rootPath string) (string, error) { info, err := os.Lstat(rootPath) if err == nil && info.Mode()&os.ModeSymlink != 0 { rootPath, err = filepath.EvalSymlinks(rootPath) if err != nil { return "", fmt.Errorf("cannot read path symlink at %q: %v", rootPath, err) } } return rootPath, nil } // ArchiveTo creates a charm file from the charm expanded in dir. // By convention a charm archive should have a ".charm" suffix. func (dir *CharmDir) ArchiveTo(w io.Writer) error { return writeArchive(w, dir.Path, dir.revision, dir.Meta().Hooks()) } func writeArchive(w io.Writer, path string, revision int, hooks map[string]bool) error { zipw := zip.NewWriter(w) defer zipw.Close() // The root directory may be symlinked elsewhere so // resolve that before creating the zip. 
rootPath, err := resolveSymlinkedRoot(path) if err != nil { return err } zp := zipPacker{zipw, rootPath, hooks} if revision != -1 { zp.AddRevision(revision) } return filepath.Walk(rootPath, zp.WalkFunc()) } type zipPacker struct { *zip.Writer root string hooks map[string]bool } func (zp *zipPacker) WalkFunc() filepath.WalkFunc { return func(path string, fi os.FileInfo, err error) error { return zp.visit(path, fi, err) } } func (zp *zipPacker) AddRevision(revision int) error { h := &zip.FileHeader{Name: "revision"} h.SetMode(syscall.S_IFREG | 0644) w, err := zp.CreateHeader(h) if err == nil { _, err = w.Write([]byte(strconv.Itoa(revision))) } return err } func (zp *zipPacker) visit(path string, fi os.FileInfo, err error) error { if err != nil { return err } relpath, err := filepath.Rel(zp.root, path) if err != nil { return err } method := zip.Deflate hidden := len(relpath) > 1 && relpath[0] == '.' if fi.IsDir() { if relpath == "build" { return filepath.SkipDir } if hidden { return filepath.SkipDir } relpath += "/" method = zip.Store } mode := fi.Mode() if err := checkFileType(relpath, mode); err != nil { return err } if mode&os.ModeSymlink != 0 { method = zip.Store } if hidden || relpath == "revision" { return nil } h := &zip.FileHeader{ Name: relpath, Method: method, } perm := os.FileMode(0644) if mode&os.ModeSymlink != 0 { perm = 0777 } else if mode&0100 != 0 { perm = 0755 } if filepath.Dir(relpath) == "hooks" { hookName := filepath.Base(relpath) if _, ok := zp.hooks[hookName]; ok && !fi.IsDir() && mode&0100 == 0 { logger.Warningf("making %q executable in charm", path) perm = perm | 0100 } } h.SetMode(mode&^0777 | perm) w, err := zp.CreateHeader(h) if err != nil || fi.IsDir() { return err } var data []byte if mode&os.ModeSymlink != 0 { target, err := os.Readlink(path) if err != nil { return err } if err := checkSymlinkTarget(zp.root, relpath, target); err != nil { return err } data = []byte(target) _, err = w.Write(data) } else { file, err := os.Open(path) if err 
!= nil { return err } defer file.Close() _, err = io.Copy(w, file) } return err } func checkSymlinkTarget(basedir, symlink, target string) error { if filepath.IsAbs(target) { return fmt.Errorf("symlink %q is absolute: %q", symlink, target) } p := filepath.Join(filepath.Dir(symlink), target) if p == ".." || strings.HasPrefix(p, "../") { return fmt.Errorf("symlink %q links out of charm: %q", symlink, target) } return nil } func checkFileType(path string, mode os.FileMode) error { e := "file has an unknown type: %q" switch mode & os.ModeType { case os.ModeDir, os.ModeSymlink, 0: return nil case os.ModeNamedPipe: e = "file is a named pipe: %q" case os.ModeSocket: e = "file is a socket: %q" case os.ModeDevice: e = "file is a device: %q" } return fmt.Errorf(e, path) } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/payloads_test.go0000664000175000017500000000363612672604527023537 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package charm_test import ( jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" ) var _ = gc.Suite(&payloadClassSuite{}) type payloadClassSuite struct{} func (s *payloadClassSuite) TestParsePayloadClassOkay(c *gc.C) { name := "my-payload" data := map[string]interface{}{ "type": "docker", } payloadClass := charm.ParsePayloadClass(name, data) c.Check(payloadClass, jc.DeepEquals, charm.PayloadClass{ Name: "my-payload", Type: "docker", }) } func (s *payloadClassSuite) TestParsePayloadClassMissingName(c *gc.C) { name := "" data := map[string]interface{}{ "type": "docker", } payloadClass := charm.ParsePayloadClass(name, data) c.Check(payloadClass, jc.DeepEquals, charm.PayloadClass{ Name: "", Type: "docker", }) } func (s *payloadClassSuite) TestParsePayloadClassEmpty(c *gc.C) { name := "my-payload" var data map[string]interface{} payloadClass := charm.ParsePayloadClass(name, data) c.Check(payloadClass, jc.DeepEquals, charm.PayloadClass{ Name: 
"my-payload", }) } func (s *payloadClassSuite) TestValidateFull(c *gc.C) { payloadClass := charm.PayloadClass{ Name: "my-payload", Type: "docker", } err := payloadClass.Validate() c.Check(err, jc.ErrorIsNil) } func (s *payloadClassSuite) TestValidateZeroValue(c *gc.C) { var payloadClass charm.PayloadClass err := payloadClass.Validate() c.Check(err, gc.NotNil) } func (s *payloadClassSuite) TestValidateMissingName(c *gc.C) { payloadClass := charm.PayloadClass{ Type: "docker", } err := payloadClass.Validate() c.Check(err, gc.ErrorMatches, `payload class missing name`) } func (s *payloadClassSuite) TestValidateMissingType(c *gc.C) { payloadClass := charm.PayloadClass{ Name: "my-payload", } err := payloadClass.Validate() c.Check(err, gc.ErrorMatches, `payload class missing type`) } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/extra_bindings.go0000664000175000017500000000417012672604527023656 0ustar marcomarco// Copyright 2016 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package charm import ( "fmt" "strings" "github.com/juju/schema" "github.com/juju/utils/set" ) // ExtraBinding represents an extra bindable endpoint that is not a relation. type ExtraBinding struct { Name string `bson:"name" json:"Name"` } // When specified, the "extra-bindings" section in the metadata.yaml // should have the following format: // // extra-bindings: // "": // ... // Endpoint names are strings and must not match existing relation names from // the Provides, Requires, or Peers metadata sections. The values beside each // endpoint name must be left out (i.e. "foo": is invalid). 
// extraBindingsSchema accepts a map of non-empty binding names to nil
// values, matching the "extra-bindings" metadata section format
// described above.
var extraBindingsSchema = schema.Map(schema.NonEmptyString("binding name"), schema.Nil(""))

// parseMetaExtraBindings converts the coerced "extra-bindings" section
// of metadata.yaml into a name-to-ExtraBinding map. A nil input yields
// a nil map with no error.
func parseMetaExtraBindings(data interface{}) (map[string]ExtraBinding, error) {
	if data == nil {
		return nil, nil
	}
	bindingsMap := data.(map[interface{}]interface{})
	result := make(map[string]ExtraBinding)
	for name, _ := range bindingsMap {
		stringName := name.(string)
		result[stringName] = ExtraBinding{Name: stringName}
	}
	return result, nil
}

// validateMetaExtraBindings checks that the extra bindings declared in
// meta are non-empty when specified, internally consistent (key matches
// binding name), and do not collide with any relation name.
func validateMetaExtraBindings(meta Meta) error {
	extraBindings := meta.ExtraBindings
	if extraBindings == nil {
		return nil
	} else if len(extraBindings) == 0 {
		return fmt.Errorf("extra bindings cannot be empty when specified")
	}
	usedExtraNames := set.NewStrings()
	for name, binding := range extraBindings {
		if binding.Name == "" || name == "" {
			return fmt.Errorf("missing binding name")
		}
		if binding.Name != name {
			return fmt.Errorf("mismatched extra binding name: got %q, expected %q", binding.Name, name)
		}
		usedExtraNames.Add(name)
	}
	// Extra binding names must be disjoint from all relation names
	// (provides, requires and peers combined).
	usedRelationNames := set.NewStrings()
	for relationName, _ := range meta.CombinedRelations() {
		usedRelationNames.Add(relationName)
	}
	notAllowedNames := usedExtraNames.Intersection(usedRelationNames)
	if !notAllowedNames.IsEmpty() {
		notAllowedList := strings.Join(notAllowedNames.SortedValues(), ", ")
		return fmt.Errorf("relation names (%s) cannot be used in extra bindings", notAllowedList)
	}
	return nil
}
charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/meta.go0000664000175000017500000006035012672604527021606 0ustar marcomarco// Copyright 2011, 2012, 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

package charm

import (
	"fmt"
	"io"
	"io/ioutil"
	"regexp"
	"strconv"
	"strings"

	"github.com/juju/schema"
	"github.com/juju/utils"
	"github.com/juju/version"
	"gopkg.in/yaml.v1"

	"github.com/juju/errors"

	"gopkg.in/juju/charm.v6-unstable/hooks"
	"gopkg.in/juju/charm.v6-unstable/resource"
)

// RelationScope describes the scope of a relation.
type RelationScope string // Note that schema doesn't support custom string types, // so when we use these values in a schema.Checker, // we must store them as strings, not RelationScopes. const ( ScopeGlobal RelationScope = "global" ScopeContainer RelationScope = "container" ) // RelationRole defines the role of a relation. type RelationRole string const ( RoleProvider RelationRole = "provider" RoleRequirer RelationRole = "requirer" RolePeer RelationRole = "peer" ) // StorageType defines a storage type. type StorageType string const ( StorageBlock StorageType = "block" StorageFilesystem StorageType = "filesystem" ) // Storage represents a charm's storage requirement. type Storage struct { // Name is the name of the store. // // Name has no default, and must be specified. Name string `bson:"name"` // Description is a description of the store. // // Description has no default, and is optional. Description string `bson:"description"` // Type is the storage type: filesystem or block-device. // // Type has no default, and must be specified. Type StorageType `bson:"type"` // Shared indicates that the storage is shared between all units of // a service deployed from the charm. It is an error to attempt to // assign non-shareable storage to a "shared" storage requirement. // // Shared defaults to false. Shared bool `bson:"shared"` // ReadOnly indicates that the storage should be made read-only if // possible. If the storage cannot be made read-only, Juju will warn // the user. // // ReadOnly defaults to false. ReadOnly bool `bson:"read-only"` // CountMin is the number of storage instances that must be attached // to the charm for it to be useful; the charm will not install until // this number has been satisfied. This must be a non-negative number. // // CountMin defaults to 1 for singleton stores. CountMin int `bson:"countmin"` // CountMax is the largest number of storage instances that can be // attached to the charm. If CountMax is -1, then there is no upper // bound. 
// // CountMax defaults to 1 for singleton stores. CountMax int `bson:"countmax"` // MinimumSize is the minimum size of store that the charm needs to // work at all. This is not a recommended size or a comfortable size // or a will-work-well size, just a bare minimum below which the charm // is going to break. // MinimumSize requires a unit, one of MGTPEZY, and is stored as MiB. // // There is no default MinimumSize; if left unspecified, a provider // specific default will be used, typically 1GB for block storage. MinimumSize uint64 `bson:"minimum-size"` // Location is the mount location for filesystem stores. For multi- // stores, the location acts as the parent directory for each mounted // store. // // Location has no default, and is optional. Location string `bson:"location,omitempty"` // Properties allow the charm author to characterise the relative storage // performance requirements and sensitivities for each store. // eg “transient†is used to indicate that non persistent storage is acceptable, // such as tmpfs or ephemeral instance disks. // // Properties has no default, and is optional. Properties []string `bson:"properties,omitempty"` } // Relation represents a single relation defined in the charm // metadata.yaml file. type Relation struct { Name string `bson:"name"` Role RelationRole `bson:"role"` Interface string `bson:"interface"` Optional bool `bson:"optional"` Limit int `bson:"limit"` Scope RelationScope `bson:"scope"` } // ImplementedBy returns whether the relation is implemented by the supplied charm. 
func (r Relation) ImplementedBy(ch Charm) bool { if r.IsImplicit() { return true } var m map[string]Relation switch r.Role { case RoleProvider: m = ch.Meta().Provides case RoleRequirer: m = ch.Meta().Requires case RolePeer: m = ch.Meta().Peers default: panic(fmt.Errorf("unknown relation role %q", r.Role)) } rel, found := m[r.Name] if !found { return false } if rel.Interface == r.Interface { switch r.Scope { case ScopeGlobal: return rel.Scope != ScopeContainer case ScopeContainer: return true default: panic(fmt.Errorf("unknown relation scope %q", r.Scope)) } } return false } // IsImplicit returns whether the relation is supplied by juju itself, // rather than by a charm. func (r Relation) IsImplicit() bool { return (r.Name == "juju-info" && r.Interface == "juju-info" && r.Role == RoleProvider) } // Meta represents all the known content that may be defined // within a charm's metadata.yaml file. // Note: Series is serialised for backward compatibility // as "supported-series" because a previous // charm version had an incompatible Series field that // was unused in practice but still serialized. This // only applies to JSON because Meta has a custom // YAML marshaller. 
type Meta struct {
	Name        string `bson:"name" json:"Name"`
	Summary     string `bson:"summary" json:"Summary"`
	Description string `bson:"description" json:"Description"`
	Subordinate bool   `bson:"subordinate" json:"Subordinate"`
	// Provides, Requires and Peers hold the charm's relation
	// endpoints, keyed by relation name.
	Provides map[string]Relation `bson:"provides,omitempty" json:"Provides,omitempty"`
	Requires map[string]Relation `bson:"requires,omitempty" json:"Requires,omitempty"`
	Peers    map[string]Relation `bson:"peers,omitempty" json:"Peers,omitempty"`
	// ExtraBindings holds bindable endpoints that are not relations.
	ExtraBindings map[string]ExtraBinding `bson:"extra-bindings,omitempty" json:"ExtraBindings,omitempty"`
	Format        int                     `bson:"format,omitempty" json:"Format,omitempty"`
	OldRevision   int                     `bson:"oldrevision,omitempty" json:"OldRevision"` // Obsolete
	Categories    []string                `bson:"categories,omitempty" json:"Categories,omitempty"`
	Tags          []string                `bson:"tags,omitempty" json:"Tags,omitempty"`
	// Series lists the OS series the charm supports; serialized to
	// JSON as "SupportedSeries" (see the type comment above).
	Series         []string                 `bson:"series,omitempty" json:"SupportedSeries,omitempty"`
	Storage        map[string]Storage       `bson:"storage,omitempty" json:"Storage,omitempty"`
	PayloadClasses map[string]PayloadClass  `bson:"payloadclasses,omitempty" json:"PayloadClasses,omitempty"`
	Resources      map[string]resource.Meta `bson:"resources,omitempty" json:"Resources,omitempty"`
	Terms          []string                 `bson:"terms,omitempty" json:"Terms,omitempty"`
	// MinJujuVersion is the minimum Juju version the charm requires.
	MinJujuVersion version.Number `bson:"min-juju-version,omitempty" json:"min-juju-version,omitempty"`
}

// generateRelationHooks registers in allHooks every relation hook name
// ("<relName>-<hook>") for the given relation name.
func generateRelationHooks(relName string, allHooks map[string]bool) {
	for _, hookName := range hooks.RelationHooks() {
		allHooks[fmt.Sprintf("%s-%s", relName, hookName)] = true
	}
}

// Hooks returns a map of all possible valid hooks, taking relations
// into account. It's a map to enable fast lookups, and the value is
// always true.
func (m Meta) Hooks() map[string]bool {
	allHooks := make(map[string]bool)
	// Unit hooks
	for _, hookName := range hooks.UnitHooks() {
		allHooks[string(hookName)] = true
	}
	// Relation hooks, generated for every declared relation endpoint.
	for hookName := range m.Provides {
		generateRelationHooks(hookName, allHooks)
	}
	for hookName := range m.Requires {
		generateRelationHooks(hookName, allHooks)
	}
	for hookName := range m.Peers {
		generateRelationHooks(hookName, allHooks)
	}
	return allHooks
}

// Used for parsing Categories and Tags.
// A nil input returns nil; elements are assumed to be strings
// (guaranteed by the schema coercion that precedes parsing).
func parseStringList(list interface{}) []string {
	if list == nil {
		return nil
	}
	slice := list.([]interface{})
	result := make([]string, 0, len(slice))
	for _, elem := range slice {
		result = append(result, elem.(string))
	}
	return result
}

// termNameRE matches term ids of the form "name/revision".
var termNameRE = regexp.MustCompile("^[a-z]+([a-z0-9-]+)/[0-9]+?$")

// checkTerm returns an error unless s is a well-formed term id
// matching termNameRE.
func checkTerm(s string) error {
	match := termNameRE.FindStringSubmatch(s)
	if match == nil {
		return fmt.Errorf("invalid term name %q: must match %s", s, termNameRE.String())
	}
	return nil
}

// ReadMeta reads the content of a metadata.yaml file and returns
// its representation.
func ReadMeta(r io.Reader) (meta *Meta, err error) {
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return
	}
	raw := make(map[interface{}]interface{})
	err = yaml.Unmarshal(data, raw)
	if err != nil {
		return
	}
	// Coerce the raw YAML through the charm schema so that parseMeta
	// can rely on field types and defaults.
	v, err := charmSchema.Coerce(raw, nil)
	if err != nil {
		return nil, errors.New("metadata: " + err.Error())
	}
	m := v.(map[string]interface{})
	meta, err = parseMeta(m)
	if err != nil {
		return nil, err
	}
	if err := meta.Check(); err != nil {
		return nil, err
	}
	// TODO(ericsnow) This line should be moved into parseMeta as soon
	// as the terms code gets fixed.
	meta.Terms = parseStringList(m["terms"])
	return meta, nil
}

// parseMeta builds a Meta from a schema-coerced metadata map.
// It assumes m has already passed charmSchema.Coerce, so the type
// assertions below are safe.
func parseMeta(m map[string]interface{}) (*Meta, error) {
	var meta Meta
	var err error

	meta.Name = m["name"].(string)
	// Schema decodes as int64, but the int range should be good
	// enough for revisions.
	meta.Summary = m["summary"].(string)
	meta.Description = m["description"].(string)
	meta.Provides = parseRelations(m["provides"], RoleProvider)
	meta.Requires = parseRelations(m["requires"], RoleRequirer)
	meta.Peers = parseRelations(m["peers"], RolePeer)
	meta.ExtraBindings, err = parseMetaExtraBindings(m["extra-bindings"])
	if err != nil {
		return nil, err
	}
	meta.Format = int(m["format"].(int64))
	meta.Categories = parseStringList(m["categories"])
	meta.Tags = parseStringList(m["tags"])
	if subordinate := m["subordinate"]; subordinate != nil {
		meta.Subordinate = subordinate.(bool)
	}
	if rev := m["revision"]; rev != nil {
		// Obsolete
		meta.OldRevision = int(m["revision"].(int64))
	}
	meta.Series = parseStringList(m["series"])
	meta.Storage = parseStorage(m["storage"])
	meta.PayloadClasses = parsePayloadClasses(m["payloads"])
	if ver := m["min-juju-version"]; ver != nil {
		minver, err := version.Parse(ver.(string))
		if err != nil {
			return &meta, errors.Annotate(err, "invalid min-juju-version")
		}
		meta.MinJujuVersion = minver
	}
	resources, err := parseMetaResources(m["resources"])
	if err != nil {
		return nil, err
	}
	meta.Resources = resources
	return &meta, nil
}

// MarshalYAML implements yaml.Marshaler (yaml.v2).
func (m Meta) MarshalYAML() (interface{}, error) { var minver string if m.MinJujuVersion != version.Zero { minver = m.MinJujuVersion.String() } return struct { Name string `yaml:"name"` Summary string `yaml:"summary"` Description string `yaml:"description"` Provides map[string]marshaledRelation `yaml:"provides,omitempty"` Requires map[string]marshaledRelation `yaml:"requires,omitempty"` Peers map[string]marshaledRelation `yaml:"peers,omitempty"` ExtraBindings map[string]interface{} `yaml:"extra-bindings,omitempty"` Categories []string `yaml:"categories,omitempty"` Tags []string `yaml:"tags,omitempty"` Subordinate bool `yaml:"subordinate,omitempty"` Series []string `yaml:"series,omitempty"` Terms []string `yaml:"terms,omitempty"` MinJujuVersion string `yaml:"min-juju-version,omitempty"` }{ Name: m.Name, Summary: m.Summary, Description: m.Description, Provides: marshaledRelations(m.Provides), Requires: marshaledRelations(m.Requires), Peers: marshaledRelations(m.Peers), ExtraBindings: marshaledExtraBindings(m.ExtraBindings), Categories: m.Categories, Tags: m.Tags, Subordinate: m.Subordinate, Series: m.Series, Terms: m.Terms, MinJujuVersion: minver, }, nil } // GetYAML implements yaml.Getter.GetYAML (yaml.v1). func (m Meta) GetYAML() (tag string, value interface{}) { v, _ := m.MarshalYAML() return "", v } func marshaledRelations(relations map[string]Relation) map[string]marshaledRelation { marshaled := make(map[string]marshaledRelation) for name, relation := range relations { marshaled[name] = marshaledRelation(relation) } return marshaled } type marshaledRelation Relation func (r marshaledRelation) GetYAML() (tag string, value interface{}) { // See calls to ifaceExpander in charmSchema. noLimit := 1 if r.Role == RoleProvider { noLimit = 0 } if !r.Optional && r.Limit == noLimit && r.Scope == ScopeGlobal { // All attributes are default, so use the simple string form of the relation. 
return "", r.Interface } mr := struct { Interface string `yaml:"interface"` Limit *int `yaml:"limit,omitempty"` Optional bool `yaml:"optional,omitempty"` Scope RelationScope `yaml:"scope,omitempty"` }{ Interface: r.Interface, Optional: r.Optional, } if r.Limit != noLimit { mr.Limit = &r.Limit } if r.Scope != ScopeGlobal { mr.Scope = r.Scope } return "", mr } func marshaledExtraBindings(bindings map[string]ExtraBinding) map[string]interface{} { marshaled := make(map[string]interface{}) for _, binding := range bindings { marshaled[binding.Name] = nil } return marshaled } // Check checks that the metadata is well-formed. func (meta Meta) Check() error { // Check for duplicate or forbidden relation names or interfaces. names := map[string]bool{} checkRelations := func(src map[string]Relation, role RelationRole) error { for name, rel := range src { if rel.Name != name { return fmt.Errorf("charm %q has mismatched relation name %q; expected %q", meta.Name, rel.Name, name) } if rel.Role != role { return fmt.Errorf("charm %q has mismatched role %q; expected %q", meta.Name, rel.Role, role) } // Container-scoped require relations on subordinates are allowed // to use the otherwise-reserved juju-* namespace. 
if !meta.Subordinate || role != RoleRequirer || rel.Scope != ScopeContainer { if reservedName(name) { return fmt.Errorf("charm %q using a reserved relation name: %q", meta.Name, name) } } if role != RoleRequirer { if reservedName(rel.Interface) { return fmt.Errorf("charm %q relation %q using a reserved interface: %q", meta.Name, name, rel.Interface) } } if names[name] { return fmt.Errorf("charm %q using a duplicated relation name: %q", meta.Name, name) } names[name] = true } return nil } if err := checkRelations(meta.Provides, RoleProvider); err != nil { return err } if err := checkRelations(meta.Requires, RoleRequirer); err != nil { return err } if err := checkRelations(meta.Peers, RolePeer); err != nil { return err } if err := validateMetaExtraBindings(meta); err != nil { return fmt.Errorf("charm %q has invalid extra bindings: %v", meta.Name, err) } // Subordinate charms must have at least one relation that // has container scope, otherwise they can't relate to the // principal. if meta.Subordinate { valid := false if meta.Requires != nil { for _, relationData := range meta.Requires { if relationData.Scope == ScopeContainer { valid = true break } } } if !valid { return fmt.Errorf("subordinate charm %q lacks \"requires\" relation with container scope", meta.Name) } } for _, series := range meta.Series { if !IsValidSeries(series) { return fmt.Errorf("charm %q declares invalid series: %q", meta.Name, series) } } names = make(map[string]bool) for name, store := range meta.Storage { if store.Location != "" && store.Type != StorageFilesystem { return fmt.Errorf(`charm %q storage %q: location may not be specified for "type: %s"`, meta.Name, name, store.Type) } if store.Type == "" { return fmt.Errorf("charm %q storage %q: type must be specified", meta.Name, name) } if store.CountMin < 0 { return fmt.Errorf("charm %q storage %q: invalid minimum count %d", meta.Name, name, store.CountMin) } if store.CountMax == 0 || store.CountMax < -1 { return fmt.Errorf("charm %q storage 
%q: invalid maximum count %d", meta.Name, name, store.CountMax) } if names[name] { return fmt.Errorf("charm %q storage %q: duplicated storage name", meta.Name, name) } names[name] = true } for name, payloadClass := range meta.PayloadClasses { if payloadClass.Name != name { return fmt.Errorf("mismatch on payload class name (%q != %q)", payloadClass.Name, name) } if err := payloadClass.Validate(); err != nil { return err } } if err := validateMetaResources(meta.Resources); err != nil { return err } for _, term := range meta.Terms { if terr := checkTerm(term); terr != nil { return terr } } return nil } func reservedName(name string) bool { return name == "juju" || strings.HasPrefix(name, "juju-") } func parseRelations(relations interface{}, role RelationRole) map[string]Relation { if relations == nil { return nil } result := make(map[string]Relation) for name, rel := range relations.(map[string]interface{}) { relMap := rel.(map[string]interface{}) relation := Relation{ Name: name, Role: role, Interface: relMap["interface"].(string), Optional: relMap["optional"].(bool), } if scope := relMap["scope"]; scope != nil { relation.Scope = RelationScope(scope.(string)) } if relMap["limit"] != nil { // Schema defaults to int64, but we know // the int range should be more than enough. relation.Limit = int(relMap["limit"].(int64)) } result[name] = relation } return result } // CombinedRelations returns all defined relations, regardless of their type in // a single map. func (m Meta) CombinedRelations() map[string]Relation { combined := make(map[string]Relation) for name, relation := range m.Provides { combined[name] = relation } for name, relation := range m.Requires { combined[name] = relation } for name, relation := range m.Peers { combined[name] = relation } return combined } // Schema coercer that expands the interface shorthand notation. // A consistent format is easier to work with than considering the // potential difference everywhere. 
// // Supports the following variants:: // // provides: // server: riak // admin: http // foobar: // interface: blah // // provides: // server: // interface: mysql // limit: // optional: false // // In all input cases, the output is the fully specified interface // representation as seen in the mysql interface description above. func ifaceExpander(limit interface{}) schema.Checker { return ifaceExpC{limit} } type ifaceExpC struct { limit interface{} } var ( stringC = schema.String() mapC = schema.StringMap(schema.Any()) ) func (c ifaceExpC) Coerce(v interface{}, path []string) (newv interface{}, err error) { s, err := stringC.Coerce(v, path) if err == nil { newv = map[string]interface{}{ "interface": s, "limit": c.limit, "optional": false, "scope": string(ScopeGlobal), } return } v, err = mapC.Coerce(v, path) if err != nil { return } m := v.(map[string]interface{}) if _, ok := m["limit"]; !ok { m["limit"] = c.limit } return ifaceSchema.Coerce(m, path) } var ifaceSchema = schema.FieldMap( schema.Fields{ "interface": schema.String(), "limit": schema.OneOf(schema.Const(nil), schema.Int()), "scope": schema.OneOf(schema.Const(string(ScopeGlobal)), schema.Const(string(ScopeContainer))), "optional": schema.Bool(), }, schema.Defaults{ "scope": string(ScopeGlobal), "optional": false, }, ) func parseStorage(stores interface{}) map[string]Storage { if stores == nil { return nil } result := make(map[string]Storage) for name, store := range stores.(map[string]interface{}) { storeMap := store.(map[string]interface{}) store := Storage{ Name: name, Type: StorageType(storeMap["type"].(string)), Shared: storeMap["shared"].(bool), ReadOnly: storeMap["read-only"].(bool), CountMin: 1, CountMax: 1, } if desc, ok := storeMap["description"].(string); ok { store.Description = desc } if multiple, ok := storeMap["multiple"].(map[string]interface{}); ok { if r, ok := multiple["range"].([2]int); ok { store.CountMin, store.CountMax = r[0], r[1] } } if minSize, ok := 
storeMap["minimum-size"].(uint64); ok { store.MinimumSize = minSize } if loc, ok := storeMap["location"].(string); ok { store.Location = loc } if properties, ok := storeMap["properties"].([]interface{}); ok { for _, p := range properties { store.Properties = append(store.Properties, p.(string)) } } result[name] = store } return result } var storageSchema = schema.FieldMap( schema.Fields{ "type": schema.OneOf(schema.Const(string(StorageBlock)), schema.Const(string(StorageFilesystem))), "shared": schema.Bool(), "read-only": schema.Bool(), "multiple": schema.FieldMap( schema.Fields{ "range": storageCountC{}, // m, m-n, m+, m- }, schema.Defaults{}, ), "minimum-size": storageSizeC{}, "location": schema.String(), "description": schema.String(), "properties": schema.List(propertiesC{}), }, schema.Defaults{ "shared": false, "read-only": false, "multiple": schema.Omit, "location": schema.Omit, "description": schema.Omit, "properties": schema.Omit, "minimum-size": schema.Omit, }, ) type storageCountC struct{} var storageCountRE = regexp.MustCompile("^([0-9]+)([-+]|-[0-9]+)$") func (c storageCountC) Coerce(v interface{}, path []string) (newv interface{}, err error) { s, err := schema.OneOf(schema.Int(), stringC).Coerce(v, path) if err != nil { return nil, err } if m, ok := s.(int64); ok { // We've got a count of the form "m": m represents // both the minimum and maximum. if m <= 0 { return nil, fmt.Errorf("%s: invalid count %v", strings.Join(path[1:], ""), m) } return [2]int{int(m), int(m)}, nil } match := storageCountRE.FindStringSubmatch(s.(string)) if match == nil { return nil, fmt.Errorf("%s: value %q does not match 'm', 'm-n', or 'm+'", strings.Join(path[1:], ""), s) } var m, n int if m, err = strconv.Atoi(match[1]); err != nil { return nil, err } if len(match[2]) == 1 { // We've got a count of the form "m+" or "m-": // m represents the minimum, and there is no // upper bound. 
n = -1 } else { if n, err = strconv.Atoi(match[2][1:]); err != nil { return nil, err } } return [2]int{m, n}, nil } type storageSizeC struct{} func (c storageSizeC) Coerce(v interface{}, path []string) (newv interface{}, err error) { s, err := schema.String().Coerce(v, path) if err != nil { return nil, err } return utils.ParseSize(s.(string)) } type propertiesC struct{} func (c propertiesC) Coerce(v interface{}, path []string) (newv interface{}, err error) { return schema.OneOf(schema.Const("transient")).Coerce(v, path) } var charmSchema = schema.FieldMap( schema.Fields{ "name": schema.String(), "summary": schema.String(), "description": schema.String(), "peers": schema.StringMap(ifaceExpander(int64(1))), "provides": schema.StringMap(ifaceExpander(nil)), "requires": schema.StringMap(ifaceExpander(int64(1))), "extra-bindings": extraBindingsSchema, "revision": schema.Int(), // Obsolete "format": schema.Int(), "subordinate": schema.Bool(), "categories": schema.List(schema.String()), "tags": schema.List(schema.String()), "series": schema.List(schema.String()), "storage": schema.StringMap(storageSchema), "payloads": schema.StringMap(payloadClassSchema), "resources": schema.StringMap(resourceSchema), "terms": schema.List(schema.String()), "min-juju-version": schema.String(), }, schema.Defaults{ "provides": schema.Omit, "requires": schema.Omit, "peers": schema.Omit, "extra-bindings": schema.Omit, "revision": schema.Omit, "format": 1, "subordinate": schema.Omit, "categories": schema.Omit, "tags": schema.Omit, "series": schema.Omit, "storage": schema.Omit, "payloads": schema.Omit, "resources": schema.Omit, "terms": schema.Omit, "min-juju-version": schema.Omit, }, ) charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/url_test.go0000664000175000017500000004556412672604527022533 0ustar marcomarco// Copyright 2011, 2012, 2013 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. 
package charm_test import ( "encoding/json" "fmt" "regexp" "strings" gc "gopkg.in/check.v1" "gopkg.in/mgo.v2/bson" "gopkg.in/juju/charm.v6-unstable" ) type URLSuite struct{} var _ = gc.Suite(&URLSuite{}) var urlTests = []struct { s, err string exact string url *charm.URL }{{ s: "cs:~user/series/name", url: &charm.URL{"cs", "user", "name", -1, "series", ""}, }, { s: "cs:~user/series/name-0", url: &charm.URL{"cs", "user", "name", 0, "series", ""}, }, { s: "cs:series/name", url: &charm.URL{"cs", "", "name", -1, "series", ""}, }, { s: "cs:series/name-42", url: &charm.URL{"cs", "", "name", 42, "series", ""}, }, { s: "local:series/name-1", url: &charm.URL{"local", "", "name", 1, "series", ""}, }, { s: "local:series/name", url: &charm.URL{"local", "", "name", -1, "series", ""}, }, { s: "local:series/n0-0n-n0", url: &charm.URL{"local", "", "n0-0n-n0", -1, "series", ""}, }, { s: "cs:~user/name", url: &charm.URL{"cs", "user", "name", -1, "", ""}, }, { s: "cs:name", url: &charm.URL{"cs", "", "name", -1, "", ""}, }, { s: "local:name", url: &charm.URL{"local", "", "name", -1, "", ""}, }, { s: "cs:~user/development/series/name-0", url: &charm.URL{"cs", "user", "name", 0, "series", charm.DevelopmentChannel}, }, { s: "cs:~user/development/series/name-0", url: &charm.URL{"cs", "user", "name", 0, "series", charm.DevelopmentChannel}, }, { s: "cs:development/series/name", url: &charm.URL{"cs", "", "name", -1, "series", charm.DevelopmentChannel}, }, { s: "cs:development/series/name-42", url: &charm.URL{"cs", "", "name", 42, "series", charm.DevelopmentChannel}, }, { s: "cs:~user/development/name", url: &charm.URL{"cs", "user", "name", -1, "", charm.DevelopmentChannel}, }, { s: "cs:development/name", url: &charm.URL{"cs", "", "name", -1, "", charm.DevelopmentChannel}, }, { s: "http://jujucharms.com/u/user/name/series/1", url: &charm.URL{"cs", "user", "name", 1, "series", ""}, exact: "cs:~user/series/name-1", }, { s: "http://www.jujucharms.com/u/user/name/series/1", url: &charm.URL{"cs", 
"user", "name", 1, "series", ""}, exact: "cs:~user/series/name-1", }, { s: "https://www.jujucharms.com/u/user/name/series/1", url: &charm.URL{"cs", "user", "name", 1, "series", ""}, exact: "cs:~user/series/name-1", }, { s: "https://jujucharms.com/u/user/name/series/1", url: &charm.URL{"cs", "user", "name", 1, "series", ""}, exact: "cs:~user/series/name-1", }, { s: "https://jujucharms.com/u/user/name/series", url: &charm.URL{"cs", "user", "name", -1, "series", ""}, exact: "cs:~user/series/name", }, { s: "https://jujucharms.com/u/user/name/1", url: &charm.URL{"cs", "user", "name", 1, "", ""}, exact: "cs:~user/name-1", }, { s: "https://jujucharms.com/u/user/name", url: &charm.URL{"cs", "user", "name", -1, "", ""}, exact: "cs:~user/name", }, { s: "https://jujucharms.com/name", url: &charm.URL{"cs", "", "name", -1, "", ""}, exact: "cs:name", }, { s: "https://jujucharms.com/name/series", url: &charm.URL{"cs", "", "name", -1, "series", ""}, exact: "cs:series/name", }, { s: "https://jujucharms.com/name/1", url: &charm.URL{"cs", "", "name", 1, "", ""}, exact: "cs:name-1", }, { s: "https://jujucharms.com/name/series/1", url: &charm.URL{"cs", "", "name", 1, "series", ""}, exact: "cs:series/name-1", }, { s: "https://jujucharms.com/u/user/name/series/1/", url: &charm.URL{"cs", "user", "name", 1, "series", ""}, exact: "cs:~user/series/name-1", }, { s: "https://jujucharms.com/u/user/name/series/", url: &charm.URL{"cs", "user", "name", -1, "series", ""}, exact: "cs:~user/series/name", }, { s: "https://jujucharms.com/u/user/name/1/", url: &charm.URL{"cs", "user", "name", 1, "", ""}, exact: "cs:~user/name-1", }, { s: "https://jujucharms.com/u/user/name/", url: &charm.URL{"cs", "user", "name", -1, "", ""}, exact: "cs:~user/name", }, { s: "https://jujucharms.com/name/", url: &charm.URL{"cs", "", "name", -1, "", ""}, exact: "cs:name", }, { s: "https://jujucharms.com/name/series/", url: &charm.URL{"cs", "", "name", -1, "series", ""}, exact: "cs:series/name", }, { s: 
"https://jujucharms.com/name/1/", url: &charm.URL{"cs", "", "name", 1, "", ""}, exact: "cs:name-1", }, { s: "https://jujucharms.com/name/series/1/", url: &charm.URL{"cs", "", "name", 1, "series", ""}, exact: "cs:series/name-1", }, { s: "https://jujucharms.com/u/user/development/name/series/1", url: &charm.URL{"cs", "user", "name", 1, "series", charm.DevelopmentChannel}, exact: "cs:~user/development/series/name-1", }, { s: "https://jujucharms.com/u/user/development/name/series", url: &charm.URL{"cs", "user", "name", -1, "series", charm.DevelopmentChannel}, exact: "cs:~user/development/series/name", }, { s: "https://jujucharms.com/u/user/development/name/1", url: &charm.URL{"cs", "user", "name", 1, "", charm.DevelopmentChannel}, exact: "cs:~user/development/name-1", }, { s: "https://jujucharms.com/u/user/development/name", url: &charm.URL{"cs", "user", "name", -1, "", charm.DevelopmentChannel}, exact: "cs:~user/development/name", }, { s: "https://jujucharms.com/development/name", url: &charm.URL{"cs", "", "name", -1, "", charm.DevelopmentChannel}, exact: "cs:development/name", }, { s: "https://jujucharms.com/development/name/series", url: &charm.URL{"cs", "", "name", -1, "series", charm.DevelopmentChannel}, exact: "cs:development/series/name", }, { s: "https://jujucharms.com/development/name/1", url: &charm.URL{"cs", "", "name", 1, "", charm.DevelopmentChannel}, exact: "cs:development/name-1", }, { s: "https://jujucharms.com/development/name/series/1", url: &charm.URL{"cs", "", "name", 1, "series", charm.DevelopmentChannel}, exact: "cs:development/series/name-1", }, { s: "https://jujucharms.com/u/user/development/name/series/", url: &charm.URL{"cs", "user", "name", -1, "series", charm.DevelopmentChannel}, exact: "cs:~user/development/series/name", }, { s: "https://jujucharms.com/u/user/development/name/1/", url: &charm.URL{"cs", "user", "name", 1, "", charm.DevelopmentChannel}, exact: "cs:~user/development/name-1", }, { s: 
"https://jujucharms.com/u/user/development/name/", url: &charm.URL{"cs", "user", "name", -1, "", charm.DevelopmentChannel}, exact: "cs:~user/development/name", }, { s: "https://jujucharms.com/", err: `URL has invalid charm or bundle name: $URL`, }, { s: "https://jujucharms.com/bad.wolf", err: `URL has invalid charm or bundle name: $URL`, }, { s: "https://jujucharms.com/u/", err: "charm or bundle URL $URL malformed, expected \"/u//\"", }, { s: "https://jujucharms.com/u/badwolf", err: "charm or bundle URL $URL malformed, expected \"/u//\"", }, { s: "https://jujucharms.com/name/series/badwolf", err: "charm or bundle URL has malformed revision: \"badwolf\" in $URL", }, { s: "https://jujucharms.com/name/bad.wolf/42", err: `charm or bundle URL has invalid series: $URL`, }, { s: "https://badwolf@jujucharms.com/name/series/42", err: `charm or bundle URL $URL has unrecognized parts`, }, { s: "https://jujucharms.com/name/series/42#bad-wolf", err: `charm or bundle URL $URL has unrecognized parts`, }, { s: "https://jujucharms.com/name/series/42?bad=wolf", err: `charm or bundle URL $URL has unrecognized parts`, }, { s: "bs:~user/series/name-1", err: `charm or bundle URL has invalid schema: $URL`, }, { s: ":foo", err: `cannot parse charm or bundle URL: $URL`, }, { s: "cs:~1/series/name-1", err: `charm or bundle URL has invalid user name: $URL`, }, { s: "cs:~user", err: `URL without charm or bundle name: $URL`, }, { s: "cs:~user/1/name-1", err: `charm or bundle URL has invalid series: $URL`, }, { s: "cs:~user/series/name-1-2", err: `URL has invalid charm or bundle name: $URL`, }, { s: "cs:~user/series/name-1-name-2", err: `URL has invalid charm or bundle name: $URL`, }, { s: "cs:~user/series/name--name-2", err: `URL has invalid charm or bundle name: $URL`, }, { s: "cs:foo-1-2", err: `URL has invalid charm or bundle name: $URL`, }, { s: "cs:~user/series/huh/name-1", err: `charm or bundle URL has invalid form: $URL`, }, { s: "cs:~user/production/series/name-1", err: `charm or 
bundle URL has invalid form: $URL`, }, { s: "cs:~user/development/series/badwolf/name-1", err: `charm or bundle URL has invalid form: $URL`, }, { s: "cs:/name", err: `charm or bundle URL has invalid series: $URL`, }, { s: "local:~user/series/name", err: `local charm or bundle URL with user name: $URL`, }, { s: "local:~user/name", err: `local charm or bundle URL with user name: $URL`, }, { s: "local:development/name", err: `local charm or bundle URL with channel: $URL`, }, { s: "local:development/series/name-1", err: `local charm or bundle URL with channel: $URL`, }, { s: "precise/wordpress", exact: "cs:precise/wordpress", url: &charm.URL{"cs", "", "wordpress", -1, "precise", ""}, }, { s: "foo", exact: "cs:foo", url: &charm.URL{"cs", "", "foo", -1, "", ""}, }, { s: "foo-1", exact: "cs:foo-1", url: &charm.URL{"cs", "", "foo", 1, "", ""}, }, { s: "n0-n0-n0", exact: "cs:n0-n0-n0", url: &charm.URL{"cs", "", "n0-n0-n0", -1, "", ""}, }, { s: "cs:foo", exact: "cs:foo", url: &charm.URL{"cs", "", "foo", -1, "", ""}, }, { s: "local:foo", exact: "local:foo", url: &charm.URL{"local", "", "foo", -1, "", ""}, }, { s: "series/foo", exact: "cs:series/foo", url: &charm.URL{"cs", "", "foo", -1, "series", ""}, }, { s: "development/foo", exact: "cs:development/foo", url: &charm.URL{"cs", "", "foo", -1, "", charm.DevelopmentChannel}, }, { s: "development/foo-1", exact: "cs:development/foo-1", url: &charm.URL{"cs", "", "foo", 1, "", charm.DevelopmentChannel}, }, { s: "development/n0-n0-n0", exact: "cs:development/n0-n0-n0", url: &charm.URL{"cs", "", "n0-n0-n0", -1, "", charm.DevelopmentChannel}, }, { s: "development/series/foo", exact: "cs:development/series/foo", url: &charm.URL{"cs", "", "foo", -1, "series", charm.DevelopmentChannel}, }, { s: "series/foo/bar", err: `charm or bundle URL has invalid form: "series/foo/bar"`, }, { s: "cs:foo/~blah", err: `URL has invalid charm or bundle name: "cs:foo/~blah"`, }} func (s *URLSuite) TestParseURL(c *gc.C) { for i, t := range urlTests { 
c.Logf("test %d: %q", i, t.s) expectStr := t.s if t.exact != "" { expectStr = t.exact } url, uerr := charm.ParseURL(t.s) if t.err != "" { t.err = strings.Replace(t.err, "$URL", regexp.QuoteMeta(fmt.Sprintf("%q", t.s)), -1) c.Assert(uerr, gc.ErrorMatches, t.err) c.Assert(url, gc.IsNil) continue } c.Assert(uerr, gc.IsNil) c.Assert(url, gc.DeepEquals, t.url) c.Assert(url.String(), gc.Equals, expectStr) // URL strings are generated as expected. Reversability is preserved // with v1 URLs. if t.exact != "" { c.Check(url.String(), gc.Equals, t.exact) } else { c.Check(url.String(), gc.Equals, t.s) } } } var inferTests = []struct { vague, exact string }{ {"foo", "cs:defseries/foo"}, {"foo-1", "cs:defseries/foo-1"}, {"n0-n0-n0", "cs:defseries/n0-n0-n0"}, {"cs:foo", "cs:defseries/foo"}, {"local:foo", "local:defseries/foo"}, {"series/foo", "cs:series/foo"}, {"cs:series/foo", "cs:series/foo"}, {"local:series/foo", "local:series/foo"}, {"cs:~user/foo", "cs:~user/defseries/foo"}, {"cs:~user/series/foo", "cs:~user/series/foo"}, {"local:~user/series/foo", "local:~user/series/foo"}, {"bs:foo", "bs:defseries/foo"}, {"cs:~1/foo", "cs:~1/defseries/foo"}, {"cs:foo-1-2", "cs:defseries/foo-1-2"}, {"development/foo", "cs:development/defseries/foo"}, {"development/foo-1", "cs:development/defseries/foo-1"}, {"development/series/foo", "cs:development/series/foo"}, {"local:development/series/foo", "local:development/series/foo"}, {"cs:~user/development/foo", "cs:~user/development/defseries/foo"}, {"local:~user/development/series/foo", "local:~user/development/series/foo"}, {"cs:~1/development/foo", "cs:~1/development/defseries/foo"}, } func (s *URLSuite) TestInferURL(c *gc.C) { for i, t := range inferTests { c.Logf("test %d", i) comment := gc.Commentf("InferURL(%q, %q)", t.vague, "defseries") inferred, ierr := charm.InferURL(t.vague, "defseries") parsed, perr := charm.ParseURL(t.exact) if perr == nil { c.Check(inferred, gc.DeepEquals, parsed, comment) c.Check(ierr, gc.IsNil) } else { expect := 
perr.Error() if t.vague != t.exact { if colIdx := strings.Index(expect, ":"); colIdx > 0 { expect = expect[:colIdx] } } c.Check(ierr.Error(), gc.Matches, expect+".*", comment) } } u, err := charm.InferURL("~blah", "defseries") c.Assert(u, gc.IsNil) c.Assert(err, gc.ErrorMatches, "URL without charm or bundle name: .*") } var inferNoDefaultSeriesTests = []struct { vague, exact string resolved bool }{ {"foo", "", false}, {"foo-1", "", false}, {"cs:foo", "", false}, {"cs:~user/foo", "", false}, {"series/foo", "cs:series/foo", true}, {"cs:series/foo", "cs:series/foo", true}, {"cs:~user/series/foo", "cs:~user/series/foo", true}, {"development/foo", "", false}, {"development/foo-1", "", false}, {"cs:development/foo", "", false}, {"cs:~user/development/foo", "", false}, {"development/series/foo", "cs:development/series/foo", true}, {"cs:development/series/foo", "cs:development/series/foo", true}, {"cs:~user/development/series/foo", "cs:~user/development/series/foo", true}, } func (s *URLSuite) TestInferURLNoDefaultSeries(c *gc.C) { for i, t := range inferNoDefaultSeriesTests { c.Logf("%d: %s", i, t.vague) inferred, err := charm.InferURL(t.vague, "") if t.exact == "" { c.Assert(err, gc.ErrorMatches, fmt.Sprintf("cannot infer charm or bundle URL for %q: charm or bundle url series is not resolved", t.vague)) } else { parsed, err := charm.ParseURL(t.exact) c.Assert(err, gc.IsNil) c.Assert(inferred, gc.DeepEquals, parsed, gc.Commentf(`InferURL(%q, "")`, t.vague)) } } } var validTests = []struct { valid func(string) bool string string expect bool }{ {charm.IsValidName, "", false}, {charm.IsValidName, "wordpress", true}, {charm.IsValidName, "Wordpress", false}, {charm.IsValidName, "word-press", true}, {charm.IsValidName, "word press", false}, {charm.IsValidName, "word^press", false}, {charm.IsValidName, "-wordpress", false}, {charm.IsValidName, "wordpress-", false}, {charm.IsValidName, "wordpress2", true}, {charm.IsValidName, "wordpress-2", false}, {charm.IsValidName, 
"word2-press2", true}, {charm.IsValidSeries, "", false}, {charm.IsValidSeries, "precise", true}, {charm.IsValidSeries, "Precise", false}, {charm.IsValidSeries, "pre cise", false}, {charm.IsValidSeries, "pre-cise", false}, {charm.IsValidSeries, "pre^cise", false}, {charm.IsValidSeries, "prec1se", true}, {charm.IsValidSeries, "-precise", false}, {charm.IsValidSeries, "precise-", false}, {charm.IsValidSeries, "precise-1", false}, {charm.IsValidSeries, "precise1", true}, {charm.IsValidSeries, "pre-c1se", false}, } func (s *URLSuite) TestValidCheckers(c *gc.C) { for i, t := range validTests { c.Logf("test %d: %s", i, t.string) c.Assert(t.valid(t.string), gc.Equals, t.expect, gc.Commentf("%s", t.string)) } } var isValidChannelTests = []struct { channel charm.Channel expect bool }{{ channel: charm.DevelopmentChannel, expect: true, }, { channel: "", }, { channel: "-development", }, { channel: "bad wolf", }} func (s *URLSuite) TestIsValidChannel(c *gc.C) { for i, t := range isValidChannelTests { c.Logf("test %d: %s", i, t.channel) c.Assert(charm.IsValidChannel(t.channel), gc.Equals, t.expect, gc.Commentf("%s", t.channel)) } } func (s *URLSuite) TestMustParseURL(c *gc.C) { url := charm.MustParseURL("cs:series/name") c.Assert(url, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series", ""}) f := func() { charm.MustParseURL("local:@@/name") } c.Assert(f, gc.PanicMatches, "charm or bundle URL has invalid series: .*") f = func() { charm.MustParseURL("cs:~user") } c.Assert(f, gc.PanicMatches, "URL without charm or bundle name: .*") f = func() { charm.MustParseURL("cs:~user") } c.Assert(f, gc.PanicMatches, "URL without charm or bundle name: .*") } func (s *URLSuite) TestWithRevision(c *gc.C) { url := charm.MustParseURL("cs:series/name") other := url.WithRevision(1) c.Assert(url, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series", ""}) c.Assert(other, gc.DeepEquals, &charm.URL{"cs", "", "name", 1, "series", ""}) // Should always copy. The opposite behavior is error prone. 
c.Assert(other.WithRevision(1), gc.Not(gc.Equals), other) c.Assert(other.WithRevision(1), gc.DeepEquals, other) } func (s *URLSuite) TestWithChannel(c *gc.C) { url := charm.MustParseURL("cs:series/name") other := url.WithChannel("development") c.Assert(url, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series", ""}) c.Assert(other, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series", "development"}) // Should always copy. The opposite behavior is error prone. c.Assert(other.WithRevision(1), gc.Not(gc.Equals), other) // Set the channel back to empty. other = url.WithChannel("") c.Assert(other, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series", ""}) } var codecs = []struct { Marshal func(interface{}) ([]byte, error) Unmarshal func([]byte, interface{}) error }{{ Marshal: bson.Marshal, Unmarshal: bson.Unmarshal, }, { Marshal: json.Marshal, Unmarshal: json.Unmarshal, }} func (s *URLSuite) TestURLCodecs(c *gc.C) { for i, codec := range codecs { c.Logf("codec %d", i) type doc struct { URL *charm.URL } url := charm.MustParseURL("cs:series/name") v0 := doc{url} data, err := codec.Marshal(v0) c.Assert(err, gc.IsNil) var v doc err = codec.Unmarshal(data, &v) c.Assert(v, gc.DeepEquals, v0) // Check that the underlying representation // is a string. type strDoc struct { URL string } var vs strDoc err = codec.Unmarshal(data, &vs) c.Assert(err, gc.IsNil) c.Assert(vs.URL, gc.Equals, "cs:series/name") data, err = codec.Marshal(doc{}) c.Assert(err, gc.IsNil) err = codec.Unmarshal(data, &v) c.Assert(err, gc.IsNil) c.Assert(v.URL, gc.IsNil) } } func (s *URLSuite) TestJSONGarbage(c *gc.C) { // unmarshalling json gibberish for _, value := range []string{":{", `"cs:{}+<"`, `"cs:~_~/f00^^&^/baaaar$%-?"`} { err := json.Unmarshal([]byte(value), new(struct{ URL *charm.URL })) c.Check(err, gc.NotNil) } } type QuoteSuite struct{} var _ = gc.Suite(&QuoteSuite{}) func (s *QuoteSuite) TestUnmodified(c *gc.C) { // Check that a string containing only valid // chars stays unmodified. 
// NOTE(review): the lines below are the tail of TestUnmodified, whose func
// line lies before this view; the fragment is preserved verbatim.
in := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-"
out := charm.Quote(in)
c.Assert(out, gc.Equals, in)
}

// TestQuote verifies that characters outside the safe set are replaced by
// their _hex_ escaped form while '-' and '.' pass through untouched.
func (s *QuoteSuite) TestQuote(c *gc.C) {
	// Check that invalid chars are translated correctly.
	in := "hello_there/how'are~you-today.sir"
	out := charm.Quote(in)
	c.Assert(out, gc.Equals, "hello_5f_there_2f_how_27_are_7e_you-today.sir")
}
charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/bundledir.go0000664000175000017500000000255012672604527022626 0ustar marcomarco// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

package charm

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// BundleDir holds a bundle that has been expanded into a directory on disk.
type BundleDir struct {
	Path   string      // root directory of the expanded bundle
	data   *BundleData // parsed contents of bundle.yaml
	readMe string      // contents of README.md
}

// Trick to ensure *BundleDir implements the Bundle interface.
var _ Bundle = (*BundleDir)(nil)

// ReadBundleDir returns a BundleDir representing an expanded
// bundle directory. It does not verify the bundle data.
// Both bundle.yaml and README.md must be present; a missing or
// unreadable README is reported as an error.
func ReadBundleDir(path string) (dir *BundleDir, err error) {
	dir = &BundleDir{Path: path}
	file, err := os.Open(dir.join("bundle.yaml"))
	if err != nil {
		return nil, err
	}
	dir.data, err = ReadBundleData(file)
	file.Close()
	if err != nil {
		return nil, err
	}
	readMe, err := ioutil.ReadFile(dir.join("README.md"))
	if err != nil {
		return nil, fmt.Errorf("cannot read README file: %v", err)
	}
	dir.readMe = string(readMe)
	return dir, nil
}

// Data returns the parsed contents of the bundle's bundle.yaml.
func (dir *BundleDir) Data() *BundleData {
	return dir.data
}

// ReadMe returns the contents of the bundle's README.md file.
func (dir *BundleDir) ReadMe() string {
	return dir.readMe
}

// ArchiveTo writes the bundle directory to w as a zip archive.
func (dir *BundleDir) ArchiveTo(w io.Writer) error {
	return writeArchive(w, dir.Path, -1, nil)
}

// join builds a path rooted at the bundle's expanded directory
// path and the extra path components provided.
func (dir *BundleDir) join(parts ...string) string {
	parts = append([]string{dir.Path}, parts...)
	return filepath.Join(parts...)
}
charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/charmdir_test.go0000664000175000017500000002164712672604527023512 0ustar marcomarco// Copyright 2011, 2012, 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

package charm_test

import (
	"archive/zip"
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/juju/testing"
	gc "gopkg.in/check.v1"

	"gopkg.in/juju/charm.v6-unstable"
)

// CharmDirSuite exercises reading and archiving expanded charm directories.
type CharmDirSuite struct {
	testing.IsolationSuite
}

var _ = gc.Suite(&CharmDirSuite{})

func (s *CharmDirSuite) TestReadCharmDir(c *gc.C) {
	path := charmDirPath(c, "dummy")
	dir, err := charm.ReadCharmDir(path)
	c.Assert(err, gc.IsNil)
	checkDummy(c, dir, path)
}

func (s *CharmDirSuite) TestReadCharmDirWithoutConfig(c *gc.C) {
	path := charmDirPath(c, "varnish")
	dir, err := charm.ReadCharmDir(path)
	c.Assert(err, gc.IsNil)
	// A lacking config.yaml file still causes a proper
	// Config value to be returned.
	c.Assert(dir.Config().Options, gc.HasLen, 0)
}

func (s *CharmDirSuite) TestReadCharmDirWithoutMetrics(c *gc.C) {
	path := charmDirPath(c, "varnish")
	dir, err := charm.ReadCharmDir(path)
	c.Assert(err, gc.IsNil)
	// A lacking metrics.yaml file indicates the unit will not
	// be metered.
c.Assert(dir.Metrics(), gc.IsNil) } func (s *CharmDirSuite) TestReadCharmDirWithEmptyMetrics(c *gc.C) { path := charmDirPath(c, "metered-empty") dir, err := charm.ReadCharmDir(path) c.Assert(err, gc.IsNil) c.Assert(Keys(dir.Metrics()), gc.HasLen, 0) } func (s *CharmDirSuite) TestReadCharmDirWithCustomMetrics(c *gc.C) { path := charmDirPath(c, "metered") dir, err := charm.ReadCharmDir(path) c.Assert(err, gc.IsNil) c.Assert(dir.Metrics(), gc.NotNil) c.Assert(Keys(dir.Metrics()), gc.DeepEquals, []string{"juju-unit-time", "pings"}) } func (s *CharmDirSuite) TestReadCharmDirWithoutActions(c *gc.C) { path := charmDirPath(c, "wordpress") dir, err := charm.ReadCharmDir(path) c.Assert(err, gc.IsNil) // A lacking actions.yaml file still causes a proper // Actions value to be returned. c.Assert(dir.Actions().ActionSpecs, gc.HasLen, 0) } func (s *CharmDirSuite) TestArchiveTo(c *gc.C) { baseDir := c.MkDir() charmDir := cloneDir(c, charmDirPath(c, "dummy")) s.assertArchiveTo(c, baseDir, charmDir) } func (s *CharmDirSuite) TestArchiveToWithSymlinkedRootDir(c *gc.C) { path := cloneDir(c, charmDirPath(c, "dummy")) baseDir := filepath.Dir(path) err := os.Symlink(filepath.Join("dummy"), filepath.Join(baseDir, "newdummy")) c.Assert(err, gc.IsNil) charmDir := filepath.Join(baseDir, "newdummy") s.assertArchiveTo(c, baseDir, charmDir) } func (s *CharmDirSuite) assertArchiveTo(c *gc.C, baseDir, charmDir string) { haveSymlinks := true if err := os.Symlink("../target", filepath.Join(charmDir, "hooks/symlink")); err != nil { haveSymlinks = false } dir, err := charm.ReadCharmDir(charmDir) c.Assert(err, gc.IsNil) path := filepath.Join(baseDir, "archive.charm") file, err := os.Create(path) c.Assert(err, gc.IsNil) err = dir.ArchiveTo(file) file.Close() c.Assert(err, gc.IsNil) zipr, err := zip.OpenReader(path) c.Assert(err, gc.IsNil) defer zipr.Close() var metaf, instf, emptyf, revf, symf *zip.File for _, f := range zipr.File { c.Logf("Archived file: %s", f.Name) switch f.Name { case "revision": 
revf = f case "metadata.yaml": metaf = f case "hooks/install": instf = f case "hooks/symlink": symf = f case "empty/": emptyf = f case "build/ignored": c.Errorf("archive includes build/*: %s", f.Name) case ".ignored", ".dir/ignored": c.Errorf("archive includes .* entries: %s", f.Name) } } c.Assert(revf, gc.NotNil) reader, err := revf.Open() c.Assert(err, gc.IsNil) data, err := ioutil.ReadAll(reader) reader.Close() c.Assert(err, gc.IsNil) c.Assert(string(data), gc.Equals, "1") c.Assert(metaf, gc.NotNil) reader, err = metaf.Open() c.Assert(err, gc.IsNil) meta, err := charm.ReadMeta(reader) reader.Close() c.Assert(err, gc.IsNil) c.Assert(meta.Name, gc.Equals, "dummy") c.Assert(instf, gc.NotNil) // Despite it being 0751, we pack and unpack it as 0755. c.Assert(instf.Mode()&0777, gc.Equals, os.FileMode(0755)) if haveSymlinks { c.Assert(symf, gc.NotNil) c.Assert(symf.Mode()&0777, gc.Equals, os.FileMode(0777)) reader, err = symf.Open() c.Assert(err, gc.IsNil) data, err = ioutil.ReadAll(reader) reader.Close() c.Assert(err, gc.IsNil) c.Assert(string(data), gc.Equals, "../target") } else { c.Assert(symf, gc.IsNil) } c.Assert(emptyf, gc.NotNil) c.Assert(emptyf.Mode()&os.ModeType, gc.Equals, os.ModeDir) // Despite it being 0750, we pack and unpack it as 0755. 
c.Assert(emptyf.Mode()&0777, gc.Equals, os.FileMode(0755)) } // Bug #864164: Must complain if charm hooks aren't executable func (s *CharmDirSuite) TestArchiveToWithNonExecutableHooks(c *gc.C) { hooks := []string{"install", "start", "config-changed", "upgrade-charm", "stop", "collect-metrics", "meter-status-changed"} for _, relName := range []string{"foo", "bar", "self"} { for _, kind := range []string{"joined", "changed", "departed", "broken"} { hooks = append(hooks, relName+"-relation-"+kind) } } dir := readCharmDir(c, "all-hooks") path := filepath.Join(c.MkDir(), "archive.charm") file, err := os.Create(path) c.Assert(err, gc.IsNil) err = dir.ArchiveTo(file) file.Close() c.Assert(err, gc.IsNil) tlog := c.GetTestLog() for _, hook := range hooks { fullpath := filepath.Join(dir.Path, "hooks", hook) exp := fmt.Sprintf(`^(.|\n)*WARNING juju.charm making "%s" executable in charm(.|\n)*$`, fullpath) c.Assert(tlog, gc.Matches, exp, gc.Commentf("hook %q was not made executable", fullpath)) } // Expand it and check the hooks' permissions // (But do not use ExpandTo(), just use the raw zip) f, err := os.Open(path) c.Assert(err, gc.IsNil) defer f.Close() fi, err := f.Stat() c.Assert(err, gc.IsNil) size := fi.Size() zipr, err := zip.NewReader(f, size) c.Assert(err, gc.IsNil) allhooks := dir.Meta().Hooks() for _, zfile := range zipr.File { cleanName := filepath.Clean(zfile.Name) if strings.HasPrefix(cleanName, "hooks") { hookName := filepath.Base(cleanName) if _, ok := allhooks[hookName]; ok { perms := zfile.Mode() c.Assert(perms&0100 != 0, gc.Equals, true, gc.Commentf("hook %q is not executable", hookName)) } } } } func (s *CharmDirSuite) TestArchiveToWithBadType(c *gc.C) { charmDir := cloneDir(c, charmDirPath(c, "dummy")) badFile := filepath.Join(charmDir, "hooks", "badfile") // Symlink targeting a path outside of the charm. 
err := os.Symlink("../../target", badFile) c.Assert(err, gc.IsNil) dir, err := charm.ReadCharmDir(charmDir) c.Assert(err, gc.IsNil) err = dir.ArchiveTo(&bytes.Buffer{}) c.Assert(err, gc.ErrorMatches, `symlink "hooks/badfile" links out of charm: "../../target"`) // Symlink targeting an absolute path. os.Remove(badFile) err = os.Symlink("/target", badFile) c.Assert(err, gc.IsNil) dir, err = charm.ReadCharmDir(charmDir) c.Assert(err, gc.IsNil) err = dir.ArchiveTo(&bytes.Buffer{}) c.Assert(err, gc.ErrorMatches, `symlink "hooks/badfile" is absolute: "/target"`) // Can't archive special files either. os.Remove(badFile) err = syscall.Mkfifo(badFile, 0644) c.Assert(err, gc.IsNil) dir, err = charm.ReadCharmDir(charmDir) c.Assert(err, gc.IsNil) err = dir.ArchiveTo(&bytes.Buffer{}) c.Assert(err, gc.ErrorMatches, `file is a named pipe: "hooks/badfile"`) } func (s *CharmDirSuite) TestDirRevisionFile(c *gc.C) { charmDir := cloneDir(c, charmDirPath(c, "dummy")) revPath := filepath.Join(charmDir, "revision") // Missing revision file err := os.Remove(revPath) c.Assert(err, gc.IsNil) dir, err := charm.ReadCharmDir(charmDir) c.Assert(err, gc.IsNil) c.Assert(dir.Revision(), gc.Equals, 0) // Missing revision file with old revision in metadata file, err := os.OpenFile(filepath.Join(charmDir, "metadata.yaml"), os.O_WRONLY|os.O_APPEND, 0) c.Assert(err, gc.IsNil) _, err = file.Write([]byte("\nrevision: 1234\n")) c.Assert(err, gc.IsNil) dir, err = charm.ReadCharmDir(charmDir) c.Assert(err, gc.IsNil) c.Assert(dir.Revision(), gc.Equals, 1234) // Revision file with bad content err = ioutil.WriteFile(revPath, []byte("garbage"), 0666) c.Assert(err, gc.IsNil) dir, err = charm.ReadCharmDir(charmDir) c.Assert(err, gc.ErrorMatches, "invalid revision file") c.Assert(dir, gc.IsNil) } func (s *CharmDirSuite) TestDirSetRevision(c *gc.C) { path := cloneDir(c, charmDirPath(c, "dummy")) dir, err := charm.ReadCharmDir(path) c.Assert(err, gc.IsNil) c.Assert(dir.Revision(), gc.Equals, 1) dir.SetRevision(42) 
c.Assert(dir.Revision(), gc.Equals, 42) var b bytes.Buffer err = dir.ArchiveTo(&b) c.Assert(err, gc.IsNil) archive, err := charm.ReadCharmArchiveBytes(b.Bytes()) c.Assert(archive.Revision(), gc.Equals, 42) } func (s *CharmDirSuite) TestDirSetDiskRevision(c *gc.C) { charmDir := cloneDir(c, charmDirPath(c, "dummy")) dir, err := charm.ReadCharmDir(charmDir) c.Assert(err, gc.IsNil) c.Assert(dir.Revision(), gc.Equals, 1) dir.SetDiskRevision(42) c.Assert(dir.Revision(), gc.Equals, 42) dir, err = charm.ReadCharmDir(charmDir) c.Assert(err, gc.IsNil) c.Assert(dir.Revision(), gc.Equals, 42) } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/bundledata.go0000664000175000017500000006602612672604527022771 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package charm import ( "fmt" "io" "io/ioutil" "os" "path/filepath" "regexp" "sort" "strconv" "strings" "github.com/juju/names" "gopkg.in/yaml.v1" ) // BundleData holds the contents of the bundle. type BundleData struct { // Services holds one entry for each service // that the bundle will create, indexed by // the service name. Services map[string]*ServiceSpec // Machines holds one entry for each machine referred to // by unit placements. These will be mapped onto actual // machines at bundle deployment time. // It is an error if a machine is specified but // not referred to by a unit placement directive. Machines map[string]*MachineSpec `bson:",omitempty" json:",omitempty" yaml:",omitempty"` // Series holds the default series to use when // the bundle chooses charms. Series string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` // Relations holds a slice of 2-element slices, // each specifying a relation between two services. // Each two-element slice holds two endpoints, // each specified as either colon-separated // (service, relation) pair or just a service name. // The relation is made between each. 
If the relation // name is omitted, it will be inferred from the available // relations defined in the services' charms. Relations [][]string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` // White listed set of tags to categorize bundles as we do charms. Tags []string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` // Short paragraph explaining what the bundle is useful for. Description string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` } // MachineSpec represents a notional machine that will be mapped // onto an actual machine at bundle deployment time. type MachineSpec struct { Constraints string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` Annotations map[string]string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` Series string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` } // ServiceSpec represents a single service that will // be deployed as part of the bundle. type ServiceSpec struct { // Charm holds the charm URL of the charm to // use for the given service. Charm string // NumUnits holds the number of units of the // service that will be deployed. // // For a subordinate service, this actually represents // an arbitrary number of units depending on // the service it is related to. NumUnits int `yaml:"num_units,omitempty" json:",omitempty"` // To may hold up to NumUnits members with // each member specifying a desired placement // for the respective unit of the service. // // In regular-expression-like notation, each // element matches the following pattern: // // (:)?(||new) // // If containertype is specified, the unit is deployed // into a new container of that type, otherwise // it will be "hulk-smashed" into the specified location, // by co-locating it with any other units that happen to // be there, which may result in unintended behavior. 
// // The second part (after the colon) specifies where // the new unit should be placed - it may refer to // a unit of another service specified in the bundle, // a machine id specified in the machines section, // or the special name "new" which specifies a newly // created machine. // // A unit placement may be specified with a service name only, // in which case its unit number is assumed to // be one more than the unit number of the previous // unit in the list with the same service, or zero // if there were none. // // If there are less elements in To than NumUnits, // the last element is replicated to fill it. If there // are no elements (or To is omitted), "new" is replicated. // // For example: // // wordpress/0 wordpress/1 lxc:0 kvm:new // // specifies that the first two units get hulk-smashed // onto the first two units of the wordpress service, // the third unit gets allocated onto an lxc container // on machine 0, and subsequent units get allocated // on kvm containers on new machines. // // The above example is the same as this: // // wordpress wordpress lxc:0 kvm:new To []string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` // Expose holds whether the service must be exposed. Expose bool `bson:",omitempty" json:",omitempty" yaml:",omitempty"` // Options holds the configuration values // to apply to the new service. They should // be compatible with the charm configuration. Options map[string]interface{} `bson:",omitempty" json:",omitempty" yaml:",omitempty"` // Annotations holds any annotations to apply to the // service when deployed. Annotations map[string]string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` // Constraints holds the default constraints to apply // when creating new machines for units of the service. // This is ignored for units with explicit placement directives. 
Constraints string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` // Storage holds the constraints for storage to assign // to units of the service. Storage map[string]string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` // EndpointBindings maps how endpoints are bound to spaces EndpointBindings map[string]string `bson:"bindings,omitempty" json:"bindings,omitempty" yaml:"bindings,omitempty"` } // ReadBundleData reads bundle data from the given reader. // The returned data is not verified - call Verify to ensure // that it is OK. func ReadBundleData(r io.Reader) (*BundleData, error) { bytes, err := ioutil.ReadAll(r) if err != nil { return nil, err } var bd BundleData if err := yaml.Unmarshal(bytes, &bd); err != nil { return nil, fmt.Errorf("cannot unmarshal bundle data: %v", err) } return &bd, nil } // VerificationError holds an error generated by BundleData.Verify, // holding all the verification errors found when verifying. type VerificationError struct { Errors []error } func (err *VerificationError) Error() string { switch len(err.Errors) { case 0: return "no verification errors!" case 1: return err.Errors[0].Error() } return fmt.Sprintf("%s (and %d more errors)", err.Errors[0], len(err.Errors)-1) } type bundleDataVerifier struct { // bundleDir is the directory containing the bundle file bundleDir string bd *BundleData // machines holds the reference counts of all machines // as referred to by placement directives. 
	machineRefCounts map[string]int

	// charms holds the charms referred to by the bundle, indexed by
	// charm URL (see VerifyWithCharms); it may be nil when no charm
	// checking was requested.
	charms map[string]Charm

	// errors accumulates every verification error found; err()
	// wraps them into a single *VerificationError.
	errors []error

	// verifyConstraints and verifyStorage validate constraint
	// strings found in the bundle. Both are always non-nil here:
	// verifyBundle substitutes no-op functions for nil arguments.
	verifyConstraints func(c string) error
	verifyStorage     func(s string) error
}

// addErrorf formats an error and records it on the verifier.
func (verifier *bundleDataVerifier) addErrorf(f string, a ...interface{}) {
	verifier.addError(fmt.Errorf(f, a...))
}

// addError records a single verification error for later reporting.
func (verifier *bundleDataVerifier) addError(err error) {
	verifier.errors = append(verifier.errors, err)
}

// err returns all recorded errors wrapped in a *VerificationError,
// or nil if verification found no problems.
func (verifier *bundleDataVerifier) err() error {
	if len(verifier.errors) > 0 {
		return &VerificationError{verifier.errors}
	}
	return nil
}

// RequiredCharms returns a sorted slice of all the charm URLs
// required by the bundle.
func (bd *BundleData) RequiredCharms() []string {
	req := make([]string, 0, len(bd.Services))
	for _, svc := range bd.Services {
		req = append(req, svc.Charm)
	}
	sort.Strings(req)
	return req
}

// VerifyLocal verifies that a local bundle file is consistent.
// A local bundle file may contain references to charms which are
// referred to by a directory, either relative or absolute.
//
// bundleDir is used to construct the full path for charms specified
// using a relative directory path. The charm path is therefore expected
// to be relative to the bundle.yaml file.
func (bd *BundleData) VerifyLocal(
	bundleDir string,
	verifyConstraints func(c string) error,
	verifyStorage func(s string) error,
) error {
	return bd.verifyBundle(bundleDir, verifyConstraints, verifyStorage, nil)
}

// Verify is a convenience method that calls VerifyWithCharms
// with a nil charms map.
func (bd *BundleData) Verify(
	verifyConstraints func(c string) error,
	verifyStorage func(s string) error,
) error {
	return bd.VerifyWithCharms(verifyConstraints, verifyStorage, nil)
}

// VerifyWithCharms verifies that the bundle is consistent.
// The verifyConstraints function is called to verify any constraints
// that are found. If verifyConstraints is nil, no checking
// of constraints will be done. Similarly, a non-nil verifyStorage
// function is called to verify any storage constraints.
// // It verifies the following: // // - All defined machines are referred to by placement directives. // - All services referred to by placement directives are specified in the bundle. // - All services referred to by relations are specified in the bundle. // - All basic constraints are valid. // - All storage constraints are valid. // // If charms is not nil, it should hold a map with an entry for each // charm url returned by bd.RequiredCharms. The verification will then // also check that services are defined with valid charms, // relations are correctly made and options are defined correctly. // // If the verification fails, Verify returns a *VerificationError describing // all the problems found. func (bd *BundleData) VerifyWithCharms( verifyConstraints func(c string) error, verifyStorage func(s string) error, charms map[string]Charm, ) error { return bd.verifyBundle("", verifyConstraints, verifyStorage, charms) } func (bd *BundleData) verifyBundle( bundleDir string, verifyConstraints func(c string) error, verifyStorage func(s string) error, charms map[string]Charm, ) error { if verifyConstraints == nil { verifyConstraints = func(string) error { return nil } } if verifyStorage == nil { verifyStorage = func(string) error { return nil } } verifier := &bundleDataVerifier{ bundleDir: bundleDir, verifyConstraints: verifyConstraints, verifyStorage: verifyStorage, bd: bd, machineRefCounts: make(map[string]int), charms: charms, } for id := range bd.Machines { verifier.machineRefCounts[id] = 0 } if bd.Series != "" && !IsValidSeries(bd.Series) { verifier.addErrorf("bundle declares an invalid series %q", bd.Series) } verifier.verifyMachines() verifier.verifyServices() verifier.verifyRelations() verifier.verifyOptions() verifier.verifyEndpointBindings() for id, count := range verifier.machineRefCounts { if count == 0 { verifier.addErrorf("machine %q is not referred to by a placement directive", id) } } return verifier.err() } var ( validMachineId = regexp.MustCompile("^" 
+ names.NumberSnippet + "$") validStorageName = regexp.MustCompile("^" + names.StorageNameSnippet + "$") ) func (verifier *bundleDataVerifier) verifyMachines() { for id, m := range verifier.bd.Machines { if !validMachineId.MatchString(id) { verifier.addErrorf("invalid machine id %q found in machines", id) } if m == nil { continue } if m.Constraints != "" { if err := verifier.verifyConstraints(m.Constraints); err != nil { verifier.addErrorf("invalid constraints %q in machine %q: %v", m.Constraints, id, err) } } if m.Series != "" && !IsValidSeries(m.Series) { verifier.addErrorf("invalid series %s for machine %q", m.Series, id) } } } func (verifier *bundleDataVerifier) verifyServices() { if len(verifier.bd.Services) == 0 { verifier.addErrorf("at least one service must be specified") return } for name, svc := range verifier.bd.Services { if svc.Charm == "" { verifier.addErrorf("empty charm path") } // Charm may be a local directory or a charm URL. if strings.HasPrefix(svc.Charm, ".") || filepath.IsAbs(svc.Charm) { charmPath := svc.Charm if !filepath.IsAbs(charmPath) { charmPath = filepath.Join(verifier.bundleDir, charmPath) } if _, err := os.Stat(charmPath); err != nil { if os.IsNotExist(err) { verifier.addErrorf("charm path in service %q does not exist: %v", name, charmPath) } else { verifier.addErrorf("invalid charm path in service %q: %v", name, err) } } } else if _, err := ParseURL(svc.Charm); err != nil { verifier.addErrorf("invalid charm URL in service %q: %v", name, err) } if err := verifier.verifyConstraints(svc.Constraints); err != nil { verifier.addErrorf("invalid constraints %q in service %q: %v", svc.Constraints, name, err) } for storageName, storageConstraints := range svc.Storage { if !validStorageName.MatchString(storageName) { verifier.addErrorf("invalid storage name %q in service %q", storageName, name) } if err := verifier.verifyStorage(storageConstraints); err != nil { verifier.addErrorf("invalid storage %q in service %q: %v", storageName, name, err) 
} } if verifier.charms != nil { if ch, ok := verifier.charms[svc.Charm]; ok { if ch.Meta().Subordinate { if len(svc.To) > 0 { verifier.addErrorf("service %q is subordinate but specifies unit placement", name) } if svc.NumUnits > 0 { verifier.addErrorf("service %q is subordinate but has non-zero num_units", name) } } } else { verifier.addErrorf("service %q refers to non-existent charm %q", name, svc.Charm) } } if svc.NumUnits < 0 { verifier.addErrorf("negative number of units specified on service %q", name) } else if len(svc.To) > svc.NumUnits { verifier.addErrorf("too many units specified in unit placement for service %q", name) } verifier.verifyPlacement(svc.To) } } func (verifier *bundleDataVerifier) verifyPlacement(to []string) { for _, p := range to { up, err := ParsePlacement(p) if err != nil { verifier.addError(err) continue } switch { case up.Service != "": spec, ok := verifier.bd.Services[up.Service] if !ok { verifier.addErrorf("placement %q refers to a service not defined in this bundle", p) continue } if up.Unit >= 0 && up.Unit >= spec.NumUnits { verifier.addErrorf("placement %q specifies a unit greater than the %d unit(s) started by the target service", p, spec.NumUnits) } case up.Machine == "new": default: _, ok := verifier.bd.Machines[up.Machine] if !ok { verifier.addErrorf("placement %q refers to a machine not defined in this bundle", p) continue } verifier.machineRefCounts[up.Machine]++ } } } func (verifier *bundleDataVerifier) getCharmMetaForService(svcName string) (*Meta, error) { svc, ok := verifier.bd.Services[svcName] if !ok { return nil, fmt.Errorf("service %q not found", svcName) } ch, ok := verifier.charms[svc.Charm] if !ok { return nil, fmt.Errorf("charm %q from service %q not found", svc.Charm, svcName) } return ch.Meta(), nil } func (verifier *bundleDataVerifier) verifyRelations() { seen := make(map[[2]endpoint]bool) for _, relPair := range verifier.bd.Relations { if len(relPair) != 2 { verifier.addErrorf("relation %q has %d endpoint(s), 
not 2", relPair, len(relPair)) continue } var epPair [2]endpoint relParseErr := false for i, svcRel := range relPair { ep, err := parseEndpoint(svcRel) if err != nil { verifier.addError(err) relParseErr = true continue } if _, ok := verifier.bd.Services[ep.service]; !ok { verifier.addErrorf("relation %q refers to service %q not defined in this bundle", relPair, ep.service) } epPair[i] = ep } if relParseErr { // We failed to parse at least one relation, so don't // bother checking further. continue } if epPair[0].service == epPair[1].service { verifier.addErrorf("relation %q relates a service to itself", relPair) } // Resolve endpoint relations if necessary and we have // the necessary charm information. if (epPair[0].relation == "" || epPair[1].relation == "") && verifier.charms != nil { iep0, iep1, err := inferEndpoints(epPair[0], epPair[1], verifier.getCharmMetaForService) if err != nil { verifier.addErrorf("cannot infer endpoint between %s and %s: %v", epPair[0], epPair[1], err) } else { // Change the endpoints that get recorded // as seen, so we'll diagnose a duplicate // relation even if one relation specifies // the relations explicitly and the other does // not. epPair[0], epPair[1] = iep0, iep1 } } // Re-order pairs so that we diagnose duplicate relations // whichever way they're specified. if epPair[1].less(epPair[0]) { epPair[1], epPair[0] = epPair[0], epPair[1] } if _, ok := seen[epPair]; ok { verifier.addErrorf("relation %q is defined more than once", relPair) } if verifier.charms != nil && epPair[0].relation != "" && epPair[1].relation != "" { // We have charms to verify against, and the // endpoint has been fully specified or inferred. 
verifier.verifyRelation(epPair[0], epPair[1]) } seen[epPair] = true } } func (verifier *bundleDataVerifier) verifyEndpointBindings() { for name, svc := range verifier.bd.Services { charm, ok := verifier.charms[name] // Only thest the ok path here because the !ok path is tested in verifyServices if !ok { continue } for endpoint, space := range svc.EndpointBindings { _, isInProvides := charm.Meta().Provides[endpoint] _, isInRequires := charm.Meta().Requires[endpoint] _, isInPeers := charm.Meta().Peers[endpoint] _, isInExtraBindings := charm.Meta().ExtraBindings[endpoint] if !(isInProvides || isInRequires || isInPeers || isInExtraBindings) { verifier.addErrorf( "service %q wants to bind endpoint %q to space %q, "+ "but the endpoint is not defined by the charm", name, endpoint, space) } } } } var infoRelation = Relation{ Name: "juju-info", Role: RoleProvider, Interface: "juju-info", Scope: ScopeContainer, } // verifyRelation verifies a single relation. // It checks that both endpoints of the relation are // defined, and that the relationship is correctly // symmetrical (provider to requirer) and shares // the same interface. func (verifier *bundleDataVerifier) verifyRelation(ep0, ep1 endpoint) { svc0 := verifier.bd.Services[ep0.service] svc1 := verifier.bd.Services[ep1.service] if svc0 == nil || svc1 == nil || svc0 == svc1 { // An error will be produced by verifyRelations for this case. return } charm0 := verifier.charms[svc0.Charm] charm1 := verifier.charms[svc1.Charm] if charm0 == nil || charm1 == nil { // An error will be produced by verifyServices for this case. return } relProv0, okProv0 := charm0.Meta().Provides[ep0.relation] // The juju-info relation is provided implicitly by every // charm - use it if required. 
if !okProv0 && ep0.relation == infoRelation.Name { relProv0, okProv0 = infoRelation, true } relReq0, okReq0 := charm0.Meta().Requires[ep0.relation] if !okProv0 && !okReq0 { verifier.addErrorf("charm %q used by service %q does not define relation %q", svc0.Charm, ep0.service, ep0.relation) } relProv1, okProv1 := charm1.Meta().Provides[ep1.relation] // The juju-info relation is provided implicitly by every // charm - use it if required. if !okProv1 && ep1.relation == infoRelation.Name { relProv1, okProv1 = infoRelation, true } relReq1, okReq1 := charm1.Meta().Requires[ep1.relation] if !okProv1 && !okReq1 { verifier.addErrorf("charm %q used by service %q does not define relation %q", svc1.Charm, ep1.service, ep1.relation) } var relProv, relReq Relation var epProv, epReq endpoint switch { case okProv0 && okReq1: relProv, relReq = relProv0, relReq1 epProv, epReq = ep0, ep1 case okReq0 && okProv1: relProv, relReq = relProv1, relReq0 epProv, epReq = ep1, ep0 case okProv0 && okProv1: verifier.addErrorf("relation %q to %q relates provider to provider", ep0, ep1) return case okReq0 && okReq1: verifier.addErrorf("relation %q to %q relates requirer to requirer", ep0, ep1) return default: // Errors were added above. return } if relProv.Interface != relReq.Interface { verifier.addErrorf("mismatched interface between %q and %q (%q vs %q)", epProv, epReq, relProv.Interface, relReq.Interface) } } // verifyOptions verifies that the options are correctly defined // with respect to the charm config options. func (verifier *bundleDataVerifier) verifyOptions() { if verifier.charms == nil { return } for svcName, svc := range verifier.bd.Services { charm := verifier.charms[svc.Charm] if charm == nil { // An error will be produced by verifyServices for this case. 
continue } config := charm.Config() for name, value := range svc.Options { opt, ok := config.Options[name] if !ok { verifier.addErrorf("cannot validate service %q: configuration option %q not found in charm %q", svcName, name, svc.Charm) continue } _, err := opt.validate(name, value) if err != nil { verifier.addErrorf("cannot validate service %q: %v", svcName, err) } } } } var validServiceRelation = regexp.MustCompile("^(" + names.ServiceSnippet + "):(" + names.RelationSnippet + ")$") type endpoint struct { service string relation string } func (ep endpoint) String() string { if ep.relation == "" { return ep.service } return fmt.Sprintf("%s:%s", ep.service, ep.relation) } func (ep1 endpoint) less(ep2 endpoint) bool { if ep1.service == ep2.service { return ep1.relation < ep2.relation } return ep1.service < ep2.service } func parseEndpoint(ep string) (endpoint, error) { m := validServiceRelation.FindStringSubmatch(ep) if m != nil { return endpoint{ service: m[1], relation: m[2], }, nil } if !names.IsValidService(ep) { return endpoint{}, fmt.Errorf("invalid relation syntax %q", ep) } return endpoint{ service: ep, }, nil } // endpointInfo holds information about one endpoint of a relation. type endpointInfo struct { serviceName string Relation } // String returns the unique identifier of the relation endpoint. func (ep endpointInfo) String() string { return ep.serviceName + ":" + ep.Name } // canRelateTo returns whether a relation may be established between ep // and other. func (ep endpointInfo) canRelateTo(other endpointInfo) bool { return ep.serviceName != other.serviceName && ep.Interface == other.Interface && ep.Role != RolePeer && counterpartRole(ep.Role) == other.Role } // endpoint returns the endpoint specifier for ep. func (ep endpointInfo) endpoint() endpoint { return endpoint{ service: ep.serviceName, relation: ep.Name, } } // counterpartRole returns the RelationRole that the given RelationRole // can relate to. 
func counterpartRole(r RelationRole) RelationRole {
	switch r {
	case RoleProvider:
		return RoleRequirer
	case RoleRequirer:
		return RoleProvider
	case RolePeer:
		// Peer relations relate to themselves.
		return RolePeer
	}
	panic(fmt.Errorf("unknown relation role %q", r))
}

// UnitPlacement describes a single parsed unit placement directive,
// as produced by ParsePlacement from an entry in a service's To
// clause. Exactly one of Machine or Service is non-empty.
type UnitPlacement struct {
	// ContainerType holds the container type of the
	// new unit, or empty if unspecified.
	ContainerType string

	// Machine holds the numeric machine id, or "new",
	// or empty if the placement specifies a service.
	Machine string

	// Service holds the service name, or empty if
	// the placement specifies a machine.
	Service string

	// Unit holds the unit number of the service, or -1
	// if unspecified.
	Unit int
}

// snippetReplacer expands the symbolic names used in validPlacement
// below into the corresponding regexp snippets from the names package.
var snippetReplacer = strings.NewReplacer(
	"container", names.ContainerTypeSnippet,
	"number", names.NumberSnippet,
	"service", names.ServiceSnippet,
)

// validPlacement holds regexp that matches valid placement requests. To
// make the expression easier to comprehend and maintain, we replace
// symbolic snippet references in the regexp by their actual regexps
// using snippetReplacer.
var validPlacement = regexp.MustCompile(
	snippetReplacer.Replace(
		"^(?:(container):)?(?:(service)(?:/(number))?|(number))$",
	),
)

// ParsePlacement parses a unit placement directive, as
// specified in the To clause of a service entry in the
// services section of a bundle.
func ParsePlacement(p string) (*UnitPlacement, error) {
	m := validPlacement.FindStringSubmatch(p)
	if m == nil {
		return nil, fmt.Errorf("invalid placement syntax %q", p)
	}
	// Submatch indexes follow the capture groups in validPlacement:
	// 1=container type, 2=service, 3=unit number, 4=machine id.
	up := UnitPlacement{
		ContainerType: m[1],
		Service:       m[2],
		Machine:       m[4],
	}
	if unitStr := m[3]; unitStr != "" {
		// We know that unitStr must be a valid integer because
		// it's specified as such in the regexp.
up.Unit, _ = strconv.Atoi(unitStr) } else { up.Unit = -1 } if up.Service == "new" { if up.Unit != -1 { return nil, fmt.Errorf("invalid placement syntax %q", p) } up.Machine, up.Service = "new", "" } return &up, nil } // inferEndpoints infers missing relation names from the given endpoint // specifications, using the given get function to retrieve charm // data if necessary. It returns the fully specified endpoints. func inferEndpoints(epSpec0, epSpec1 endpoint, get func(svc string) (*Meta, error)) (endpoint, endpoint, error) { if epSpec0.relation != "" && epSpec1.relation != "" { // The endpoints are already specified explicitly so // there is no need to fetch any charm data to infer // them. return epSpec0, epSpec1, nil } eps0, err := possibleEndpoints(epSpec0, get) if err != nil { return endpoint{}, endpoint{}, err } eps1, err := possibleEndpoints(epSpec1, get) if err != nil { return endpoint{}, endpoint{}, err } var candidates [][]endpointInfo for _, ep0 := range eps0 { for _, ep1 := range eps1 { if ep0.canRelateTo(ep1) { candidates = append(candidates, []endpointInfo{ep0, ep1}) } } } switch len(candidates) { case 0: return endpoint{}, endpoint{}, fmt.Errorf("no relations found") case 1: return candidates[0][0].endpoint(), candidates[0][1].endpoint(), nil } // There's ambiguity; try discarding implicit relations. filtered := discardImplicitRelations(candidates) if len(filtered) == 1 { return filtered[0][0].endpoint(), filtered[0][1].endpoint(), nil } // The ambiguity cannot be resolved, so return an error. 
var keys []string for _, cand := range candidates { keys = append(keys, fmt.Sprintf("%q", relationKey(cand))) } sort.Strings(keys) return endpoint{}, endpoint{}, fmt.Errorf("ambiguous relation: %s %s could refer to %s", epSpec0, epSpec1, strings.Join(keys, "; ")) } func discardImplicitRelations(candidates [][]endpointInfo) [][]endpointInfo { var filtered [][]endpointInfo outer: for _, cand := range candidates { for _, ep := range cand { if ep.IsImplicit() { continue outer } } filtered = append(filtered, cand) } return filtered } // relationKey returns a string describing the relation defined by // endpoints, for use in various contexts (including error messages). func relationKey(endpoints []endpointInfo) string { var names []string for _, ep := range endpoints { names = append(names, ep.String()) } sort.Strings(names) return strings.Join(names, " ") } // possibleEndpoints returns all the endpoints that the given endpoint spec // could refer to. func possibleEndpoints(epSpec endpoint, get func(svc string) (*Meta, error)) ([]endpointInfo, error) { meta, err := get(epSpec.service) if err != nil { return nil, err } var eps []endpointInfo add := func(r Relation) { if epSpec.relation == "" || epSpec.relation == r.Name { eps = append(eps, endpointInfo{ serviceName: epSpec.service, Relation: r, }) } } for _, r := range meta.Provides { add(r) } for _, r := range meta.Requires { add(r) } // Every service implicitly provides a juju-info relation. add(Relation{ Name: "juju-info", Role: RoleProvider, Interface: "juju-info", Scope: ScopeGlobal, }) return eps, nil } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/bundledata_test.go0000664000175000017500000006074412672604527024031 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. 
package charm_test import ( "fmt" "os" "path/filepath" "sort" "strings" "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" ) type bundleDataSuite struct { testing.IsolationSuite } var _ = gc.Suite(&bundleDataSuite{}) const mediawikiBundle = ` series: precise services: mediawiki: charm: "cs:precise/mediawiki-10" num_units: 1 expose: true options: debug: false name: Please set name of wiki skin: vector annotations: "gui-x": 609 "gui-y": -15 storage: valid-store: 10G bindings: db: db website: public mysql: charm: "cs:precise/mysql-28" num_units: 2 to: [0, mediawiki/0] options: "binlog-format": MIXED "block-size": 5 "dataset-size": "80%" flavor: distro "ha-bindiface": eth0 "ha-mcastport": 5411 annotations: "gui-x": 610 "gui-y": 255 constraints: "mem=8g" bindings: db: db relations: - ["mediawiki:db", "mysql:db"] - ["mysql:foo", "mediawiki:bar"] machines: 0: constraints: 'arch=amd64 mem=4g' annotations: foo: bar tags: - super - awesome description: | Everything is awesome. Everything is cool when we work as a team. Lovely day. 
` var parseTests = []struct { about string data string expectedBD *charm.BundleData expectedErr string }{{ about: "mediawiki", data: mediawikiBundle, expectedBD: &charm.BundleData{ Series: "precise", Services: map[string]*charm.ServiceSpec{ "mediawiki": { Charm: "cs:precise/mediawiki-10", NumUnits: 1, Expose: true, Options: map[string]interface{}{ "debug": false, "name": "Please set name of wiki", "skin": "vector", }, Annotations: map[string]string{ "gui-x": "609", "gui-y": "-15", }, Storage: map[string]string{ "valid-store": "10G", }, EndpointBindings: map[string]string{ "db": "db", "website": "public", }, }, "mysql": { Charm: "cs:precise/mysql-28", NumUnits: 2, To: []string{"0", "mediawiki/0"}, Options: map[string]interface{}{ "binlog-format": "MIXED", "block-size": 5, "dataset-size": "80%", "flavor": "distro", "ha-bindiface": "eth0", "ha-mcastport": 5411, }, Annotations: map[string]string{ "gui-x": "610", "gui-y": "255", }, Constraints: "mem=8g", EndpointBindings: map[string]string{ "db": "db", }, }, }, Machines: map[string]*charm.MachineSpec{ "0": { Constraints: "arch=amd64 mem=4g", Annotations: map[string]string{ "foo": "bar", }, }, }, Relations: [][]string{ {"mediawiki:db", "mysql:db"}, {"mysql:foo", "mediawiki:bar"}, }, Tags: []string{"super", "awesome"}, Description: `Everything is awesome. Everything is cool when we work as a team. Lovely day. 
`, }, }, { about: "relations specified with hyphens", data: ` relations: - - "mediawiki:db" - "mysql:db" - - "mysql:foo" - "mediawiki:bar" `, expectedBD: &charm.BundleData{ Relations: [][]string{ {"mediawiki:db", "mysql:db"}, {"mysql:foo", "mediawiki:bar"}, }, }, }} func (*bundleDataSuite) TestParse(c *gc.C) { for i, test := range parseTests { c.Logf("test %d: %s", i, test.about) bd, err := charm.ReadBundleData(strings.NewReader(test.data)) if test.expectedErr != "" { c.Assert(err, gc.ErrorMatches, test.expectedErr) continue } c.Assert(err, gc.IsNil) c.Assert(bd, jc.DeepEquals, test.expectedBD) } } var verifyErrorsTests = []struct { about string data string errors []string }{{ about: "as many errors as possible", data: ` series: "9wrong" machines: 0: constraints: 'bad constraints' annotations: foo: bar series: 'bad series' bogus: 3: services: mediawiki: charm: "bogus:precise/mediawiki-10" num_units: -4 options: debug: false name: Please set name of wiki skin: vector annotations: "gui-x": 609 "gui-y": -15 riak: charm: "./somepath" mysql: charm: "cs:precise/mysql-28" num_units: 2 to: [0, mediawiki/0, nowhere/3, 2, "bad placement"] options: "binlog-format": MIXED "block-size": 5 "dataset-size": "80%" flavor: distro "ha-bindiface": eth0 "ha-mcastport": 5411 annotations: "gui-x": 610 "gui-y": 255 constraints: "bad constraints" wordpress: charm: wordpress ceph: charm: ceph storage: valid-storage: 3,10G no_underscores: 123 ceph-osd: charm: ceph-osd storage: invalid-storage: "bad storage constraints" relations: - ["mediawiki:db", "mysql:db"] - ["mysql:foo", "mediawiki:bar"] - ["arble:bar"] - ["arble:bar", "mediawiki:db"] - ["mysql:foo", "mysql:bar"] - ["mysql:db", "mediawiki:db"] - ["mediawiki/db", "mysql:db"] - ["wordpress", "mysql"] `, errors: []string{ `bundle declares an invalid series "9wrong"`, `invalid storage name "no_underscores" in service "ceph"`, `invalid storage "invalid-storage" in service "ceph-osd": bad storage constraint`, `machine "3" is not referred to 
by a placement directive`, `machine "bogus" is not referred to by a placement directive`, `invalid machine id "bogus" found in machines`, `invalid constraints "bad constraints" in machine "0": bad constraint`, `invalid charm URL in service "mediawiki": charm or bundle URL has invalid schema: "bogus:precise/mediawiki-10"`, `charm path in service "riak" does not exist: internal/test-charm-repo/bundle/somepath`, `invalid constraints "bad constraints" in service "mysql": bad constraint`, `negative number of units specified on service "mediawiki"`, `too many units specified in unit placement for service "mysql"`, `placement "nowhere/3" refers to a service not defined in this bundle`, `placement "mediawiki/0" specifies a unit greater than the -4 unit(s) started by the target service`, `placement "2" refers to a machine not defined in this bundle`, `relation ["arble:bar"] has 1 endpoint(s), not 2`, `relation ["arble:bar" "mediawiki:db"] refers to service "arble" not defined in this bundle`, `relation ["mysql:foo" "mysql:bar"] relates a service to itself`, `relation ["mysql:db" "mediawiki:db"] is defined more than once`, `invalid placement syntax "bad placement"`, `invalid relation syntax "mediawiki/db"`, `invalid series bad series for machine "0"`, }, }, { about: "mediawiki should be ok", data: mediawikiBundle, }} func (*bundleDataSuite) TestVerifyErrors(c *gc.C) { for i, test := range verifyErrorsTests { c.Logf("test %d: %s", i, test.about) assertVerifyErrors(c, test.data, nil, test.errors) } } func assertVerifyErrors(c *gc.C, bundleData string, charms map[string]charm.Charm, expectErrors []string) { bd, err := charm.ReadBundleData(strings.NewReader(bundleData)) c.Assert(err, gc.IsNil) validateConstraints := func(c string) error { if c == "bad constraints" { return fmt.Errorf("bad constraint") } return nil } validateStorage := func(c string) error { if c == "bad storage constraints" { return fmt.Errorf("bad storage constraint") } return nil } if charms != nil { err = 
bd.VerifyWithCharms(validateConstraints, validateStorage, charms) } else { err = bd.VerifyLocal("internal/test-charm-repo/bundle", validateConstraints, validateStorage) } if len(expectErrors) == 0 { if err == nil { return } // Let the rest of the function deal with the // error, so that we'll see the actual errors // that resulted. } c.Assert(err, gc.FitsTypeOf, (*charm.VerificationError)(nil)) errors := err.(*charm.VerificationError).Errors errStrings := make([]string, len(errors)) for i, err := range errors { errStrings[i] = err.Error() } sort.Strings(errStrings) sort.Strings(expectErrors) c.Assert(errStrings, jc.DeepEquals, expectErrors) } func (*bundleDataSuite) TestVerifyCharmURL(c *gc.C) { bd, err := charm.ReadBundleData(strings.NewReader(mediawikiBundle)) c.Assert(err, gc.IsNil) for i, u := range []string{ "wordpress", "cs:wordpress", "cs:precise/wordpress", "precise/wordpress", "precise/wordpress-2", "local:foo", "local:foo-45", } { c.Logf("test %d: %s", i, u) bd.Services["mediawiki"].Charm = u err := bd.Verify(nil, nil) c.Assert(err, gc.IsNil, gc.Commentf("charm url %q", u)) } } func (*bundleDataSuite) TestVerifyLocalCharm(c *gc.C) { bd, err := charm.ReadBundleData(strings.NewReader(mediawikiBundle)) c.Assert(err, gc.IsNil) bundleDir := c.MkDir() relativeCharmDir := filepath.Join(bundleDir, "charm") err = os.MkdirAll(relativeCharmDir, 0700) c.Assert(err, jc.ErrorIsNil) for i, u := range []string{ "wordpress", "cs:wordpress", "cs:precise/wordpress", "precise/wordpress", "precise/wordpress-2", "local:foo", "local:foo-45", c.MkDir(), "./charm", } { c.Logf("test %d: %s", i, u) bd.Services["mediawiki"].Charm = u err := bd.VerifyLocal(bundleDir, nil, nil) c.Assert(err, gc.IsNil, gc.Commentf("charm url %q", u)) } } func (s *bundleDataSuite) TestVerifyBundleUsingJujuInfoRelation(c *gc.C) { err := s.testPrepareAndMutateBeforeVerifyWithCharms(c, nil) c.Assert(err, gc.IsNil) } func (s *bundleDataSuite) testPrepareAndMutateBeforeVerifyWithCharms(c *gc.C, mutator 
func(bd *charm.BundleData)) error { b := readBundleDir(c, "wordpress-with-logging") bd := b.Data() charms := map[string]charm.Charm{ "wordpress": readCharmDir(c, "wordpress"), "mysql": readCharmDir(c, "mysql"), "logging": readCharmDir(c, "logging"), } if mutator != nil { mutator(bd) } return bd.VerifyWithCharms(nil, nil, charms) } func (s *bundleDataSuite) TestVerifyBundleWithUnknownEndpointBindingGiven(c *gc.C) { err := s.testPrepareAndMutateBeforeVerifyWithCharms(c, func(bd *charm.BundleData) { bd.Services["wordpress"].EndpointBindings["foo"] = "bar" }) c.Assert(err, gc.ErrorMatches, `service "wordpress" wants to bind endpoint "foo" to space "bar", `+ `but the endpoint is not defined by the charm`, ) } func (s *bundleDataSuite) TestVerifyBundleWithExtraBindingsSuccess(c *gc.C) { err := s.testPrepareAndMutateBeforeVerifyWithCharms(c, func(bd *charm.BundleData) { // Both of these are specified in extra-bindings. bd.Services["wordpress"].EndpointBindings["admin-api"] = "internal" bd.Services["wordpress"].EndpointBindings["foo-bar"] = "test" }) c.Assert(err, gc.IsNil) } func (s *bundleDataSuite) TestVerifyBundleWithRelationNameBindingSuccess(c *gc.C) { err := s.testPrepareAndMutateBeforeVerifyWithCharms(c, func(bd *charm.BundleData) { // Both of these are specified in as relations. bd.Services["wordpress"].EndpointBindings["cache"] = "foo" bd.Services["wordpress"].EndpointBindings["monitoring-port"] = "bar" }) c.Assert(err, gc.IsNil) } func (*bundleDataSuite) TestRequiredCharms(c *gc.C) { bd, err := charm.ReadBundleData(strings.NewReader(mediawikiBundle)) c.Assert(err, gc.IsNil) reqCharms := bd.RequiredCharms() c.Assert(reqCharms, gc.DeepEquals, []string{"cs:precise/mediawiki-10", "cs:precise/mysql-28"}) } // testCharm returns a charm with the given name // and relations. 
The relations are specified as // a string of the form: // // | // // Within each section, each white-space separated // relation is specified as: /// : // // So, for example: // // testCharm("wordpress", "web:http | db:mysql") // // is equivalent to a charm with metadata.yaml containing // // name: wordpress // description: wordpress // provides: // web: // interface: http // requires: // db: // interface: mysql // // If the charm name has a "-sub" suffix, the // returned charm will have Meta.Subordinate = true. // func testCharm(name string, relations string) charm.Charm { var provides, requires string parts := strings.Split(relations, "|") provides = parts[0] if len(parts) > 1 { requires = parts[1] } meta := &charm.Meta{ Name: name, Summary: name, Description: name, Provides: parseRelations(provides, charm.RoleProvider), Requires: parseRelations(requires, charm.RoleRequirer), } if strings.HasSuffix(name, "-sub") { meta.Subordinate = true } configStr := ` options: title: {default: My Title, description: title, type: string} skill-level: {description: skill, type: int} ` config, err := charm.ReadConfig(strings.NewReader(configStr)) if err != nil { panic(err) } return testCharmImpl{ meta: meta, config: config, } } func parseRelations(s string, role charm.RelationRole) map[string]charm.Relation { rels := make(map[string]charm.Relation) for _, r := range strings.Fields(s) { parts := strings.Split(r, ":") if len(parts) != 2 { panic(fmt.Errorf("invalid relation specifier %q", r)) } name, interf := parts[0], parts[1] rels[name] = charm.Relation{ Name: name, Role: role, Interface: interf, Scope: charm.ScopeGlobal, } } return rels } type testCharmImpl struct { meta *charm.Meta config *charm.Config // Implement charm.Charm, but panic if anything other than // Meta or Config methods are called. 
charm.Charm } func (c testCharmImpl) Meta() *charm.Meta { return c.meta } func (c testCharmImpl) Config() *charm.Config { return c.config } var verifyWithCharmsErrorsTests = []struct { about string data string charms map[string]charm.Charm errors []string }{{ about: "no charms", data: mediawikiBundle, charms: map[string]charm.Charm{}, errors: []string{ `service "mediawiki" refers to non-existent charm "cs:precise/mediawiki-10"`, `service "mysql" refers to non-existent charm "cs:precise/mysql-28"`, }, }, { about: "all present and correct", data: ` services: service1: charm: "test" service2: charm: "test" service3: charm: "test" relations: - ["service1:prova", "service2:reqa"] - ["service1:reqa", "service3:prova"] - ["service3:provb", "service2:reqb"] `, charms: map[string]charm.Charm{ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), }, }, { about: "undefined relations", data: ` services: service1: charm: "test" service2: charm: "test" relations: - ["service1:prova", "service2:blah"] - ["service1:blah", "service2:prova"] `, charms: map[string]charm.Charm{ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), }, errors: []string{ `charm "test" used by service "service1" does not define relation "blah"`, `charm "test" used by service "service2" does not define relation "blah"`, }, }, { about: "undefined services", data: ` services: service1: charm: "test" service2: charm: "test" relations: - ["unknown:prova", "service2:blah"] - ["service1:blah", "unknown:prova"] `, charms: map[string]charm.Charm{ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), }, errors: []string{ `relation ["service1:blah" "unknown:prova"] refers to service "unknown" not defined in this bundle`, `relation ["unknown:prova" "service2:blah"] refers to service "unknown" not defined in this bundle`, }, }, { about: "equal services", data: ` services: service1: charm: "test" service2: charm: "test" relations: - ["service2:prova", "service2:reqa"] `, charms: 
map[string]charm.Charm{ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), }, errors: []string{ `relation ["service2:prova" "service2:reqa"] relates a service to itself`, }, }, { about: "provider to provider relation", data: ` services: service1: charm: "test" service2: charm: "test" relations: - ["service1:prova", "service2:prova"] `, charms: map[string]charm.Charm{ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), }, errors: []string{ `relation "service1:prova" to "service2:prova" relates provider to provider`, }, }, { about: "provider to provider relation", data: ` services: service1: charm: "test" service2: charm: "test" relations: - ["service1:reqa", "service2:reqa"] `, charms: map[string]charm.Charm{ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), }, errors: []string{ `relation "service1:reqa" to "service2:reqa" relates requirer to requirer`, }, }, { about: "interface mismatch", data: ` services: service1: charm: "test" service2: charm: "test" relations: - ["service1:reqa", "service2:provb"] `, charms: map[string]charm.Charm{ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), }, errors: []string{ `mismatched interface between "service2:provb" and "service1:reqa" ("b" vs "a")`, }, }, { about: "different charms", data: ` services: service1: charm: "test1" service2: charm: "test2" relations: - ["service1:reqa", "service2:prova"] `, charms: map[string]charm.Charm{ "test1": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), "test2": testCharm("test", ""), }, errors: []string{ `charm "test2" used by service "service2" does not define relation "prova"`, }, }, { about: "ambiguous relation", data: ` services: service1: charm: "test1" service2: charm: "test2" relations: - [service1, service2] `, charms: map[string]charm.Charm{ "test1": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), "test2": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), }, errors: []string{ `cannot infer endpoint between service1 and 
service2: ambiguous relation: service1 service2 could refer to "service1:prova service2:reqa"; "service1:provb service2:reqb"; "service1:reqa service2:prova"; "service1:reqb service2:provb"`, }, }, { about: "relation using juju-info", data: ` services: service1: charm: "provider" service2: charm: "requirer" relations: - [service1, service2] `, charms: map[string]charm.Charm{ "provider": testCharm("provider", ""), "requirer": testCharm("requirer", "| req:juju-info"), }, }, { about: "ambiguous when implicit relations taken into account", data: ` services: service1: charm: "provider" service2: charm: "requirer" relations: - [service1, service2] `, charms: map[string]charm.Charm{ "provider": testCharm("provider", "provdb:db | "), "requirer": testCharm("requirer", "| reqdb:db reqinfo:juju-info"), }, }, { about: "half of relation left open", data: ` services: service1: charm: "provider" service2: charm: "requirer" relations: - ["service1:prova2", service2] `, charms: map[string]charm.Charm{ "provider": testCharm("provider", "prova1:a prova2:a | "), "requirer": testCharm("requirer", "| reqa:a"), }, }, { about: "duplicate relation between open and fully-specified relations", data: ` services: service1: charm: "provider" service2: charm: "requirer" relations: - ["service1:prova", "service2:reqa"] - ["service1", "service2"] `, charms: map[string]charm.Charm{ "provider": testCharm("provider", "prova:a | "), "requirer": testCharm("requirer", "| reqa:a"), }, errors: []string{ `relation ["service1" "service2"] is defined more than once`, }, }, { about: "configuration options specified", data: ` services: service1: charm: "test" options: title: "some title" skill-level: 245 service2: charm: "test" options: title: "another title" `, charms: map[string]charm.Charm{ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), }, }, { about: "invalid type for option", data: ` services: service1: charm: "test" options: title: "some title" skill-level: "too much" service2: charm: 
"test" options: title: "another title" `, charms: map[string]charm.Charm{ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), }, errors: []string{ `cannot validate service "service1": option "skill-level" expected int, got "too much"`, }, }, { about: "unknown option", data: ` services: service1: charm: "test" options: title: "some title" unknown-option: 2345 `, charms: map[string]charm.Charm{ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), }, errors: []string{ `cannot validate service "service1": configuration option "unknown-option" not found in charm "test"`, }, }, { about: "multiple config problems", data: ` services: service1: charm: "test" options: title: "some title" unknown-option: 2345 service2: charm: "test" options: title: 123 another-unknown: 2345 `, charms: map[string]charm.Charm{ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), }, errors: []string{ `cannot validate service "service1": configuration option "unknown-option" not found in charm "test"`, `cannot validate service "service2": configuration option "another-unknown" not found in charm "test"`, `cannot validate service "service2": option "title" expected string, got 123`, }, }, { about: "subordinate charm with more than zero units", data: ` services: testsub: charm: "testsub" num_units: 1 `, charms: map[string]charm.Charm{ "testsub": testCharm("test-sub", ""), }, errors: []string{ `service "testsub" is subordinate but has non-zero num_units`, }, }, { about: "subordinate charm with more than one unit", data: ` services: testsub: charm: "testsub" num_units: 1 `, charms: map[string]charm.Charm{ "testsub": testCharm("test-sub", ""), }, errors: []string{ `service "testsub" is subordinate but has non-zero num_units`, }, }, { about: "subordinate charm with to-clause", data: ` services: testsub: charm: "testsub" to: [0] machines: 0: `, charms: map[string]charm.Charm{ "testsub": testCharm("test-sub", ""), }, errors: []string{ `service "testsub" is subordinate but 
specifies unit placement`, `too many units specified in unit placement for service "testsub"`, }, }, { about: "charm with unspecified units and more than one to: entry", data: ` services: test: charm: "test" to: [0, 1] machines: 0: 1: `, errors: []string{ `too many units specified in unit placement for service "test"`, }, }} func (*bundleDataSuite) TestVerifyWithCharmsErrors(c *gc.C) { for i, test := range verifyWithCharmsErrorsTests { c.Logf("test %d: %s", i, test.about) assertVerifyErrors(c, test.data, test.charms, test.errors) } } var parsePlacementTests = []struct { placement string expect *charm.UnitPlacement expectErr string }{{ placement: "lxc:service/0", expect: &charm.UnitPlacement{ ContainerType: "lxc", Service: "service", Unit: 0, }, }, { placement: "lxc:service", expect: &charm.UnitPlacement{ ContainerType: "lxc", Service: "service", Unit: -1, }, }, { placement: "lxc:99", expect: &charm.UnitPlacement{ ContainerType: "lxc", Machine: "99", Unit: -1, }, }, { placement: "lxc:new", expect: &charm.UnitPlacement{ ContainerType: "lxc", Machine: "new", Unit: -1, }, }, { placement: "service/0", expect: &charm.UnitPlacement{ Service: "service", Unit: 0, }, }, { placement: "service", expect: &charm.UnitPlacement{ Service: "service", Unit: -1, }, }, { placement: "service45", expect: &charm.UnitPlacement{ Service: "service45", Unit: -1, }, }, { placement: "99", expect: &charm.UnitPlacement{ Machine: "99", Unit: -1, }, }, { placement: "new", expect: &charm.UnitPlacement{ Machine: "new", Unit: -1, }, }, { placement: ":0", expectErr: `invalid placement syntax ":0"`, }, { placement: "05", expectErr: `invalid placement syntax "05"`, }, { placement: "new/2", expectErr: `invalid placement syntax "new/2"`, }} func (*bundleDataSuite) TestParsePlacement(c *gc.C) { for i, test := range parsePlacementTests { c.Logf("test %d: %q", i, test.placement) up, err := charm.ParsePlacement(test.placement) if test.expectErr != "" { c.Assert(err, gc.ErrorMatches, test.expectErr) } else { 
c.Assert(err, gc.IsNil) c.Assert(up, jc.DeepEquals, test.expect) } } } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/bundledir_test.go0000664000175000017500000000301112672604527023656 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package charm_test import ( "os" "path/filepath" "github.com/juju/testing" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" ) type BundleDirSuite struct { testing.IsolationSuite } var _ = gc.Suite(&BundleDirSuite{}) func (s *BundleDirSuite) TestReadBundleDir(c *gc.C) { path := bundleDirPath(c, "wordpress-simple") dir, err := charm.ReadBundleDir(path) c.Assert(err, gc.IsNil) checkWordpressBundle(c, dir, path) } func (s *BundleDirSuite) TestReadBundleDirWithoutREADME(c *gc.C) { path := cloneDir(c, bundleDirPath(c, "wordpress-simple")) err := os.Remove(filepath.Join(path, "README.md")) c.Assert(err, gc.IsNil) dir, err := charm.ReadBundleDir(path) c.Assert(err, gc.ErrorMatches, "cannot read README file: .*") c.Assert(dir, gc.IsNil) } func (s *BundleDirSuite) TestArchiveTo(c *gc.C) { baseDir := c.MkDir() charmDir := cloneDir(c, bundleDirPath(c, "wordpress-simple")) s.assertArchiveTo(c, baseDir, charmDir) } func (s *BundleDirSuite) assertArchiveTo(c *gc.C, baseDir, bundleDir string) { dir, err := charm.ReadBundleDir(bundleDir) c.Assert(err, gc.IsNil) path := filepath.Join(baseDir, "archive.bundle") file, err := os.Create(path) c.Assert(err, gc.IsNil) err = dir.ArchiveTo(file) file.Close() c.Assert(err, gc.IsNil) archive, err := charm.ReadBundleArchive(path) c.Assert(err, gc.IsNil) c.Assert(archive.ReadMe(), gc.Equals, dir.ReadMe()) c.Assert(archive.Data(), gc.DeepEquals, dir.Data()) } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/resources_test.go0000664000175000017500000000453312672604527023732 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. 
package charm_test import ( jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" ) var _ = gc.Suite(&resourceSuite{}) type resourceSuite struct{} func (s *resourceSuite) TestSchemaOkay(c *gc.C) { raw := map[interface{}]interface{}{ "type": "file", "filename": "filename.tgz", "description": "One line that is useful when operators need to push it.", } v, err := charm.ResourceSchema.Coerce(raw, nil) c.Assert(err, jc.ErrorIsNil) c.Check(v, jc.DeepEquals, map[string]interface{}{ "type": "file", "filename": "filename.tgz", "description": "One line that is useful when operators need to push it.", }) } func (s *resourceSuite) TestSchemaMissingType(c *gc.C) { raw := map[interface{}]interface{}{ "filename": "filename.tgz", "description": "One line that is useful when operators need to push it.", } v, err := charm.ResourceSchema.Coerce(raw, nil) c.Assert(err, jc.ErrorIsNil) c.Check(v, jc.DeepEquals, map[string]interface{}{ "type": "file", "filename": "filename.tgz", "description": "One line that is useful when operators need to push it.", }) } func (s *resourceSuite) TestSchemaUnknownType(c *gc.C) { raw := map[interface{}]interface{}{ "type": "repo", "filename": "juju", "description": "One line that is useful when operators need to push it.", } v, err := charm.ResourceSchema.Coerce(raw, nil) c.Assert(err, jc.ErrorIsNil) c.Check(v, jc.DeepEquals, map[string]interface{}{ "type": "repo", "filename": "juju", "description": "One line that is useful when operators need to push it.", }) } func (s *resourceSuite) TestSchemaMissingPath(c *gc.C) { raw := map[interface{}]interface{}{ "type": "file", "description": "One line that is useful when operators need to push it.", } _, err := charm.ResourceSchema.Coerce(raw, nil) c.Check(err, gc.NotNil) } func (s *resourceSuite) TestSchemaMissingComment(c *gc.C) { raw := map[interface{}]interface{}{ "type": "file", "filename": "filename.tgz", } v, err := charm.ResourceSchema.Coerce(raw, nil) 
c.Assert(err, jc.ErrorIsNil) c.Check(v, jc.DeepEquals, map[string]interface{}{ "type": "file", "filename": "filename.tgz", "description": "", }) } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/HACKING.md0000664000175000017500000000060212672604527021711 0ustar marcomarco# HACKING See README for information about gopkg.in ## Developing If you are to develop on a versioned branch, use gopkg.in. go get -u -v -t gopkg.in/juju/charm.v2/... gopkg.in names the local branch master. To submit a pull request, push to your github branch using a refspec which reflects the version tag you are using. git push git@github.com:jrwren/charm +master:v2 charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/README.md0000664000175000017500000000053212672604527021604 0ustar marcomarcoJuju charms =========== This package parses juju charms. ## Versions Stable versions of this API are available on gopkg.in at gopkg.in/juju/charm.vD where D is a version spec. If you are viewing this readme on github.com you can click the 'branch:' button above to view tags and branches. See http://labix.org/gopkg.in for more information. charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/extra_bindings_test.go0000664000175000017500000000405012672604527024712 0ustar marcomarco// Copyright 2016 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. 
package charm_test import ( jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" ) var _ = gc.Suite(&extraBindingsSuite{}) type extraBindingsSuite struct { riakMeta charm.Meta } func (s *extraBindingsSuite) SetUpTest(c *gc.C) { riakMeta, err := charm.ReadMeta(repoMeta(c, "riak")) c.Assert(err, jc.ErrorIsNil) s.riakMeta = *riakMeta } func (s *extraBindingsSuite) TestSchemaOkay(c *gc.C) { raw := map[interface{}]interface{}{ "foo": nil, "bar": nil, } v, err := charm.ExtraBindingsSchema.Coerce(raw, nil) c.Assert(err, jc.ErrorIsNil) c.Check(v, jc.DeepEquals, map[interface{}]interface{}{ "foo": nil, "bar": nil, }) } func (s *extraBindingsSuite) TestValidateWithEmptyNonNilMap(c *gc.C) { s.riakMeta.ExtraBindings = map[string]charm.ExtraBinding{} err := charm.ValidateMetaExtraBindings(s.riakMeta) c.Assert(err, gc.ErrorMatches, "extra bindings cannot be empty when specified") } func (s *extraBindingsSuite) TestValidateWithEmptyName(c *gc.C) { s.riakMeta.ExtraBindings = map[string]charm.ExtraBinding{ "": charm.ExtraBinding{Name: ""}, } err := charm.ValidateMetaExtraBindings(s.riakMeta) c.Assert(err, gc.ErrorMatches, "missing binding name") } func (s *extraBindingsSuite) TestValidateWithMismatchedName(c *gc.C) { s.riakMeta.ExtraBindings = map[string]charm.ExtraBinding{ "bar": charm.ExtraBinding{Name: "foo"}, } err := charm.ValidateMetaExtraBindings(s.riakMeta) c.Assert(err, gc.ErrorMatches, `mismatched extra binding name: got "foo", expected "bar"`) } func (s *extraBindingsSuite) TestValidateWithRelationNamesMatchingExtraBindings(c *gc.C) { s.riakMeta.ExtraBindings = map[string]charm.ExtraBinding{ "admin": charm.ExtraBinding{Name: "admin"}, "ring": charm.ExtraBinding{Name: "ring"}, "foo": charm.ExtraBinding{Name: "foo"}, } err := charm.ValidateMetaExtraBindings(s.riakMeta) c.Assert(err, gc.ErrorMatches, `relation names \(admin, ring\) cannot be used in extra bindings`) } 
charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/config.go0000664000175000017500000001616112672604527022126 0ustar marcomarco// Copyright 2011, 2012, 2013 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package charm import ( "fmt" "io" "io/ioutil" "strconv" "github.com/juju/schema" "gopkg.in/yaml.v1" ) // Settings is a group of charm config option names and values. A Settings // S is considered valid by the Config C if every key in S is an option in // C, and every value either has the correct type or is nil. type Settings map[string]interface{} // Option represents a single charm config option. type Option struct { Type string `yaml:"type"` Description string `yaml:"description,omitempty"` Default interface{} `yaml:"default,omitempty"` } // error replaces any supplied non-nil error with a new error describing a // validation failure for the supplied value. func (option Option) error(err *error, name string, value interface{}) { if *err != nil { *err = fmt.Errorf("option %q expected %s, got %#v", name, option.Type, value) } } // validate returns an appropriately-typed value for the supplied value, or // returns an error if it cannot be converted to the correct type. Nil values // are always considered valid. func (option Option) validate(name string, value interface{}) (_ interface{}, err error) { if value == nil { return nil, nil } defer option.error(&err, name, value) if checker := optionTypeCheckers[option.Type]; checker != nil { if value, err = checker.Coerce(value, nil); err != nil { return nil, err } return value, nil } panic(fmt.Errorf("option %q has unknown type %q", name, option.Type)) } var optionTypeCheckers = map[string]schema.Checker{ "string": schema.String(), "int": schema.Int(), "float": schema.Float(), "boolean": schema.Bool(), } // parse returns an appropriately-typed value for the supplied string, or // returns an error if it cannot be parsed to the correct type. 
func (option Option) parse(name, str string) (_ interface{}, err error) { defer option.error(&err, name, str) switch option.Type { case "string": return str, nil case "int": return strconv.ParseInt(str, 10, 64) case "float": return strconv.ParseFloat(str, 64) case "boolean": return strconv.ParseBool(str) } panic(fmt.Errorf("option %q has unknown type %q", name, option.Type)) } // Config represents the supported configuration options for a charm, // as declared in its config.yaml file. type Config struct { Options map[string]Option } // NewConfig returns a new Config without any options. func NewConfig() *Config { return &Config{map[string]Option{}} } // ReadConfig reads a Config in YAML format. func ReadConfig(r io.Reader) (*Config, error) { data, err := ioutil.ReadAll(r) if err != nil { return nil, err } var config *Config if err := yaml.Unmarshal(data, &config); err != nil { return nil, err } if config == nil { return nil, fmt.Errorf("invalid config: empty configuration") } if config.Options == nil { // We are allowed an empty configuration if the options // field is explicitly specified, but there is no easy way // to tell if it was specified or not without unmarshaling // into interface{} and explicitly checking the field. var configInterface interface{} if err := yaml.Unmarshal(data, &configInterface); err != nil { return nil, err } m, _ := configInterface.(map[interface{}]interface{}) if _, ok := m["options"]; !ok { return nil, fmt.Errorf("invalid config: empty configuration") } } for name, option := range config.Options { switch option.Type { case "string", "int", "float", "boolean": case "": // Missing type is valid in python. option.Type = "string" default: return nil, fmt.Errorf("invalid config: option %q has unknown type %q", name, option.Type) } def := option.Default if def == "" && option.Type == "string" { // Skip normal validation for compatibility with pyjuju. 
} else if option.Default, err = option.validate(name, def); err != nil { option.error(&err, name, def) return nil, fmt.Errorf("invalid config default: %v", err) } config.Options[name] = option } return config, nil } // option returns the named option from the config, or an error if none // such exists. func (c *Config) option(name string) (Option, error) { if option, ok := c.Options[name]; ok { return option, nil } return Option{}, fmt.Errorf("unknown option %q", name) } // DefaultSettings returns settings containing the default value of every // option in the config. Default values may be nil. func (c *Config) DefaultSettings() Settings { out := make(Settings) for name, option := range c.Options { out[name] = option.Default } return out } // ValidateSettings returns a copy of the supplied settings with a consistent type // for each value. It returns an error if the settings contain unknown keys // or invalid values. func (c *Config) ValidateSettings(settings Settings) (Settings, error) { out := make(Settings) for name, value := range settings { if option, err := c.option(name); err != nil { return nil, err } else if value, err = option.validate(name, value); err != nil { return nil, err } out[name] = value } return out, nil } // FilterSettings returns the subset of the supplied settings that are valid. func (c *Config) FilterSettings(settings Settings) Settings { out := make(Settings) for name, value := range settings { if option, err := c.option(name); err == nil { if value, err := option.validate(name, value); err == nil { out[name] = value } } } return out } // ParseSettingsStrings returns settings derived from the supplied map. Every // value in the map must be parseable to the correct type for the option // identified by its key. Empty values are interpreted as nil. 
func (c *Config) ParseSettingsStrings(values map[string]string) (Settings, error) { out := make(Settings) for name, str := range values { option, err := c.option(name) if err != nil { return nil, err } value, err := option.parse(name, str) if err != nil { return nil, err } out[name] = value } return out, nil } // ParseSettingsYAML returns settings derived from the supplied YAML data. The // YAML must unmarshal to a map of strings to settings data; the supplied key // must be present in the map, and must point to a map in which every value // must have, or be a string parseable to, the correct type for the associated // config option. Empty strings and nil values are both interpreted as nil. func (c *Config) ParseSettingsYAML(yamlData []byte, key string) (Settings, error) { var allSettings map[string]Settings if err := yaml.Unmarshal(yamlData, &allSettings); err != nil { return nil, fmt.Errorf("cannot parse settings data: %v", err) } settings, ok := allSettings[key] if !ok { return nil, fmt.Errorf("no settings found for %q", key) } out := make(Settings) for name, value := range settings { option, err := c.option(name) if err != nil { return nil, err } // Accept string values for compatibility with python. if str, ok := value.(string); ok { if value, err = option.parse(name, str); err != nil { return nil, err } } else if value, err = option.validate(name, value); err != nil { return nil, err } out[name] = value } return out, nil } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/meta_test.go0000664000175000017500000007606712672604527022661 0ustar marcomarco// Copyright 2011, 2012, 2013 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. 
package charm_test import ( "bytes" "fmt" "io" "io/ioutil" "os" "path/filepath" "strings" jc "github.com/juju/testing/checkers" "github.com/juju/version" gc "gopkg.in/check.v1" "gopkg.in/yaml.v1" yamlv2 "gopkg.in/yaml.v2" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charm.v6-unstable/resource" ) func repoMeta(c *gc.C, name string) io.Reader { charmDir := charmDirPath(c, name) file, err := os.Open(filepath.Join(charmDir, "metadata.yaml")) c.Assert(err, gc.IsNil) defer file.Close() data, err := ioutil.ReadAll(file) c.Assert(err, gc.IsNil) return bytes.NewReader(data) } type MetaSuite struct{} var _ = gc.Suite(&MetaSuite{}) func (s *MetaSuite) TestReadMetaVersion1(c *gc.C) { meta, err := charm.ReadMeta(repoMeta(c, "dummy")) c.Assert(err, gc.IsNil) c.Assert(meta.Name, gc.Equals, "dummy") c.Assert(meta.Summary, gc.Equals, "That's a dummy charm.") c.Assert(meta.Description, gc.Equals, "This is a longer description which\npotentially contains multiple lines.\n") c.Assert(meta.Format, gc.Equals, 1) c.Assert(meta.OldRevision, gc.Equals, 0) c.Assert(meta.Subordinate, gc.Equals, false) } func (s *MetaSuite) TestReadMetaVersion2(c *gc.C) { meta, err := charm.ReadMeta(repoMeta(c, "format2")) c.Assert(err, gc.IsNil) c.Assert(meta.Name, gc.Equals, "format2") c.Assert(meta.Format, gc.Equals, 2) c.Assert(meta.Categories, gc.HasLen, 0) c.Assert(meta.Terms, gc.HasLen, 0) } func (s *MetaSuite) TestCheckTerms(c *gc.C) { tests := []struct { about string terms []string expectError string }{{ about: "valid terms", terms: []string{"term/1", "term/2"}, }, { about: "missing revision number", terms: []string{"term/1", "term"}, expectError: "invalid term name \"term\": must match.*", }, { about: "revision not a number", terms: []string{"term/1", "term/a"}, expectError: "invalid term name \"term/a\": must match.*", }, { about: "wrong format", terms: []string{"term/1", "term/a/1"}, expectError: "invalid term name \"term/a/1\": must match.*", }, { about: "term may not contain spaces", terms: 
[]string{"term/1", "term about a term"}, expectError: "invalid term name \"term about a term\": must match.*", }, { about: "term name must start with lowercase letter", terms: []string{"Term/1"}, expectError: `invalid term name "Term/1": must match.*`, }, { about: "term name match the regexp", terms: []string{"term_123-23aAf/1"}, expectError: "invalid term name \"term_123-23aAf/1\": must match.*", }, } for i, test := range tests { c.Logf("running test %v: %v", i, test.about) meta := charm.Meta{Terms: test.terms} err := meta.Check() if test.expectError == "" { c.Assert(err, jc.ErrorIsNil) } else { c.Assert(err, gc.ErrorMatches, test.expectError) } } } func (s *MetaSuite) TestReadCategory(c *gc.C) { meta, err := charm.ReadMeta(repoMeta(c, "category")) c.Assert(err, gc.IsNil) c.Assert(meta.Categories, jc.DeepEquals, []string{"database"}) } func (s *MetaSuite) TestReadTerms(c *gc.C) { meta, err := charm.ReadMeta(repoMeta(c, "terms")) c.Assert(err, gc.IsNil) c.Assert(meta.Terms, jc.DeepEquals, []string{"term1", "term2"}) } func (s *MetaSuite) TestReadTags(c *gc.C) { meta, err := charm.ReadMeta(repoMeta(c, "category")) c.Assert(err, gc.IsNil) c.Assert(meta.Tags, jc.DeepEquals, []string{"openstack", "storage"}) } func (s *MetaSuite) TestSubordinate(c *gc.C) { meta, err := charm.ReadMeta(repoMeta(c, "logging")) c.Assert(err, gc.IsNil) c.Assert(meta.Subordinate, gc.Equals, true) } func (s *MetaSuite) TestSubordinateWithoutContainerRelation(c *gc.C) { r := repoMeta(c, "dummy") hackYaml := ReadYaml(r) hackYaml["subordinate"] = true _, err := charm.ReadMeta(hackYaml.Reader()) c.Assert(err, gc.ErrorMatches, "subordinate charm \"dummy\" lacks \"requires\" relation with container scope") } func (s *MetaSuite) TestScopeConstraint(c *gc.C) { meta, err := charm.ReadMeta(repoMeta(c, "logging")) c.Assert(err, gc.IsNil) c.Assert(meta.Provides["logging-client"].Scope, gc.Equals, charm.ScopeGlobal) c.Assert(meta.Requires["logging-directory"].Scope, gc.Equals, charm.ScopeContainer) 
c.Assert(meta.Subordinate, gc.Equals, true) } func (s *MetaSuite) TestParseMetaRelations(c *gc.C) { meta, err := charm.ReadMeta(repoMeta(c, "mysql")) c.Assert(err, gc.IsNil) c.Assert(meta.Provides["server"], gc.Equals, charm.Relation{ Name: "server", Role: charm.RoleProvider, Interface: "mysql", Scope: charm.ScopeGlobal, }) c.Assert(meta.Requires, gc.IsNil) c.Assert(meta.Peers, gc.IsNil) meta, err = charm.ReadMeta(repoMeta(c, "riak")) c.Assert(err, gc.IsNil) c.Assert(meta.Provides["endpoint"], gc.Equals, charm.Relation{ Name: "endpoint", Role: charm.RoleProvider, Interface: "http", Scope: charm.ScopeGlobal, }) c.Assert(meta.Provides["admin"], gc.Equals, charm.Relation{ Name: "admin", Role: charm.RoleProvider, Interface: "http", Scope: charm.ScopeGlobal, }) c.Assert(meta.Peers["ring"], gc.Equals, charm.Relation{ Name: "ring", Role: charm.RolePeer, Interface: "riak", Limit: 1, Scope: charm.ScopeGlobal, }) c.Assert(meta.Requires, gc.IsNil) meta, err = charm.ReadMeta(repoMeta(c, "terracotta")) c.Assert(err, gc.IsNil) c.Assert(meta.Provides["dso"], gc.Equals, charm.Relation{ Name: "dso", Role: charm.RoleProvider, Interface: "terracotta", Optional: true, Scope: charm.ScopeGlobal, }) c.Assert(meta.Peers["server-array"], gc.Equals, charm.Relation{ Name: "server-array", Role: charm.RolePeer, Interface: "terracotta-server", Limit: 1, Scope: charm.ScopeGlobal, }) c.Assert(meta.Requires, gc.IsNil) meta, err = charm.ReadMeta(repoMeta(c, "wordpress")) c.Assert(err, gc.IsNil) c.Assert(meta.Provides["url"], gc.Equals, charm.Relation{ Name: "url", Role: charm.RoleProvider, Interface: "http", Scope: charm.ScopeGlobal, }) c.Assert(meta.Requires["db"], gc.Equals, charm.Relation{ Name: "db", Role: charm.RoleRequirer, Interface: "mysql", Limit: 1, Scope: charm.ScopeGlobal, }) c.Assert(meta.Requires["cache"], gc.Equals, charm.Relation{ Name: "cache", Role: charm.RoleRequirer, Interface: "varnish", Limit: 2, Optional: true, Scope: charm.ScopeGlobal, }) c.Assert(meta.Peers, gc.IsNil) } 
func (s *MetaSuite) TestCombinedRelations(c *gc.C) { meta, err := charm.ReadMeta(repoMeta(c, "riak")) c.Assert(err, gc.IsNil) combinedRelations := meta.CombinedRelations() expectedLength := len(meta.Provides) + len(meta.Requires) + len(meta.Peers) c.Assert(combinedRelations, gc.HasLen, expectedLength) c.Assert(combinedRelations, jc.DeepEquals, map[string]charm.Relation{ "endpoint": { Name: "endpoint", Role: charm.RoleProvider, Interface: "http", Scope: charm.ScopeGlobal, }, "admin": { Name: "admin", Role: charm.RoleProvider, Interface: "http", Scope: charm.ScopeGlobal, }, "ring": { Name: "ring", Role: charm.RolePeer, Interface: "riak", Limit: 1, Scope: charm.ScopeGlobal, }, }) } var relationsConstraintsTests = []struct { rels string err string }{ { "provides:\n foo: ping\nrequires:\n foo: pong", `charm "a" using a duplicated relation name: "foo"`, }, { "requires:\n foo: ping\npeers:\n foo: pong", `charm "a" using a duplicated relation name: "foo"`, }, { "peers:\n foo: ping\nprovides:\n foo: pong", `charm "a" using a duplicated relation name: "foo"`, }, { "provides:\n juju: blob", `charm "a" using a reserved relation name: "juju"`, }, { "requires:\n juju: blob", `charm "a" using a reserved relation name: "juju"`, }, { "peers:\n juju: blob", `charm "a" using a reserved relation name: "juju"`, }, { "provides:\n juju-snap: blub", `charm "a" using a reserved relation name: "juju-snap"`, }, { "requires:\n juju-crackle: blub", `charm "a" using a reserved relation name: "juju-crackle"`, }, { "peers:\n juju-pop: blub", `charm "a" using a reserved relation name: "juju-pop"`, }, { "provides:\n innocuous: juju", `charm "a" relation "innocuous" using a reserved interface: "juju"`, }, { "peers:\n innocuous: juju", `charm "a" relation "innocuous" using a reserved interface: "juju"`, }, { "provides:\n innocuous: juju-snap", `charm "a" relation "innocuous" using a reserved interface: "juju-snap"`, }, { "peers:\n innocuous: juju-snap", `charm "a" relation "innocuous" using a 
reserved interface: "juju-snap"`, }, } func (s *MetaSuite) TestRelationsConstraints(c *gc.C) { check := func(s, e string) { meta, err := charm.ReadMeta(strings.NewReader(s)) if e != "" { c.Assert(err, gc.ErrorMatches, e) c.Assert(meta, gc.IsNil) } else { c.Assert(err, gc.IsNil) c.Assert(meta, gc.NotNil) } } prefix := "name: a\nsummary: b\ndescription: c\n" for i, t := range relationsConstraintsTests { c.Logf("test %d", i) check(prefix+t.rels, t.err) check(prefix+"subordinate: true\n"+t.rels, t.err) } // The juju-* namespace is accessible to container-scoped require // relations on subordinate charms. check(prefix+` subordinate: true requires: juju-info: interface: juju-info scope: container`, "") // The juju-* interfaces are allowed on any require relation. check(prefix+` requires: innocuous: juju-info`, "") } // dummyMetadata contains a minimally valid charm metadata.yaml // for testing valid and invalid series. const dummyMetadata = "name: a\nsummary: b\ndescription: c" func (s *MetaSuite) TestSeries(c *gc.C) { // series not specified meta, err := charm.ReadMeta(strings.NewReader(dummyMetadata)) c.Assert(err, gc.IsNil) c.Check(meta.Series, gc.HasLen, 0) charmMeta := fmt.Sprintf("%s\nseries:", dummyMetadata) for _, seriesName := range []string{"precise", "trusty", "plan9"} { charmMeta = fmt.Sprintf("%s\n - %s", charmMeta, seriesName) } meta, err = charm.ReadMeta(strings.NewReader(charmMeta)) c.Assert(err, gc.IsNil) c.Assert(meta.Series, gc.DeepEquals, []string{"precise", "trusty", "plan9"}) } func (s *MetaSuite) TestInvalidSeries(c *gc.C) { for _, seriesName := range []string{"pre-c1se", "pre^cise", "cp/m", "OpenVMS"} { _, err := charm.ReadMeta(strings.NewReader( fmt.Sprintf("%s\nseries:\n - %s\n", dummyMetadata, seriesName))) c.Assert(err, gc.NotNil) c.Check(err, gc.ErrorMatches, `charm "a" declares invalid series: .*`) } } func (s *MetaSuite) TestMinJujuVersion(c *gc.C) { // series not specified meta, err := charm.ReadMeta(strings.NewReader(dummyMetadata)) 
c.Assert(err, gc.IsNil) c.Check(meta.Series, gc.HasLen, 0) charmMeta := fmt.Sprintf("%s\nmin-juju-version: ", dummyMetadata) vals := []version.Number{ {Major: 1, Minor: 25}, {Major: 1, Minor: 25, Tag: "alpha1"}, {Major: 1, Minor: 25, Patch: 1}, } for _, ver := range vals { val := charmMeta + ver.String() meta, err = charm.ReadMeta(strings.NewReader(val)) c.Assert(err, gc.IsNil) c.Assert(meta.MinJujuVersion, gc.Equals, ver) } } func (s *MetaSuite) TestInvalidMinJujuVersion(c *gc.C) { _, err := charm.ReadMeta(strings.NewReader(dummyMetadata + "\nmin-juju-version: invalid-version")) c.Check(err, gc.ErrorMatches, `invalid min-juju-version: invalid version "invalid-version"`) } func (s *MetaSuite) TestNoMinJujuVersion(c *gc.C) { meta, err := charm.ReadMeta(strings.NewReader(dummyMetadata)) c.Assert(err, jc.ErrorIsNil) c.Check(meta.MinJujuVersion, gc.Equals, version.Zero) } func (s *MetaSuite) TestCheckMismatchedRelationName(c *gc.C) { // This Check case cannot be covered by the above // TestRelationsConstraints tests. meta := charm.Meta{ Name: "foo", Provides: map[string]charm.Relation{ "foo": { Name: "foo", Role: charm.RolePeer, Interface: "x", Limit: 1, Scope: charm.ScopeGlobal, }, }, } err := meta.Check() c.Assert(err, gc.ErrorMatches, `charm "foo" has mismatched role "peer"; expected "provider"`) } func (s *MetaSuite) TestCheckMismatchedRole(c *gc.C) { // This Check case cannot be covered by the above // TestRelationsConstraints tests. 
meta := charm.Meta{ Name: "foo", Provides: map[string]charm.Relation{ "foo": { Role: charm.RolePeer, Interface: "foo", Limit: 1, Scope: charm.ScopeGlobal, }, }, } err := meta.Check() c.Assert(err, gc.ErrorMatches, `charm "foo" has mismatched relation name ""; expected "foo"`) } func (s *MetaSuite) TestCheckMismatchedExtraBindingName(c *gc.C) { meta := charm.Meta{ Name: "foo", ExtraBindings: map[string]charm.ExtraBinding{ "foo": {Name: "bar"}, }, } err := meta.Check() c.Assert(err, gc.ErrorMatches, `charm "foo" has invalid extra bindings: mismatched extra binding name: got "bar", expected "foo"`) } func (s *MetaSuite) TestCheckEmptyNameKeyOrEmptyExtraBindingName(c *gc.C) { meta := charm.Meta{ Name: "foo", ExtraBindings: map[string]charm.ExtraBinding{"": {Name: "bar"}}, } err := meta.Check() expectedError := `charm "foo" has invalid extra bindings: missing binding name` c.Assert(err, gc.ErrorMatches, expectedError) meta.ExtraBindings = map[string]charm.ExtraBinding{"bar": {Name: ""}} err = meta.Check() c.Assert(err, gc.ErrorMatches, expectedError) } // Test rewriting of a given interface specification into long form. // // InterfaceExpander uses `coerce` to do one of two things: // // - Rewrite shorthand to the long form used for actual storage // - Fills in defaults, including a configurable `limit` // // This test ensures test coverage on each of these branches, along // with ensuring the conversion object properly raises SchemaError // exceptions on invalid data. 
func (s *MetaSuite) TestIfaceExpander(c *gc.C) { e := charm.IfaceExpander(nil) path := []string{""} // Shorthand is properly rewritten v, err := e.Coerce("http", path) c.Assert(err, gc.IsNil) c.Assert(v, jc.DeepEquals, map[string]interface{}{"interface": "http", "limit": nil, "optional": false, "scope": string(charm.ScopeGlobal)}) // Defaults are properly applied v, err = e.Coerce(map[string]interface{}{"interface": "http"}, path) c.Assert(err, gc.IsNil) c.Assert(v, jc.DeepEquals, map[string]interface{}{"interface": "http", "limit": nil, "optional": false, "scope": string(charm.ScopeGlobal)}) v, err = e.Coerce(map[string]interface{}{"interface": "http", "limit": 2}, path) c.Assert(err, gc.IsNil) c.Assert(v, jc.DeepEquals, map[string]interface{}{"interface": "http", "limit": int64(2), "optional": false, "scope": string(charm.ScopeGlobal)}) v, err = e.Coerce(map[string]interface{}{"interface": "http", "optional": true}, path) c.Assert(err, gc.IsNil) c.Assert(v, jc.DeepEquals, map[string]interface{}{"interface": "http", "limit": nil, "optional": true, "scope": string(charm.ScopeGlobal)}) // Invalid data raises an error. 
v, err = e.Coerce(42, path) c.Assert(err, gc.ErrorMatches, `: expected map, got int\(42\)`) v, err = e.Coerce(map[string]interface{}{"interface": "http", "optional": nil}, path) c.Assert(err, gc.ErrorMatches, ".optional: expected bool, got nothing") v, err = e.Coerce(map[string]interface{}{"interface": "http", "limit": "none, really"}, path) c.Assert(err, gc.ErrorMatches, ".limit: unexpected value.*") // Can change default limit e = charm.IfaceExpander(1) v, err = e.Coerce(map[string]interface{}{"interface": "http"}, path) c.Assert(err, gc.IsNil) c.Assert(v, jc.DeepEquals, map[string]interface{}{"interface": "http", "limit": int64(1), "optional": false, "scope": string(charm.ScopeGlobal)}) } func (s *MetaSuite) TestMetaHooks(c *gc.C) { meta, err := charm.ReadMeta(repoMeta(c, "wordpress")) c.Assert(err, gc.IsNil) hooks := meta.Hooks() expectedHooks := map[string]bool{ "install": true, "start": true, "config-changed": true, "upgrade-charm": true, "stop": true, "collect-metrics": true, "meter-status-changed": true, "leader-elected": true, "leader-deposed": true, "leader-settings-changed": true, "update-status": true, "cache-relation-joined": true, "cache-relation-changed": true, "cache-relation-departed": true, "cache-relation-broken": true, "db-relation-joined": true, "db-relation-changed": true, "db-relation-departed": true, "db-relation-broken": true, "logging-dir-relation-joined": true, "logging-dir-relation-changed": true, "logging-dir-relation-departed": true, "logging-dir-relation-broken": true, "monitoring-port-relation-joined": true, "monitoring-port-relation-changed": true, "monitoring-port-relation-departed": true, "monitoring-port-relation-broken": true, "url-relation-joined": true, "url-relation-changed": true, "url-relation-departed": true, "url-relation-broken": true, } c.Assert(hooks, jc.DeepEquals, expectedHooks) } func (s *MetaSuite) TestCodecRoundTripEmpty(c *gc.C) { for i, codec := range codecs { c.Logf("codec %d", i) empty_input := charm.Meta{} 
data, err := codec.Marshal(empty_input) c.Assert(err, gc.IsNil) var empty_output charm.Meta err = codec.Unmarshal(data, &empty_output) c.Assert(err, gc.IsNil) c.Assert(empty_input, jc.DeepEquals, empty_output) } } func (s *MetaSuite) TestCodecRoundTrip(c *gc.C) { var input = charm.Meta{ Name: "Foo", Summary: "Bar", Description: "Baz", Subordinate: true, Provides: map[string]charm.Relation{ "qux": { Interface: "quxx", Optional: true, Limit: 42, Scope: "quxxx", }, }, Requires: map[string]charm.Relation{ "qux": { Interface: "quxx", Optional: true, Limit: 42, Scope: "quxxx", }, }, Peers: map[string]charm.Relation{ "qux": { Interface: "quxx", Optional: true, Limit: 42, Scope: "quxxx", }, }, ExtraBindings: map[string]charm.ExtraBinding{ "foo": {Name: "foo"}, "qux": {Name: "qux"}, }, Categories: []string{"quxxxx", "quxxxxx"}, Tags: []string{"openstack", "storage"}, Format: 10, OldRevision: 11, Terms: []string{"test term 1", "test term 2"}, } for i, codec := range codecs { c.Logf("codec %d", i) data, err := codec.Marshal(input) c.Assert(err, gc.IsNil) var output charm.Meta err = codec.Unmarshal(data, &output) c.Assert(err, gc.IsNil) c.Assert(input, jc.DeepEquals, output) } } var implementedByTests = []struct { ifce string name string role charm.RelationRole scope charm.RelationScope match bool implicit bool }{ {"ifce-pro", "pro", charm.RoleProvider, charm.ScopeGlobal, true, false}, {"blah", "pro", charm.RoleProvider, charm.ScopeGlobal, false, false}, {"ifce-pro", "blah", charm.RoleProvider, charm.ScopeGlobal, false, false}, {"ifce-pro", "pro", charm.RoleRequirer, charm.ScopeGlobal, false, false}, {"ifce-pro", "pro", charm.RoleProvider, charm.ScopeContainer, true, false}, {"juju-info", "juju-info", charm.RoleProvider, charm.ScopeGlobal, true, true}, {"blah", "juju-info", charm.RoleProvider, charm.ScopeGlobal, false, false}, {"juju-info", "blah", charm.RoleProvider, charm.ScopeGlobal, false, false}, {"juju-info", "juju-info", charm.RoleRequirer, charm.ScopeGlobal, false, 
false}, {"juju-info", "juju-info", charm.RoleProvider, charm.ScopeContainer, true, true}, {"ifce-req", "req", charm.RoleRequirer, charm.ScopeGlobal, true, false}, {"blah", "req", charm.RoleRequirer, charm.ScopeGlobal, false, false}, {"ifce-req", "blah", charm.RoleRequirer, charm.ScopeGlobal, false, false}, {"ifce-req", "req", charm.RolePeer, charm.ScopeGlobal, false, false}, {"ifce-req", "req", charm.RoleRequirer, charm.ScopeContainer, true, false}, {"juju-info", "info", charm.RoleRequirer, charm.ScopeContainer, true, false}, {"blah", "info", charm.RoleRequirer, charm.ScopeContainer, false, false}, {"juju-info", "blah", charm.RoleRequirer, charm.ScopeContainer, false, false}, {"juju-info", "info", charm.RolePeer, charm.ScopeContainer, false, false}, {"juju-info", "info", charm.RoleRequirer, charm.ScopeGlobal, false, false}, {"ifce-peer", "peer", charm.RolePeer, charm.ScopeGlobal, true, false}, {"blah", "peer", charm.RolePeer, charm.ScopeGlobal, false, false}, {"ifce-peer", "blah", charm.RolePeer, charm.ScopeGlobal, false, false}, {"ifce-peer", "peer", charm.RoleProvider, charm.ScopeGlobal, false, false}, {"ifce-peer", "peer", charm.RolePeer, charm.ScopeContainer, true, false}, } func (s *MetaSuite) TestImplementedBy(c *gc.C) { for i, t := range implementedByTests { c.Logf("test %d", i) r := charm.Relation{ Interface: t.ifce, Name: t.name, Role: t.role, Scope: t.scope, } c.Assert(r.ImplementedBy(&dummyCharm{}), gc.Equals, t.match) c.Assert(r.IsImplicit(), gc.Equals, t.implicit) } } var metaYAMLMarshalTests = []struct { about string yaml string }{{ about: "minimal charm", yaml: ` name: minimal description: d summary: s `, }, { about: "charm with lots of stuff", yaml: ` name: big description: d summary: s subordinate: true provides: provideSimple: someinterface provideLessSimple: interface: anotherinterface optional: true scope: container limit: 3 requires: requireSimple: someinterface requireLessSimple: interface: anotherinterface optional: true scope: container 
limit: 3 peers: peerSimple: someinterface peerLessSimple: interface: peery optional: true extra-bindings: extraBar: extraFoo1: categories: [c1, c1] tags: [t1, t2] series: - someseries `, }} func (s *MetaSuite) TestYAMLMarshal(c *gc.C) { for i, test := range metaYAMLMarshalTests { c.Logf("test %d: %s", i, test.about) ch, err := charm.ReadMeta(strings.NewReader(test.yaml)) c.Assert(err, gc.IsNil) gotYAML, err := yaml.Marshal(ch) c.Assert(err, gc.IsNil) gotCh, err := charm.ReadMeta(bytes.NewReader(gotYAML)) c.Assert(err, gc.IsNil) c.Assert(gotCh, jc.DeepEquals, ch) } } func (s *MetaSuite) TestYAMLMarshalV2(c *gc.C) { for i, test := range metaYAMLMarshalTests { c.Logf("test %d: %s", i, test.about) ch, err := charm.ReadMeta(strings.NewReader(test.yaml)) c.Assert(err, gc.IsNil) gotYAML, err := yamlv2.Marshal(ch) c.Assert(err, gc.IsNil) gotCh, err := charm.ReadMeta(bytes.NewReader(gotYAML)) c.Assert(err, gc.IsNil) c.Assert(gotCh, jc.DeepEquals, ch) } } func (s *MetaSuite) TestYAMLMarshalSimpleRelationOrExtraBinding(c *gc.C) { // Check that a simple relation / extra-binding gets marshaled as a string. chYAML := ` name: minimal description: d summary: s provides: server: http requires: client: http peers: me: http extra-bindings: foo: ` ch, err := charm.ReadMeta(strings.NewReader(chYAML)) c.Assert(err, gc.IsNil) gotYAML, err := yaml.Marshal(ch) c.Assert(err, gc.IsNil) var x interface{} err = yaml.Unmarshal(gotYAML, &x) c.Assert(err, gc.IsNil) c.Assert(x, jc.DeepEquals, map[interface{}]interface{}{ "name": "minimal", "description": "d", "summary": "s", "provides": map[interface{}]interface{}{ "server": "http", }, "requires": map[interface{}]interface{}{ "client": "http", }, "peers": map[interface{}]interface{}{ "me": "http", }, "extra-bindings": map[interface{}]interface{}{ "foo": nil, }, }) } func (s *MetaSuite) TestStorage(c *gc.C) { // "type" is the only required attribute for storage. 
meta, err := charm.ReadMeta(strings.NewReader(` name: a summary: b description: c storage: store0: description: woo tee bix type: block store1: type: filesystem `)) c.Assert(err, gc.IsNil) c.Assert(meta.Storage, gc.DeepEquals, map[string]charm.Storage{ "store0": { Name: "store0", Description: "woo tee bix", Type: charm.StorageBlock, CountMin: 1, // singleton CountMax: 1, }, "store1": { Name: "store1", Type: charm.StorageFilesystem, CountMin: 1, // singleton CountMax: 1, }, }) } func (s *MetaSuite) TestStorageErrors(c *gc.C) { prefix := ` name: a summary: b description: c storage: store-bad: `[1:] type test struct { desc string yaml string err string } tests := []test{{ desc: "type is required", yaml: " required: false", err: "metadata: storage.store-bad.type: unexpected value ", }, { desc: "range must be an integer, or integer range (1)", yaml: " type: filesystem\n multiple:\n range: woat", err: `metadata: storage.store-bad.multiple.range: value "woat" does not match 'm', 'm-n', or 'm\+'`, }, { desc: "range must be an integer, or integer range (2)", yaml: " type: filesystem\n multiple:\n range: 0-abc", err: `metadata: storage.store-bad.multiple.range: value "0-abc" does not match 'm', 'm-n', or 'm\+'`, }, { desc: "range must be non-negative", yaml: " type: filesystem\n multiple:\n range: -1", err: `metadata: storage.store-bad.multiple.range: invalid count -1`, }, { desc: "range must be positive", yaml: " type: filesystem\n multiple:\n range: 0", err: `metadata: storage.store-bad.multiple.range: invalid count 0`, }, { desc: "location cannot be specified for block type storage", yaml: " type: block\n location: /dev/sdc", err: `charm "a" storage "store-bad": location may not be specified for "type: block"`, }, { desc: "minimum size must parse correctly", yaml: " type: block\n minimum-size: foo", err: `metadata: expected a non-negative number, got "foo"`, }, { desc: "minimum size must have valid suffix", yaml: " type: block\n minimum-size: 10Q", err: `metadata: invalid 
multiplier suffix "Q", expected one of MGTPEZY`, }, { desc: "properties must contain valid values", yaml: " type: block\n properties: [transient, foo]", err: `metadata: .* unexpected value "foo"`, }} for i, test := range tests { c.Logf("test %d: %s", i, test.desc) c.Logf("\n%s\n", prefix+test.yaml) _, err := charm.ReadMeta(strings.NewReader(prefix + test.yaml)) c.Assert(err, gc.ErrorMatches, test.err) } } func (s *MetaSuite) TestStorageCount(c *gc.C) { testStorageCount := func(count string, min, max int) { meta, err := charm.ReadMeta(strings.NewReader(fmt.Sprintf(` name: a summary: b description: c storage: store0: type: filesystem multiple: range: %s `, count))) c.Assert(err, gc.IsNil) store := meta.Storage["store0"] c.Assert(store, gc.NotNil) c.Assert(store.CountMin, gc.Equals, min) c.Assert(store.CountMax, gc.Equals, max) } testStorageCount("1", 1, 1) testStorageCount("0-1", 0, 1) testStorageCount("1-1", 1, 1) testStorageCount("1+", 1, -1) // n- is equivalent to n+ testStorageCount("1-", 1, -1) } func (s *MetaSuite) TestStorageLocation(c *gc.C) { meta, err := charm.ReadMeta(strings.NewReader(` name: a summary: b description: c storage: store0: type: filesystem location: /var/lib/things `)) c.Assert(err, gc.IsNil) store := meta.Storage["store0"] c.Assert(store, gc.NotNil) c.Assert(store.Location, gc.Equals, "/var/lib/things") } func (s *MetaSuite) TestStorageMinimumSize(c *gc.C) { meta, err := charm.ReadMeta(strings.NewReader(` name: a summary: b description: c storage: store0: type: filesystem minimum-size: 10G `)) c.Assert(err, gc.IsNil) store := meta.Storage["store0"] c.Assert(store, gc.NotNil) c.Assert(store.MinimumSize, gc.Equals, uint64(10*1024)) } func (s *MetaSuite) TestStorageProperties(c *gc.C) { meta, err := charm.ReadMeta(strings.NewReader(` name: a summary: b description: c storage: store0: type: filesystem properties: [transient] `)) c.Assert(err, gc.IsNil) store := meta.Storage["store0"] c.Assert(store, gc.NotNil) c.Assert(store.Properties, 
jc.SameContents, []string{"transient"}) } func (s *MetaSuite) TestExtraBindings(c *gc.C) { meta, err := charm.ReadMeta(strings.NewReader(` name: a summary: b description: c extra-bindings: endpoint-1: foo: bar-42: `)) c.Assert(err, gc.IsNil) c.Assert(meta.ExtraBindings, gc.DeepEquals, map[string]charm.ExtraBinding{ "endpoint-1": { Name: "endpoint-1", }, "foo": { Name: "foo", }, "bar-42": { Name: "bar-42", }, }) } func (s *MetaSuite) TestExtraBindingsEmptyMapError(c *gc.C) { meta, err := charm.ReadMeta(strings.NewReader(` name: a summary: b description: c extra-bindings: `)) c.Assert(err, gc.ErrorMatches, "metadata: extra-bindings: expected map, got nothing") c.Assert(meta, gc.IsNil) } func (s *MetaSuite) TestExtraBindingsNonEmptyValueError(c *gc.C) { meta, err := charm.ReadMeta(strings.NewReader(` name: a summary: b description: c extra-bindings: foo: 42 `)) c.Assert(err, gc.ErrorMatches, `metadata: extra-bindings.foo: expected empty value, got int\(42\)`) c.Assert(meta, gc.IsNil) } func (s *MetaSuite) TestExtraBindingsEmptyNameError(c *gc.C) { meta, err := charm.ReadMeta(strings.NewReader(` name: a summary: b description: c extra-bindings: "": `)) c.Assert(err, gc.ErrorMatches, `metadata: extra-bindings: expected non-empty binding name, got string\(""\)`) c.Assert(meta, gc.IsNil) } func (s *MetaSuite) TestPayloadClasses(c *gc.C) { meta, err := charm.ReadMeta(strings.NewReader(` name: a summary: b description: c payloads: monitor: type: docker kvm-guest: type: kvm `)) c.Assert(err, gc.IsNil) c.Check(meta.PayloadClasses, jc.DeepEquals, map[string]charm.PayloadClass{ "monitor": charm.PayloadClass{ Name: "monitor", Type: "docker", }, "kvm-guest": charm.PayloadClass{ Name: "kvm-guest", Type: "kvm", }, }) } func (s *MetaSuite) TestResources(c *gc.C) { meta, err := charm.ReadMeta(strings.NewReader(` name: a summary: b description: c resources: resource-name: type: file filename: filename.tgz description: "One line that is useful when operators need to push it." 
other-resource: type: file filename: other.zip `)) c.Assert(err, gc.IsNil) c.Check(meta.Resources, jc.DeepEquals, map[string]resource.Meta{ "resource-name": resource.Meta{ Name: "resource-name", Type: resource.TypeFile, Path: "filename.tgz", Description: "One line that is useful when operators need to push it.", }, "other-resource": resource.Meta{ Name: "other-resource", Type: resource.TypeFile, Path: "other.zip", }, }) } type dummyCharm struct{} func (c *dummyCharm) Config() *charm.Config { panic("unused") } func (c *dummyCharm) Metrics() *charm.Metrics { panic("unused") } func (c *dummyCharm) Actions() *charm.Actions { panic("unused") } func (c *dummyCharm) Revision() int { panic("unused") } func (c *dummyCharm) Meta() *charm.Meta { return &charm.Meta{ Provides: map[string]charm.Relation{ "pro": {Interface: "ifce-pro", Scope: charm.ScopeGlobal}, }, Requires: map[string]charm.Relation{ "req": {Interface: "ifce-req", Scope: charm.ScopeGlobal}, "info": {Interface: "juju-info", Scope: charm.ScopeContainer}, }, Peers: map[string]charm.Relation{ "peer": {Interface: "ifce-peer", Scope: charm.ScopeGlobal}, }, } } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/0000775000175000017500000000000012672604527022141 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/0000775000175000017500000000000012672604527025153 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/series/0000775000175000017500000000000012672604527026445 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/series/format2/0000775000175000017500000000000012672604527030017 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/series/format2/hooks/0000775000175000017500000000000012672604527031142 5ustar marcomarco././@LongLink0000644000000000000000000000014600000000000011604 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/series/format2/hooks/symlinkcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/series/format2/hooks/symlin0000777000175000017500000000000012672604527034017 2../targetustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/series/format2/build/0000775000175000017500000000000012672604527031116 5ustar marcomarco././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/series/format2/build/ignoredcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/series/format2/build/ignore0000664000175000017500000000000012672604527032312 0ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/0000775000175000017500000000000012672604527026424 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-simple/0000775000175000017500000000000012672604527031743 5ustar marcomarco././@LongLink0000644000000000000000000000015500000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-simple/bundle.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-simple/bun0000664000175000017500000000023112672604527032446 0ustar marcomarcoservices: wordpress: charm: wordpress mysql: charm: mysql num_units: 1 relations: - ["wordpress:db", "mysql:server"] ././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-simple/README.mdcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-simple/REA0000664000175000017500000000001712672604527032273 0ustar marcomarcoA dummy bundle 
././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-with-loggi0000775000175000017500000000000012672604527032445 5ustar marcomarco././@LongLink0000644000000000000000000000016300000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/bundle.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-with-loggi0000664000175000017500000000071012672604527032445 0ustar marcomarcoservices: wordpress: charm: wordpress num_units: 1 bindings: db: db url: public db-client: db admin-api: public mysql: charm: mysql num_units: 1 bindings: server: db logging: charm: logging relations: - ["wordpress:db", "mysql:server"] - ["wordpress:juju-info", "logging:info"] - ["mysql:juju-info", "logging:info"] ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/README.mdcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-with-loggi0000664000175000017500000000001712672604527032445 0ustar marcomarcoA dummy bundle charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/openstack/0000775000175000017500000000000012672604527030413 5ustar marcomarco././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/openstack/bundle.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/openstack/bundle.yam0000664000175000017500000001221012672604527032370 0ustar marcomarcoseries: precise services: mysql: charm: cs:precise/mysql constraints: mem=1G options: dataset-size: 
50% rabbitmq-server: charm: cs:precise/rabbitmq-server constraints: mem=1G ceph: charm: cs:precise/ceph num_units: 3 constraints: mem=1G options: monitor-count: 3 fsid: 6547bd3e-1397-11e2-82e5-53567c8d32dc monitor-secret: AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ== osd-devices: /dev/vdb osd-reformat: "yes" ephemeral-unmount: /mnt keystone: charm: cs:precise/keystone constraints: mem=1G options: admin-password: openstack admin-token: ubuntutesting openstack-dashboard: charm: cs:precise/openstack-dashboard constraints: mem=1G nova-compute: charm: cs:precise/nova-compute num_units: 3 constraints: mem=4G options: config-flags: "auto_assign_floating_ip=False" enable-live-migration: False virt-type: kvm nova-cloud-controller: charm: cs:precise/nova-cloud-controller constraints: mem=1G options: network-manager: Neutron quantum-security-groups: "yes" neutron-gateway: charm: cs:precise/quantum-gateway constraints: mem=1G cinder: charm: cs:precise/cinder options: block-device: "None" constraints": mem=1G glance: charm: cs:precise/glance constraints: mem=1G swift-proxy: charm: cs:precise/swift-proxy constraints: mem=1G options: zone-assignment: manual replicas: 3 use-https: 'no' swift-hash: fdfef9d4-8b06-11e2-8ac0-531c923c8fae swift-storage-z1: charm: cs:precise/swift-storage constraints: mem=1G options: zone: 1 block-device: vdb overwrite: "true" swift-storage-z2: charm: cs:precise/swift-storage constraints: mem=1G options: zone: 2 block-device: vdb overwrite: "true" swift-storage-z3: charm: cs:precise/swift-storage constraints: mem=1G options: zone: 3 block-device: vdb overwrite: "true" ceilometer: charm: cs:precise/ceilometer constraints: mem=1G ceilometer-agent: charm: cs:precise/ceilometer-agent mongodb: charm: cs:precise/mongodb constraints: mem=1G heat: charm: cs:precise/heat constraints: mem=1G ntp: charm: cs:precise/ntp relations: - - keystone:shared-db - mysql:shared-db - - nova-cloud-controller:shared-db - mysql:shared-db - - nova-cloud-controller:amqp - 
rabbitmq-server:amqp - - nova-cloud-controller:image-service - glance:image-service - - nova-cloud-controller:identity-service - keystone:identity-service - - nova-compute:cloud-compute - nova-cloud-controller:cloud-compute - - nova-compute:shared-db - mysql:shared-db - - nova-compute:amqp - rabbitmq-server:amqp - - nova-compute:image-service - glance:image-service - - nova-compute:ceph - ceph:client - - glance:shared-db - mysql:shared-db - - glance:identity-service - keystone:identity-service - - glance:ceph - ceph:client - - glance:image-service - cinder:image-service - - cinder:shared-db - mysql:shared-db - - cinder:amqp - rabbitmq-server:amqp - - cinder:cinder-volume-service - nova-cloud-controller:cinder-volume-service - - cinder:identity-service - keystone:identity-service - - cinder:ceph - ceph:client - - neutron-gateway:shared-db - mysql:shared-db - - neutron-gateway:amqp - rabbitmq-server:amqp - - neutron-gateway:quantum-network-service - nova-cloud-controller:quantum-network-service - - openstack-dashboard:identity-service - keystone:identity-service - - swift-proxy:identity-service - keystone:identity-service - - swift-proxy:swift-storage - swift-storage-z1:swift-storage - - swift-proxy:swift-storage - swift-storage-z2:swift-storage - - swift-proxy:swift-storage - swift-storage-z3:swift-storage - - ceilometer:identity-service - keystone:identity-service - - ceilometer:amqp - rabbitmq-server:amqp - - ceilometer:shared-db - mongodb:database - - ceilometer-agent:nova-ceilometer - nova-compute:nova-ceilometer - - ceilometer-agent:ceilometer-service - ceilometer:ceilometer-service - - heat:identity-service - keystone:identity-service - - heat:shared-db - mysql:shared-db - - heat:amqp - rabbitmq-server:amqp - - ntp:juju-info - nova-compute:juju-info - - ntp:juju-info - nova-cloud-controller:juju-info - - ntp:juju-info - neutron-gateway:juju-info - - ntp:juju-info - ceph:juju-info - - ntp:juju-info - cinder:juju-info - - ntp:juju-info - keystone:juju-info - - 
ntp:juju-info - glance:juju-info - - ntp:juju-info - swift-proxy:juju-info - - ntp:juju-info - swift-storage-z1:juju-info - - ntp:juju-info - swift-storage-z2:juju-info - - ntp:juju-info - swift-storage-z3:juju-info - - ntp:juju-info - ceilometer:juju-info - - ntp:juju-info - mongodb:juju-info - - ntp:juju-info - rabbitmq-server:juju-info - - ntp:juju-info - mysql:juju-info - - ntp:juju-info - openstack-dashboard:juju-info - - ntp:juju-info - heat:juju-info charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/openstack/README.md0000664000175000017500000000425112672604527031674 0ustar marcomarcoOpenStack Bundle for Juju ========================= Overview -------- This bundle deploys a reference OpenStack architecture including all core projects: - OpenStack Compute - OpenStack Networking (using Open vSwitch plugin) - OpenStack Block Storage (backed with Ceph storage) - OpenStack Image - OpenStack Object Storage - OpenStack Identity - OpenStack Dashboard - OpenStack Telemetry - OpenStack Orchestration The charm configuration is an opinioned set for deploying OpenStack for testing on Cloud environments which support nested KVM. Instance types also need to have ephemeral storage (these block devices are used for Ceph and Swift storage). The Ubuntu Server Team use this bundle for testing OpenStack-on-OpenStack. Usage ----- Once deployed, the cloud can be accessed either using the OpenStack command line tools or using the OpenStack Dashboard: http:///horizon The charms configure the 'admin' user with a password of 'openstack' by default. The OpenStack cloud deployed is completely clean; the charms don't attempt to configure networking or upload images. 
Read the OpenStack User Guide on how to configure your cloud for use: http://docs.openstack.org/user-guide/content/ Niggles ------- The neutron-gateway service requires a service unit with two network interfaces to provide full functionality; this part of OpenStack provides L3 routing between tenant networks and the rest of the world. Its possible todo this when testing on OpenStack by adding a second network interface to the neutron-gateway service: nova interface-attach --net-id juju set neutron-gateway ext-port=eth1 Note that you will need to be running this bundle on an OpenStack cloud that supports MAC address learning of some description; this includes using OpenStack Havana with the Neutron Open vSwitch plugin. For actual OpenStack deployments, this service would reside of a physical server with network ports attached to both the internal network (for communication with nova-compute service units) and the external network (for inbound/outbound network access to/from instances within the cloud). charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/bad/0000775000175000017500000000000012672604527027152 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/bad/bundle.yaml0000664000175000017500000000037212672604527031311 0ustar marcomarco# This bundle has a bad relation, which will cause it to fail # its verification. 
services: wordpress: charm: wordpress num_units: 1 mysql: charm: mysql num_units: 1 relations: - ["foo:db", "mysql:server"] charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/bad/README.md0000664000175000017500000000001712672604527030427 0ustar marcomarcoA dummy bundle charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/0000775000175000017500000000000012672604527026620 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish/0000775000175000017500000000000012672604527030272 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish/revision0000664000175000017500000000000112672604527032042 0ustar marcomarco1././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish/metadata.ya0000664000175000017500000000015712672604527032410 0ustar marcomarconame: varnish summary: "Database engine" description: "Another popular database" provides: webcache: varnish charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/category/0000775000175000017500000000000012672604527030435 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/category/.ignored0000664000175000017500000000000112672604527032054 0ustar marcomarco#charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/category/.dir/0000775000175000017500000000000012672604527031271 5ustar marcomarco././@LongLink0000644000000000000000000000014700000000000011605 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/category/.dir/ignoredcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/category/.dir/ignor0000664000175000017500000000000012672604527032320 0ustar marcomarco././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/category/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/category/metadata.y0000664000175000017500000000026212672604527032407 0ustar marcomarconame: categories summary: "Sample charm with a category" description: | That's a boring charm that has a category. categories: ["database"] tags: ["openstack", "storage"]charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/logging/0000775000175000017500000000000012672604527030246 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/logging/hooks/0000775000175000017500000000000012672604527031371 5ustar marcomarco././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/logging/hooks/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/logging/hooks/.gitk0000664000175000017500000000000012672604527032316 0ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/logging/revision0000664000175000017500000000000212672604527032017 0ustar marcomarco1 ././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/logging/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/logging/metadata.ya0000664000175000017500000000056212672604527032364 0ustar marcomarconame: logging summary: 
"Subordinate logging test charm" description: | This is a longer description which potentially contains multiple lines. subordinate: true provides: logging-client: interface: logging requires: logging-directory: interface: logging scope: container info: interface: juju-info scope: container charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/0000775000175000017500000000000012672604527030650 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/hooks/0000775000175000017500000000000012672604527031773 5ustar marcomarco././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/hooks/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/hooks/.gi0000664000175000017500000000000012672604527032361 0ustar marcomarco././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/config.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/config.ya0000664000175000017500000000015712672604527032453 0ustar marcomarcooptions: blog-title: {default: My Title, description: A descriptive title used for the blog., type: string} charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/revision0000664000175000017500000000000112672604527032420 0ustar marcomarco3././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/metadata.0000664000175000017500000000072512672604527032435 0ustar marcomarconame: wordpress summary: "Blog engine" description: "A pretty 
popular blog engine" provides: url: interface: http limit: optional: false logging-dir: interface: logging scope: container monitoring-port: interface: monitoring scope: container requires: db: interface: mysql limit: 1 optional: false cache: interface: varnish limit: 2 optional: true extra-bindings: db-client: admin-api: foo-bar: charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/actions/0000775000175000017500000000000012672604527032310 5ustar marcomarco././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/actions/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/actions/.0000664000175000017500000000000012672604527032356 0ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/riak/0000775000175000017500000000000012672604527027546 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/riak/revision0000664000175000017500000000000112672604527031316 0ustar marcomarco7charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/riak/metadata.yaml0000664000175000017500000000031712672604527032213 0ustar marcomarconame: riak summary: "K/V storage engine" description: "Scalable K/V Store in Erlang with Clocks :-)" provides: endpoint: interface: http admin: interface: http peers: ring: interface: riak charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql-alternative/0000775000175000017500000000000012672604527032301 5ustar marcomarco././@LongLink0000644000000000000000000000015400000000000011603 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql-alternative/revisioncharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql-alternative/r0000664000175000017500000000000112672604527032454 0ustar marcomarco1././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql-alternative/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql-alternative/m0000664000175000017500000000025412672604527032461 0ustar marcomarconame: mysql-alternative summary: "Database engine" description: "A pretty popular database" provides: prod: interface: mysql dev: interface: mysql limit: 2 charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/format2/0000775000175000017500000000000012672604527030172 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/format2/.ignored0000664000175000017500000000000112672604527031611 0ustar marcomarco#charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/format2/.dir/0000775000175000017500000000000012672604527031026 5ustar marcomarco././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/format2/.dir/ignoredcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/format2/.dir/ignore0000664000175000017500000000000012672604527032222 0ustar marcomarco././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/format2/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/format2/metadata.ya0000664000175000017500000000024312672604527032304 0ustar marcomarconame: format2 format: 2 
summary: "Sample charm described in format 2" description: | That's a boring charm that is described in terms of format 2. charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql/0000775000175000017500000000000012672604527027765 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql/revision0000664000175000017500000000000112672604527031535 0ustar marcomarco1charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql/metadata.yaml0000664000175000017500000000015212672604527032427 0ustar marcomarconame: mysql summary: "Database engine" description: "A pretty popular database" provides: server: mysql charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/0000775000175000017500000000000012672604527027753 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/hooks/0000775000175000017500000000000012672604527031076 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/hooks/install0000775000175000017500000000003112672604527032464 0ustar marcomarco#!/bin/bash echo "Done!" 
charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/.ignored0000664000175000017500000000000112672604527031372 0ustar marcomarco#charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/empty/0000775000175000017500000000000012672604527031111 5ustar marcomarco././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/empty/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/empty/.gitkee0000664000175000017500000000000012672604527032350 0ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/src/0000775000175000017500000000000012672604527030542 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/src/hello.c0000664000175000017500000000011412672604527032005 0ustar marcomarco#include main() { printf ("Hello World!\n"); return 0; } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/config.yaml0000664000175000017500000000054312672604527032106 0ustar marcomarcooptions: title: {default: My Title, description: A descriptive title used for the service., type: string} outlook: {description: No default outlook., type: string} username: {default: admin001, description: The name of the initial account (given admin permissions)., type: string} skill-level: {description: A number indicating skill., type: int} charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/.dir/0000775000175000017500000000000012672604527030607 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/.dir/ignored0000664000175000017500000000000012672604527032147 0ustar 
marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/revision0000664000175000017500000000000112672604527031523 0ustar marcomarco1charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/metadata.yaml0000664000175000017500000000021412672604527032414 0ustar marcomarconame: dummy summary: "That's a dummy charm." description: | This is a longer description which potentially contains multiple lines. charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/build/0000775000175000017500000000000012672604527031052 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/build/ignored0000664000175000017500000000000012672604527032412 0ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/actions.yaml0000664000175000017500000000026512672604527032302 0ustar marcomarcosnapshot: description: Take a snapshot of the database. params: outfile: description: The file to write out to. type: string default: foo.bz2 charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered/0000775000175000017500000000000012672604527030245 5ustar marcomarco././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered/metrics.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered/metrics.yam0000664000175000017500000000014012672604527032416 0ustar marcomarcometrics: pings: type: gauge description: Description of the metric. 
juju-unit-time: charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered/revision0000664000175000017500000000000112672604527032015 0ustar marcomarco1././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered/metadata.ya0000664000175000017500000000011512672604527032355 0ustar marcomarconame: metered summary: "A metered charm with custom metrics" description: "" charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade1/0000775000175000017500000000000012672604527030330 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade1/revision0000664000175000017500000000000212672604527032101 0ustar marcomarco1 ././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade1/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade1/metadata.y0000664000175000017500000000022212672604527032276 0ustar marcomarconame: upgrade summary: "Sample charm to test version changes" description: | Sample charm to test version changes. This is the old charm. 
charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered-empty/0000775000175000017500000000000012672604527031401 5ustar marcomarco././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered-empty/metrics.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered-empty/metri0000664000175000017500000000000012672604527032432 0ustar marcomarco././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered-empty/revisioncharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered-empty/revis0000664000175000017500000000000112672604527032443 0ustar marcomarco1././@LongLink0000644000000000000000000000015500000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered-empty/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered-empty/metad0000664000175000017500000000016212672604527032415 0ustar marcomarconame: metered-empty summary: "Metered charm with empty metrics" description: "A charm that will not send metrics" charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terracotta/0000775000175000017500000000000012672604527030770 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terracotta/revision0000664000175000017500000000000212672604527032541 0ustar marcomarco3 ././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terracotta/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terracotta/metadata0000664000175000017500000000075112672604527032476 0ustar 
marcomarconame: terracotta summary: Distributed HA caching/storage platform for Java maintainer: Robert Ayres description: | Distributed HA caching/storage platform for Java. . Terracotta provides out of the box clustering for a number of well known Java frameworks, including EHCache, Hibernate and Quartz as well as clustering for J2EE containers. provides: dso: interface: terracotta optional: true peers: server-array: terracotta-server charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade2/0000775000175000017500000000000012672604527030331 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade2/revision0000664000175000017500000000000212672604527032102 0ustar marcomarco2 ././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade2/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade2/metadata.y0000664000175000017500000000022212672604527032277 0ustar marcomarconame: upgrade summary: "Sample charm to test version changes" description: | Sample charm to test version changes. This is the new charm. 
././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative/charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative0000775000175000017500000000000012672604527032527 5ustar marcomarco././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative/hooks/charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative0000775000175000017500000000000012672604527032527 5ustar marcomarco././@LongLink0000644000000000000000000000016300000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative/hooks/installcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative0000775000175000017500000000003512672604527032532 0ustar marcomarco#!/bin/bash echo hello world././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative/revisioncharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative0000664000175000017500000000000112672604527032520 0ustar marcomarco1././@LongLink0000644000000000000000000000016300000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative0000664000175000017500000000017312672604527032532 0ustar marcomarconame: varnish-alternative summary: "Database engine" description: "Another popular database" provides: webcache: varnish 
charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terms/0000775000175000017500000000000012672604527027752 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terms/metadata.yaml0000664000175000017500000000024212672604527032414 0ustar marcomarconame: terms summary: "Sample charm with terms and conditions" description: | That's a boring charm that requires certain terms. terms: ["term1", "term2"] charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/monitoring/0000775000175000017500000000000012672604527031005 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/monitoring/hooks/0000775000175000017500000000000012672604527032130 5ustar marcomarco././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/monitoring/hooks/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/monitoring/hooks/.g0000664000175000017500000000000012672604527032345 0ustar marcomarco././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/monitoring/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/monitoring/metadata0000664000175000017500000000057712672604527032521 0ustar marcomarconame: monitoring summary: "Subordinate monitoring test charm" description: | This is a longer description which potentially contains multiple lines. 
subordinate: true provides: monitoring-client: interface: monitoring requires: monitoring-port: interface: monitoring scope: container info: interface: juju-info scope: container charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/0000775000175000017500000000000012672604527030511 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/0000775000175000017500000000000012672604527031634 5ustar marcomarco././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/otherdatacharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/oth0000664000175000017500000000001212672604527032342 0ustar marcomarcosome text ././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/collect-metricscharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/col0000664000175000017500000000002212672604527032326 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/startcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/sta0000664000175000017500000000002212672604527032340 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/upgrade-charmcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/upg0000664000175000017500000000002212672604527032344 0ustar marcomarco#!/bin/sh echo $0 
././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/config-changedcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/con0000664000175000017500000000002212672604527032330 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/meter-status-changedcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/met0000664000175000017500000000002212672604527032336 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/installcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/ins0000664000175000017500000000002212672604527032342 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016500000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-brokencharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo0000664000175000017500000000002212672604527032334 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016700000000000011607 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-departedcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar0000664000175000017500000000002212672604527032315 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016600000000000011606 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-joinedcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/sel0000664000175000017500000000002212672604527032334 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-changedcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo0000664000175000017500000000002212672604527032334 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016700000000000011607 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-changedcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/sel0000664000175000017500000000002212672604527032334 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016700000000000011607 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-departedcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo0000664000175000017500000000002212672604527032334 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-changedcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar0000664000175000017500000000002212672604527032315 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016500000000000011605 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-brokencharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar0000664000175000017500000000002212672604527032315 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/stopcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/sto0000664000175000017500000000002212672604527032356 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016500000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-joinedcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar0000664000175000017500000000002212672604527032315 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016600000000000011606 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-brokencharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/sel0000664000175000017500000000002212672604527032334 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/subdir/charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/sub0000775000175000017500000000000012672604527032346 5ustar marcomarco././@LongLink0000644000000000000000000000015600000000000011605 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/subdir/stuffcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/sub0000664000175000017500000000002712672604527032347 0ustar marcomarconon hook related stuff ././@LongLink0000644000000000000000000000017000000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-departedcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/sel0000664000175000017500000000002212672604527032334 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016500000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-joinedcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo0000664000175000017500000000002212672604527032334 0ustar marcomarco#!/bin/sh echo $0 charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/revision0000664000175000017500000000000212672604527032262 0ustar marcomarco1 ././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/metadata.0000664000175000017500000000036512672604527032276 0ustar marcomarconame: all-hooks summary: "That's a dummy charm with hook scrips for all types of hooks." description: "This is a longer description." provides: foo: interface: phony requires: bar: interface: fake peers: self: interface: dummy charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/bundle.go0000664000175000017500000000135712672604527022133 0ustar marcomarco// Copyright 2014 Canonical Ltd. 
// Licensed under the LGPLv3, see LICENCE file for details. package charm import "os" // The Bundle interface is implemented by any type that // may be handled as a bundle. It encapsulates all // the data of a bundle. type Bundle interface { // Data returns the contents of the bundle's bundle.yaml file. Data() *BundleData // Data returns the contents of the bundle's README.md file. ReadMe() string } // ReadBundle reads a Bundle from path, which can point to either a // bundle archive or a bundle directory. func ReadBundle(path string) (Bundle, error) { info, err := os.Stat(path) if err != nil { return nil, err } if info.IsDir() { return ReadBundleDir(path) } return ReadBundleArchive(path) } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/metrics_test.go0000664000175000017500000001175612672604527023373 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package charm_test import ( "sort" "strings" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" ) // Keys returns a list of all defined metrics keys. 
func Keys(m *charm.Metrics) []string { result := make([]string, 0, len(m.Metrics)) for name := range m.Metrics { result = append(result, name) } sort.Strings(result) return result } type MetricsSuite struct{} var _ = gc.Suite(&MetricsSuite{}) func (s *MetricsSuite) TestReadEmpty(c *gc.C) { metrics, err := charm.ReadMetrics(strings.NewReader("")) c.Assert(err, gc.IsNil) c.Assert(metrics, gc.NotNil) } func (s *MetricsSuite) TestReadAlmostEmpty(c *gc.C) { metrics, err := charm.ReadMetrics(strings.NewReader(` metrics: `)) c.Assert(err, gc.IsNil) c.Assert(metrics, gc.NotNil) } func (s *MetricsSuite) TestNoDescription(c *gc.C) { metrics, err := charm.ReadMetrics(strings.NewReader(` metrics: some-metric: type: gauge `)) c.Assert(err, gc.ErrorMatches, "invalid metrics declaration: metric \"some-metric\" lacks description") c.Assert(metrics, gc.IsNil) } func (s *MetricsSuite) TestIncorrectType(c *gc.C) { metrics, err := charm.ReadMetrics(strings.NewReader(` metrics: some-metric: type: not-a-type description: Some description. `)) c.Assert(err, gc.ErrorMatches, "invalid metrics declaration: metric \"some-metric\" has unknown type \"not-a-type\"") c.Assert(metrics, gc.IsNil) } func (s *MetricsSuite) TestMultipleDefinition(c *gc.C) { metrics, err := charm.ReadMetrics(strings.NewReader(` metrics: some-metric: type: gauge description: Some description. some-metric: type: absolute description: Some other description. 
`)) c.Assert(err, gc.IsNil) c.Assert(metrics.Metrics, gc.HasLen, 1) c.Assert(metrics.Metrics["some-metric"].Type, gc.Equals, charm.MetricTypeAbsolute) } func (s *MetricsSuite) TestIsBuiltinMetric(c *gc.C) { tests := []struct { input string isbuiltin bool }{{ "juju-thing", true, }, { "jujuthing", true, }, { "thing", false, }, } for i, test := range tests { c.Logf("test %d isBuiltinMetric(%v) = %v", i, test.input, test.isbuiltin) is := charm.IsBuiltinMetric(test.input) c.Assert(is, gc.Equals, test.isbuiltin) } } func (s *MetricsSuite) TestValidYaml(c *gc.C) { metrics, err := charm.ReadMetrics(strings.NewReader(` metrics: blips: type: absolute description: An absolute metric. blops: type: gauge description: A gauge metric. juju-unit-time: `)) c.Assert(err, gc.IsNil) c.Assert(metrics, gc.NotNil) c.Assert(Keys(metrics), gc.DeepEquals, []string{"blips", "blops", "juju-unit-time"}) testCases := []struct { about string name string value string err string }{{ about: "valid gauge metric", name: "blops", value: "1", err: "", }, { about: "valid absolute metric", name: "blips", value: "0", err: "", }, { about: "valid gauge metric, float value", name: "blops", value: "0.15", err: "", }, { about: "valid absolute metric, float value", name: "blips", value: "6.015e15", err: "", }, { about: "undeclared metric", name: "undeclared", value: "6.015e15", err: "metric \"undeclared\" not defined", }, { about: "invalid type for gauge metric", name: "blops", value: "true", err: "invalid value type: expected float, got \"true\"", }, { about: "metric value too large", name: "blips", value: "1111111111111111111111111111111", err: "metric value is too large", }, } for i, t := range testCases { c.Logf("test %d: %s", i, t.about) err := metrics.ValidateMetric(t.name, t.value) if t.err == "" { c.Check(err, gc.IsNil) } else { c.Check(err, gc.ErrorMatches, t.err) } } } func (s *MetricsSuite) TestBuiltInMetrics(c *gc.C) { tests := []string{` metrics: some-metric: type: gauge description: Some 
description. juju-unit-time: type: absolute `, ` metrics: some-metric: type: gauge description: Some description. juju-unit-time: description: Some description `, } for _, test := range tests { c.Logf("%s", test) _, err := charm.ReadMetrics(strings.NewReader(test)) c.Assert(err, gc.ErrorMatches, `metric "juju-unit-time" is using a prefix reserved for built-in metrics: it should not have type or description specification`) } } func (s *MetricsSuite) TestValidateValue(c *gc.C) { tests := []struct { value string expectedError string }{{ value: "1234567890", }, { value: "0", }, { value: "abcd", expectedError: `invalid value type: expected float, got "abcd"`, }, { value: "1234567890123456789012345678901234567890", expectedError: "metric value is too large", }, { value: "-42", expectedError: "invalid value: value must be greater or equal to zero, got -42", }, } for _, test := range tests { err := charm.ValidateValue(test.value) if test.expectedError != "" { c.Assert(err, gc.ErrorMatches, test.expectedError) } else { c.Assert(err, gc.IsNil) } } } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/LICENCE0000664000175000017500000002150112672604527021311 0ustar marcomarcoAll files in this repository are licensed as follows. If you contribute to this repository, it is assumed that you license your contribution under the same license unless you state otherwise. All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. This software is licensed under the LGPLv3, included below. 
As a special exception to the GNU Lesser General Public License version 3 ("LGPL3"), the copyright holders of this Library give you permission to convey to a third party a Combined Work that links statically or dynamically to this Library without providing any Minimal Corresponding Source or Minimal Application Code as set out in 4d or providing the installation information set out in section 4e, provided that you comply with the other provisions of LGPL3 and provided that you meet, for the Application the terms and conditions of the license(s) which apply to the Application. Except as stated in this special exception, the provisions of LGPL3 will continue to comply in full to this Library. If you modify this Library, you may apply this exception to your version of this Library, but you are not obliged to do so. If you do not wish to do so, delete this exception statement from your version. This exception does not (and cannot) modify any license terms which apply to the Application, with which you must still comply. GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. 
Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. 
You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. 
A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. 
If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/resource/0000775000175000017500000000000012672604527022154 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/resource/type.go0000664000175000017500000000224212672604527023464 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package resource import ( "github.com/juju/errors" ) // These are the valid resource types (except for unknown). const ( typeUnknown Type = iota TypeFile ) var types = map[Type]string{ TypeFile: "file", } // Type enumerates the recognized resource types. type Type int // ParseType converts a string to a Type. If the given value does not // match a recognized type then an error is returned. func ParseType(value string) (Type, error) { for rt, str := range types { if value == str { return rt, nil } } return typeUnknown, errors.Errorf("unsupported resource type %q", value) } // String returns the printable representation of the type. func (rt Type) String() string { return types[rt] } // Validate ensures that the type is valid. func (rt Type) Validate() error { // Ideally, only the (unavoidable) zero value would be invalid. 
// However, typedef'ing int means that the use of int literals // could result in invalid Type values other than the zero value. if _, ok := types[rt]; !ok { return errors.NewNotValid(nil, "unknown resource type") } return nil } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/resource/type_test.go0000664000175000017500000000367612672604527024537 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package resource_test import ( "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable/resource" ) var _ = gc.Suite(&TypeSuite{}) type TypeSuite struct{} func (s *TypeSuite) TestParseTypeOkay(c *gc.C) { rt, err := resource.ParseType("file") c.Assert(err, jc.ErrorIsNil) c.Check(rt, gc.Equals, resource.TypeFile) } func (s *TypeSuite) TestParseTypeRecognized(c *gc.C) { supported := []resource.Type{ resource.TypeFile, } for _, expected := range supported { rt, err := resource.ParseType(expected.String()) c.Assert(err, jc.ErrorIsNil) c.Check(rt, gc.Equals, expected) } } func (s *TypeSuite) TestParseTypeEmpty(c *gc.C) { rt, err := resource.ParseType("") c.Check(err, gc.ErrorMatches, `unsupported resource type ""`) var unknown resource.Type c.Check(rt, gc.Equals, unknown) } func (s *TypeSuite) TestParseTypeUnsupported(c *gc.C) { rt, err := resource.ParseType("spam") c.Check(err, gc.ErrorMatches, `unsupported resource type "spam"`) var unknown resource.Type c.Check(rt, gc.Equals, unknown) } func (s *TypeSuite) TestTypeStringSupported(c *gc.C) { supported := map[resource.Type]string{ resource.TypeFile: "file", } for rt, expected := range supported { str := rt.String() c.Check(str, gc.Equals, expected) } } func (s *TypeSuite) TestTypeStringUnknown(c *gc.C) { var unknown resource.Type str := unknown.String() c.Check(str, gc.Equals, "") } func (s *TypeSuite) TestTypeValidateSupported(c *gc.C) { supported := []resource.Type{ resource.TypeFile, } for _, rt := range 
supported { err := rt.Validate() c.Check(err, jc.ErrorIsNil) } } func (s *TypeSuite) TestTypeValidateUnknown(c *gc.C) { var unknown resource.Type err := unknown.Validate() c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(err, gc.ErrorMatches, `unknown resource type`) } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/resource/resource.go0000664000175000017500000000251312672604527024333 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package resource import ( "github.com/juju/errors" ) // Resource describes a charm's resource in the charm store. type Resource struct { Meta // Origin identifies where the resource will come from. Origin Origin // Revision is the charm store revision of the resource. Revision int // Fingerprint is the SHA-384 checksum for the resource blob. Fingerprint Fingerprint // Size is the size of the resource, in bytes. Size int64 } // Validate checks the payload class to ensure its data is valid. func (res Resource) Validate() error { if err := res.Meta.Validate(); err != nil { return errors.Annotate(err, "invalid resource (bad metadata)") } if err := res.Origin.Validate(); err != nil { return errors.Annotate(err, "invalid resource (bad origin)") } if res.Revision < 0 { return errors.NewNotValid(nil, "invalid resource (revision must be non-negative)") } // TODO(ericsnow) Ensure Revision is 0 for OriginUpload? if res.Fingerprint.IsZero() { if res.Size > 0 { return errors.NewNotValid(nil, "missing fingerprint") } } else { if err := res.Fingerprint.Validate(); err != nil { return errors.Annotate(err, "bad fingerprint") } } if res.Size < 0 { return errors.NotValidf("negative size") } return nil } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/resource/fingerprint_test.go0000664000175000017500000000673012672604527026077 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. 
package resource_test import ( "crypto/sha512" "encoding/hex" "strings" "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable/resource" ) func newFingerprint(c *gc.C, data string) ([]byte, string) { hash := sha512.New384() _, err := hash.Write([]byte(data)) c.Assert(err, jc.ErrorIsNil) raw := hash.Sum(nil) hexStr := hex.EncodeToString(raw) return raw, hexStr } var _ = gc.Suite(&FingerprintSuite{}) type FingerprintSuite struct{} func (s *FingerprintSuite) TestNewFingerprintOkay(c *gc.C) { expected, _ := newFingerprint(c, "spamspamspam") fp, err := resource.NewFingerprint(expected) c.Assert(err, jc.ErrorIsNil) raw := fp.Bytes() c.Check(raw, jc.DeepEquals, expected) } func (s *FingerprintSuite) TestNewFingerprintTooSmall(c *gc.C) { expected, _ := newFingerprint(c, "spamspamspam") _, err := resource.NewFingerprint(expected[:10]) c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(err, gc.ErrorMatches, `.*too small.*`) } func (s *FingerprintSuite) TestNewFingerprintTooBig(c *gc.C) { expected, _ := newFingerprint(c, "spamspamspam") _, err := resource.NewFingerprint(append(expected, 1, 2, 3)) c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(err, gc.ErrorMatches, `.*too big.*`) } func (s *FingerprintSuite) TestParseFingerprintOkay(c *gc.C) { _, expected := newFingerprint(c, "spamspamspam") fp, err := resource.ParseFingerprint(expected) c.Assert(err, jc.ErrorIsNil) hex := fp.String() c.Check(hex, jc.DeepEquals, expected) } func (s *FingerprintSuite) TestParseFingerprintNonHex(c *gc.C) { _, err := resource.ParseFingerprint("XYZ") // not hex c.Check(err, gc.ErrorMatches, `.*odd length hex string.*`) } func (s *FingerprintSuite) TestGenerateFingerprint(c *gc.C) { expected, _ := newFingerprint(c, "spamspamspam") data := strings.NewReader("spamspamspam") fp, err := resource.GenerateFingerprint(data) c.Assert(err, jc.ErrorIsNil) raw := fp.Bytes() c.Check(raw, jc.DeepEquals, expected) } func (s 
*FingerprintSuite) TestString(c *gc.C) { raw, expected := newFingerprint(c, "spamspamspam") fp, err := resource.NewFingerprint(raw) c.Assert(err, jc.ErrorIsNil) hex := fp.String() c.Check(hex, gc.Equals, expected) } func (s *FingerprintSuite) TestRoundtripString(c *gc.C) { _, expected := newFingerprint(c, "spamspamspam") fp, err := resource.ParseFingerprint(expected) c.Assert(err, jc.ErrorIsNil) hex := fp.String() c.Check(hex, gc.Equals, expected) } func (s *FingerprintSuite) TestBytes(c *gc.C) { expected, _ := newFingerprint(c, "spamspamspam") fp, err := resource.NewFingerprint(expected) c.Assert(err, jc.ErrorIsNil) raw := fp.Bytes() c.Check(raw, jc.DeepEquals, expected) } func (s *FingerprintSuite) TestRoundtripBytes(c *gc.C) { expected, _ := newFingerprint(c, "spamspamspam") fp, err := resource.NewFingerprint(expected) c.Assert(err, jc.ErrorIsNil) raw := fp.Bytes() c.Check(raw, jc.DeepEquals, expected) } func (s *FingerprintSuite) TestValidateOkay(c *gc.C) { raw, _ := newFingerprint(c, "spamspamspam") fp, err := resource.NewFingerprint(raw) c.Assert(err, jc.ErrorIsNil) err = fp.Validate() c.Check(err, jc.ErrorIsNil) } func (s *FingerprintSuite) TestValidateZero(c *gc.C) { var fp resource.Fingerprint err := fp.Validate() c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(err, gc.ErrorMatches, `zero-value fingerprint not valid`) } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/resource/origin_test.go0000664000175000017500000000246212672604527025035 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package resource_test import ( "github.com/juju/errors" "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable/resource" ) type OriginSuite struct { testing.IsolationSuite } var _ = gc.Suite(&OriginSuite{}) func (OriginSuite) TestParseOriginKnown(c *gc.C) { recognized := map[string]resource.Origin{ "upload": resource.OriginUpload, "store": resource.OriginStore, } for value, expected := range recognized { origin, err := resource.ParseOrigin(value) c.Check(err, jc.ErrorIsNil) c.Check(origin, gc.Equals, expected) } } func (OriginSuite) TestParseOriginUnknown(c *gc.C) { _, err := resource.ParseOrigin("") c.Check(err, gc.ErrorMatches, `.*unknown origin "".*`) } func (OriginSuite) TestValidateKnown(c *gc.C) { recognized := []resource.Origin{ resource.OriginUpload, resource.OriginStore, } for _, origin := range recognized { err := origin.Validate() c.Check(err, jc.ErrorIsNil) } } func (OriginSuite) TestValidateUnknown(c *gc.C) { var origin resource.Origin err := origin.Validate() c.Check(errors.Cause(err), jc.Satisfies, errors.IsNotValid) c.Check(err, gc.ErrorMatches, `.*unknown origin.*`) } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/resource/meta.go0000664000175000017500000000447012672604527023436 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package resource import ( "fmt" "strings" "github.com/juju/errors" ) // Meta holds the information about a resource, as stored // in a charm's metadata. type Meta struct { // Name identifies the resource. Name string // Type identifies the type of resource (e.g. "file"). Type Type // TODO(ericsnow) Rename Path to Filename? // Path is the relative path of the file or directory where the // resource will be stored under the unit's data directory. The path // is resolved against a subdirectory assigned to the resource. 
For // example, given a service named "spam", a resource "eggs", and a // path "eggs.tgz", the fully resolved storage path for the resource // would be: // /var/lib/juju/agent/spam-0/resources/eggs/eggs.tgz Path string // Description holds optional user-facing info for the resource. Description string } // ParseMeta parses the provided data into a Meta. func ParseMeta(name string, data interface{}) (Meta, error) { var meta Meta meta.Name = name if data == nil { return meta, nil } rMap := data.(map[string]interface{}) if val := rMap["type"]; val != nil { var err error meta.Type, err = ParseType(val.(string)) if err != nil { return meta, errors.Trace(err) } } if val := rMap["filename"]; val != nil { meta.Path = val.(string) } if val := rMap["description"]; val != nil { meta.Description = val.(string) } return meta, nil } // Validate checks the resource metadata to ensure the data is valid. func (meta Meta) Validate() error { if meta.Name == "" { return errors.NewNotValid(nil, "resource missing name") } var typeUnknown Type if meta.Type == typeUnknown { return errors.NewNotValid(nil, "resource missing type") } if err := meta.Type.Validate(); err != nil { msg := fmt.Sprintf("invalid resource type %v: %v", meta.Type, err) return errors.NewNotValid(nil, msg) } if meta.Path == "" { // TODO(ericsnow) change "filename" to "path" return errors.NewNotValid(nil, "resource missing filename") } if meta.Type == TypeFile { if strings.Contains(meta.Path, "/") { msg := fmt.Sprintf(`filename cannot contain "/" (got %q)`, meta.Path) return errors.NewNotValid(nil, msg) } // TODO(ericsnow) Constrain Path to alphanumeric? } return nil } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/resource/resource_test.go0000664000175000017500000001045412672604527025375 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. 
package resource_test import ( "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable/resource" ) var fingerprint = []byte("123456789012345678901234567890123456789012345678") var _ = gc.Suite(&ResourceSuite{}) type ResourceSuite struct{} func (s *ResourceSuite) TestValidateFull(c *gc.C) { fp, err := resource.NewFingerprint(fingerprint) c.Assert(err, jc.ErrorIsNil) res := resource.Resource{ Meta: resource.Meta{ Name: "my-resource", Type: resource.TypeFile, Path: "filename.tgz", Description: "One line that is useful when operators need to push it.", }, Origin: resource.OriginStore, Revision: 1, Fingerprint: fp, Size: 1, } err = res.Validate() c.Check(err, jc.ErrorIsNil) } func (s *ResourceSuite) TestValidateZeroValue(c *gc.C) { var res resource.Resource err := res.Validate() c.Check(err, jc.Satisfies, errors.IsNotValid) } func (s *ResourceSuite) TestValidateBadMetadata(c *gc.C) { var meta resource.Meta c.Assert(meta.Validate(), gc.NotNil) fp, err := resource.NewFingerprint(fingerprint) c.Assert(err, jc.ErrorIsNil) res := resource.Resource{ Meta: meta, Origin: resource.OriginStore, Revision: 1, Fingerprint: fp, } err = res.Validate() c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(err, gc.ErrorMatches, `.*bad metadata.*`) } func (s *ResourceSuite) TestValidateBadOrigin(c *gc.C) { var origin resource.Origin c.Assert(origin.Validate(), gc.NotNil) fp, err := resource.NewFingerprint(fingerprint) c.Assert(err, jc.ErrorIsNil) res := resource.Resource{ Meta: resource.Meta{ Name: "my-resource", Type: resource.TypeFile, Path: "filename.tgz", Description: "One line that is useful when operators need to push it.", }, Origin: origin, Revision: 1, Fingerprint: fp, } err = res.Validate() c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(err, gc.ErrorMatches, `.*bad origin.*`) } func (s *ResourceSuite) TestValidateBadRevision(c *gc.C) { fp, err := resource.NewFingerprint(fingerprint) c.Assert(err, 
jc.ErrorIsNil) res := resource.Resource{ Meta: resource.Meta{ Name: "my-resource", Type: resource.TypeFile, Path: "filename.tgz", Description: "One line that is useful when operators need to push it.", }, Origin: resource.OriginStore, Revision: -1, Fingerprint: fp, } err = res.Validate() c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(err, gc.ErrorMatches, `.*revision must be non-negative.*`) } func (s *ResourceSuite) TestValidateZeroValueFingerprint(c *gc.C) { var fp resource.Fingerprint c.Assert(fp.Validate(), gc.NotNil) res := resource.Resource{ Meta: resource.Meta{ Name: "my-resource", Type: resource.TypeFile, Path: "filename.tgz", Description: "One line that is useful when operators need to push it.", }, Origin: resource.OriginStore, Revision: 1, Fingerprint: fp, } err := res.Validate() c.Check(err, jc.ErrorIsNil) } func (s *ResourceSuite) TestValidateMissingFingerprint(c *gc.C) { var fp resource.Fingerprint c.Assert(fp.Validate(), gc.NotNil) res := resource.Resource{ Meta: resource.Meta{ Name: "my-resource", Type: resource.TypeFile, Path: "filename.tgz", Description: "One line that is useful when operators need to push it.", }, Origin: resource.OriginStore, Revision: 1, Fingerprint: fp, Size: 10, } err := res.Validate() c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(err, gc.ErrorMatches, `.*missing fingerprint.*`) } func (s *ResourceSuite) TestValidateBadSize(c *gc.C) { fp, err := resource.NewFingerprint(fingerprint) c.Assert(err, jc.ErrorIsNil) res := resource.Resource{ Meta: resource.Meta{ Name: "my-resource", Type: resource.TypeFile, Path: "filename.tgz", Description: "One line that is useful when operators need to push it.", }, Origin: resource.OriginStore, Revision: 1, Fingerprint: fp, Size: -1, } err = res.Validate() c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(err, gc.ErrorMatches, `negative size not valid`) } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/resource/origin.go0000664000175000017500000000231012672604527023766 0ustar 
marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package resource import ( "github.com/juju/errors" ) // These are the valid resource origins. const ( originUnknown Origin = iota OriginUpload OriginStore ) var origins = map[Origin]string{ OriginUpload: "upload", OriginStore: "store", } // Origin identifies where a charm's resource comes from. type Origin int // ParseOrigin converts the provided string into an Origin. // If it is not a known origin then an error is returned. func ParseOrigin(value string) (Origin, error) { for o, str := range origins { if value == str { return o, nil } } return originUnknown, errors.Errorf("unknown origin %q", value) } // String returns the printable representation of the origin. func (o Origin) String() string { return origins[o] } // Validate ensures that the origin is correct. func (o Origin) Validate() error { // Ideally, only the (unavoidable) zero value would be invalid. // However, typedef'ing int means that the use of int literals // could result in invalid Type values other than the zero value. if _, ok := origins[o]; !ok { return errors.NewNotValid(nil, "unknown origin") } return nil } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/resource/meta_test.go0000664000175000017500000001474412672604527024502 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. 
package resource_test import ( "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable/resource" ) var _ = gc.Suite(&MetaSuite{}) type MetaSuite struct{} func (s *MetaSuite) TestParseMetaOkay(c *gc.C) { name := "my-resource" data := map[string]interface{}{ "type": "file", "filename": "filename.tgz", "description": "One line that is useful when operators need to push it.", } res, err := resource.ParseMeta(name, data) c.Assert(err, jc.ErrorIsNil) c.Check(res, jc.DeepEquals, resource.Meta{ Name: "my-resource", Type: resource.TypeFile, Path: "filename.tgz", Description: "One line that is useful when operators need to push it.", }) } func (s *MetaSuite) TestParseMetaMissingName(c *gc.C) { name := "" data := map[string]interface{}{ "type": "file", "filename": "filename.tgz", "description": "One line that is useful when operators need to push it.", } res, err := resource.ParseMeta(name, data) c.Assert(err, jc.ErrorIsNil) c.Check(res, jc.DeepEquals, resource.Meta{ Name: "", Type: resource.TypeFile, Path: "filename.tgz", Description: "One line that is useful when operators need to push it.", }) } func (s *MetaSuite) TestParseMetaMissingType(c *gc.C) { name := "my-resource" data := map[string]interface{}{ "filename": "filename.tgz", "description": "One line that is useful when operators need to push it.", } res, err := resource.ParseMeta(name, data) c.Assert(err, jc.ErrorIsNil) c.Check(res, jc.DeepEquals, resource.Meta{ Name: "my-resource", // Type is the zero value. 
Path: "filename.tgz", Description: "One line that is useful when operators need to push it.", }) } func (s *MetaSuite) TestParseMetaEmptyType(c *gc.C) { name := "my-resource" data := map[string]interface{}{ "type": "", "filename": "filename.tgz", "description": "One line that is useful when operators need to push it.", } _, err := resource.ParseMeta(name, data) c.Check(err, gc.ErrorMatches, `unsupported resource type .*`) } func (s *MetaSuite) TestParseMetaUnknownType(c *gc.C) { name := "my-resource" data := map[string]interface{}{ "type": "spam", "filename": "filename.tgz", "description": "One line that is useful when operators need to push it.", } _, err := resource.ParseMeta(name, data) c.Check(err, gc.ErrorMatches, `unsupported resource type .*`) } func (s *MetaSuite) TestParseMetaMissingPath(c *gc.C) { name := "my-resource" data := map[string]interface{}{ "type": "file", "description": "One line that is useful when operators need to push it.", } res, err := resource.ParseMeta(name, data) c.Assert(err, jc.ErrorIsNil) c.Check(res, jc.DeepEquals, resource.Meta{ Name: "my-resource", Type: resource.TypeFile, Path: "", Description: "One line that is useful when operators need to push it.", }) } func (s *MetaSuite) TestParseMetaMissingComment(c *gc.C) { name := "my-resource" data := map[string]interface{}{ "type": "file", "filename": "filename.tgz", } res, err := resource.ParseMeta(name, data) c.Assert(err, jc.ErrorIsNil) c.Check(res, jc.DeepEquals, resource.Meta{ Name: "my-resource", Type: resource.TypeFile, Path: "filename.tgz", Description: "", }) } func (s *MetaSuite) TestParseMetaEmpty(c *gc.C) { name := "my-resource" data := make(map[string]interface{}) res, err := resource.ParseMeta(name, data) c.Assert(err, jc.ErrorIsNil) c.Check(res, jc.DeepEquals, resource.Meta{ Name: "my-resource", }) } func (s *MetaSuite) TestParseMetaNil(c *gc.C) { name := "my-resource" var data map[string]interface{} res, err := resource.ParseMeta(name, data) c.Assert(err, 
jc.ErrorIsNil) c.Check(res, jc.DeepEquals, resource.Meta{ Name: "my-resource", }) } func (s *MetaSuite) TestValidateFull(c *gc.C) { res := resource.Meta{ Name: "my-resource", Type: resource.TypeFile, Path: "filename.tgz", Description: "One line that is useful when operators need to push it.", } err := res.Validate() c.Check(err, jc.ErrorIsNil) } func (s *MetaSuite) TestValidateZeroValue(c *gc.C) { var res resource.Meta err := res.Validate() c.Check(err, jc.Satisfies, errors.IsNotValid) } func (s *MetaSuite) TestValidateMissingName(c *gc.C) { res := resource.Meta{ Type: resource.TypeFile, Path: "filename.tgz", Description: "One line that is useful when operators need to push it.", } err := res.Validate() c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(err, gc.ErrorMatches, `resource missing name`) } func (s *MetaSuite) TestValidateMissingType(c *gc.C) { res := resource.Meta{ Name: "my-resource", Path: "filename.tgz", Description: "One line that is useful when operators need to push it.", } err := res.Validate() c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(err, gc.ErrorMatches, `resource missing type`) } func (s *MetaSuite) TestValidateMissingPath(c *gc.C) { res := resource.Meta{ Name: "my-resource", Type: resource.TypeFile, Description: "One line that is useful when operators need to push it.", } err := res.Validate() c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(err, gc.ErrorMatches, `resource missing filename`) } func (s *MetaSuite) TestValidateNestedPath(c *gc.C) { res := resource.Meta{ Name: "my-resource", Type: resource.TypeFile, Path: "spam/eggs", } err := res.Validate() c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(err, gc.ErrorMatches, `.*filename cannot contain "/" .*`) } func (s *MetaSuite) TestValidateAbsolutePath(c *gc.C) { res := resource.Meta{ Name: "my-resource", Type: resource.TypeFile, Path: "/spam/eggs", } err := res.Validate() c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(err, gc.ErrorMatches, `.*filename 
cannot contain "/" .*`) } func (s *MetaSuite) TestValidateSuspectPath(c *gc.C) { res := resource.Meta{ Name: "my-resource", Type: resource.TypeFile, Path: "git@github.com:juju/juju.git", } err := res.Validate() c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(err, gc.ErrorMatches, `.*filename cannot contain "/" .*`) } func (s *MetaSuite) TestValidateMissingComment(c *gc.C) { res := resource.Meta{ Name: "my-resource", Type: resource.TypeFile, Path: "filename.tgz", } err := res.Validate() c.Check(err, jc.ErrorIsNil) } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/resource/package_test.go0000664000175000017500000000032112672604527025131 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package resource_test import ( "testing" gc "gopkg.in/check.v1" ) func Test(t *testing.T) { gc.TestingT(t) } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/resource/sort.go0000664000175000017500000000076212672604527023477 0ustar marcomarco// Copyright 2016 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package resource import ( "sort" ) // Sort sorts the provided resources. func Sort(resources []Resource) { sort.Sort(byName(resources)) } type byName []Resource func (sorted byName) Len() int { return len(sorted) } func (sorted byName) Swap(i, j int) { sorted[i], sorted[j] = sorted[j], sorted[i] } func (sorted byName) Less(i, j int) bool { return sorted[i].Name < sorted[j].Name } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/resource/fingerprint.go0000664000175000017500000000335512672604527025040 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package resource import ( stdhash "hash" "io" "github.com/juju/errors" "github.com/juju/utils/hash" ) var newHash, validateSum = hash.SHA384() // Fingerprint represents the unique fingerprint value of a resource's data. 
type Fingerprint struct { hash.Fingerprint } // NewFingerprint returns wraps the provided raw fingerprint bytes. // This function roundtrips with Fingerprint.Bytes(). func NewFingerprint(raw []byte) (Fingerprint, error) { fp, err := hash.NewFingerprint(raw, validateSum) if err != nil { return Fingerprint{}, errors.Trace(err) } return Fingerprint{fp}, nil } // ParseFingerprint returns wraps the provided raw fingerprint string. // This function roundtrips with Fingerprint.String(). func ParseFingerprint(raw string) (Fingerprint, error) { fp, err := hash.ParseHexFingerprint(raw, validateSum) if err != nil { return Fingerprint{}, errors.Trace(err) } return Fingerprint{fp}, nil } // GenerateFingerprint returns the fingerprint for the provided data. func GenerateFingerprint(reader io.Reader) (Fingerprint, error) { fp, err := hash.GenerateFingerprint(reader, newHash) if err != nil { return Fingerprint{}, errors.Trace(err) } return Fingerprint{fp}, nil } // Fingerprint is a hash that may be used to generate fingerprints. type FingerprintHash struct { stdhash.Hash } // NewFingerprintHash returns a hash that may be used to create fingerprints. func NewFingerprintHash() *FingerprintHash { return &FingerprintHash{ Hash: newHash(), } } // Fingerprint returns the current fingerprint of the hash. func (fph FingerprintHash) Fingerprint() Fingerprint { fp := hash.NewValidFingerprint(fph) return Fingerprint{fp} } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/charmarchive_test.go0000664000175000017500000002620312672604527024352 0ustar marcomarco// Copyright 2011, 2012, 2013 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. 
package charm_test

import (
	"archive/zip"
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"syscall"

	jc "github.com/juju/testing/checkers"
	"github.com/juju/utils/set"
	gc "gopkg.in/check.v1"
	"gopkg.in/yaml.v1"

	"gopkg.in/juju/charm.v6-unstable"
)

type CharmArchiveSuite struct {
	archivePath string
}

var _ = gc.Suite(&CharmArchiveSuite{})

func (s *CharmArchiveSuite) SetUpSuite(c *gc.C) {
	s.archivePath = archivePath(c, readCharmDir(c, "dummy"))
}

// dummyManifest lists every path expected in the "dummy" test charm archive.
var dummyManifest = []string{
	"actions.yaml",
	"config.yaml",
	"empty",
	"empty/.gitkeep",
	"hooks",
	"hooks/install",
	"metadata.yaml",
	"revision",
	"src",
	"src/hello.c",
}

func (s *CharmArchiveSuite) TestReadCharmArchive(c *gc.C) {
	archive, err := charm.ReadCharmArchive(s.archivePath)
	c.Assert(err, gc.IsNil)
	checkDummy(c, archive, s.archivePath)
}

func (s *CharmArchiveSuite) TestReadCharmArchiveWithoutConfig(c *gc.C) {
	// Technically varnish has no config AND no actions.
	// Perhaps we should make this more orthogonal?
	path := archivePath(c, readCharmDir(c, "varnish"))
	archive, err := charm.ReadCharmArchive(path)
	c.Assert(err, gc.IsNil)

	// A lacking config.yaml file still causes a proper
	// Config value to be returned.
	c.Assert(archive.Config().Options, gc.HasLen, 0)
}

func (s *CharmArchiveSuite) TestReadCharmArchiveWithoutMetrics(c *gc.C) {
	path := archivePath(c, readCharmDir(c, "varnish"))
	dir, err := charm.ReadCharmArchive(path)
	c.Assert(err, gc.IsNil)

	// A lacking metrics.yaml file indicates the unit will not
	// be metered.
	c.Assert(dir.Metrics(), gc.IsNil)
}

func (s *CharmArchiveSuite) TestReadCharmArchiveWithEmptyMetrics(c *gc.C) {
	path := archivePath(c, readCharmDir(c, "metered-empty"))
	dir, err := charm.ReadCharmArchive(path)
	c.Assert(err, gc.IsNil)
	c.Assert(Keys(dir.Metrics()), gc.HasLen, 0)
}

func (s *CharmArchiveSuite) TestReadCharmArchiveWithCustomMetrics(c *gc.C) {
	path := archivePath(c, readCharmDir(c, "metered"))
	dir, err := charm.ReadCharmArchive(path)
	c.Assert(err, gc.IsNil)
	c.Assert(dir.Metrics(), gc.NotNil)
	c.Assert(Keys(dir.Metrics()), gc.DeepEquals, []string{"juju-unit-time", "pings"})
}

func (s *CharmArchiveSuite) TestReadCharmArchiveWithoutActions(c *gc.C) {
	// Wordpress has config but no actions.
	path := archivePath(c, readCharmDir(c, "wordpress"))
	archive, err := charm.ReadCharmArchive(path)
	c.Assert(err, gc.IsNil)

	// A lacking actions.yaml file still causes a proper
	// Actions value to be returned.
	c.Assert(archive.Actions().ActionSpecs, gc.HasLen, 0)
}

func (s *CharmArchiveSuite) TestReadCharmArchiveBytes(c *gc.C) {
	data, err := ioutil.ReadFile(s.archivePath)
	c.Assert(err, gc.IsNil)

	archive, err := charm.ReadCharmArchiveBytes(data)
	c.Assert(err, gc.IsNil)
	checkDummy(c, archive, "")
}

func (s *CharmArchiveSuite) TestReadCharmArchiveFromReader(c *gc.C) {
	f, err := os.Open(s.archivePath)
	c.Assert(err, gc.IsNil)
	defer f.Close()
	info, err := f.Stat()
	c.Assert(err, gc.IsNil)

	archive, err := charm.ReadCharmArchiveFromReader(f, info.Size())
	c.Assert(err, gc.IsNil)
	checkDummy(c, archive, "")
}

func (s *CharmArchiveSuite) TestManifest(c *gc.C) {
	archive, err := charm.ReadCharmArchive(s.archivePath)
	c.Assert(err, gc.IsNil)
	manifest, err := archive.Manifest()
	c.Assert(err, gc.IsNil)
	c.Assert(manifest, jc.DeepEquals, set.NewStrings(dummyManifest...))
}

func (s *CharmArchiveSuite) TestManifestNoRevision(c *gc.C) {
	archive, err := charm.ReadCharmArchive(s.archivePath)
	c.Assert(err, gc.IsNil)
	dirPath := c.MkDir()
	err = archive.ExpandTo(dirPath)
	c.Assert(err, gc.IsNil)
	err = os.Remove(filepath.Join(dirPath, "revision"))
	c.Assert(err, gc.IsNil)

	archive = extCharmArchiveDir(c, dirPath)
	manifest, err := archive.Manifest()
	c.Assert(err, gc.IsNil)
	c.Assert(manifest, gc.DeepEquals, set.NewStrings(dummyManifest...))
}

func (s *CharmArchiveSuite) TestManifestSymlink(c *gc.C) {
	srcPath := cloneDir(c, charmDirPath(c, "dummy"))
	if err := os.Symlink("../target", filepath.Join(srcPath, "hooks/symlink")); err != nil {
		c.Skip("cannot symlink")
	}
	expected := append([]string{"hooks/symlink"}, dummyManifest...)

	archive := archiveDir(c, srcPath)
	manifest, err := archive.Manifest()
	c.Assert(err, gc.IsNil)
	c.Assert(manifest, gc.DeepEquals, set.NewStrings(expected...))
}

func (s *CharmArchiveSuite) TestExpandTo(c *gc.C) {
	archive, err := charm.ReadCharmArchive(s.archivePath)
	c.Assert(err, gc.IsNil)

	path := filepath.Join(c.MkDir(), "charm")
	err = archive.ExpandTo(path)
	c.Assert(err, gc.IsNil)

	dir, err := charm.ReadCharmDir(path)
	c.Assert(err, gc.IsNil)
	checkDummy(c, dir, path)
}

// prepareCharmArchive writes an archive for charmDir to archivePath whose
// hook entries are deliberately non-executable, so that ExpandTo's
// permission fixing can be exercised.
func (s *CharmArchiveSuite) prepareCharmArchive(c *gc.C, charmDir *charm.CharmDir, archivePath string) {
	file, err := os.Create(archivePath)
	c.Assert(err, gc.IsNil)
	defer file.Close()
	zipw := zip.NewWriter(file)
	defer zipw.Close()

	h := &zip.FileHeader{Name: "revision"}
	h.SetMode(syscall.S_IFREG | 0644)
	w, err := zipw.CreateHeader(h)
	c.Assert(err, gc.IsNil)
	_, err = w.Write([]byte(strconv.Itoa(charmDir.Revision())))
	// This write error was previously discarded without a check.
	c.Assert(err, gc.IsNil)

	h = &zip.FileHeader{Name: "metadata.yaml", Method: zip.Deflate}
	h.SetMode(0644)
	w, err = zipw.CreateHeader(h)
	c.Assert(err, gc.IsNil)
	data, err := yaml.Marshal(charmDir.Meta())
	c.Assert(err, gc.IsNil)
	_, err = w.Write(data)
	c.Assert(err, gc.IsNil)

	for name := range charmDir.Meta().Hooks() {
		hookName := filepath.Join("hooks", name)
		h = &zip.FileHeader{
			Name:   hookName,
			Method: zip.Deflate,
		}
		// Force it non-executable
		h.SetMode(0644)
		w, err := zipw.CreateHeader(h)
		c.Assert(err, gc.IsNil)
		_, err = w.Write([]byte("not important"))
		c.Assert(err, gc.IsNil)
	}
}

func (s *CharmArchiveSuite) TestExpandToSetsHooksExecutable(c *gc.C) {
	charmDir, err := charm.ReadCharmDir(cloneDir(c, charmDirPath(c, "all-hooks")))
	c.Assert(err, gc.IsNil)
	// CharmArchive manually, so we can check ExpandTo(), unaffected
	// by ArchiveTo()'s behavior
	archivePath := filepath.Join(c.MkDir(), "archive.charm")
	s.prepareCharmArchive(c, charmDir, archivePath)
	archive, err := charm.ReadCharmArchive(archivePath)
	c.Assert(err, gc.IsNil)

	path := filepath.Join(c.MkDir(), "charm")
	err = archive.ExpandTo(path)
	c.Assert(err, gc.IsNil)

	_, err = charm.ReadCharmDir(path)
	c.Assert(err, gc.IsNil)

	for name := range archive.Meta().Hooks() {
		// name is already a string; the redundant string(name)
		// conversion has been dropped.
		info, err := os.Stat(filepath.Join(path, "hooks", name))
		c.Assert(err, gc.IsNil)
		perm := info.Mode() & 0777
		c.Assert(perm&0100 != 0, gc.Equals, true, gc.Commentf("hook %q is not executable", name))
	}
}

func (s *CharmArchiveSuite) TestCharmArchiveFileModes(c *gc.C) {
	// Apply subtler mode differences than can be expressed in Bazaar.
	srcPath := cloneDir(c, charmDirPath(c, "dummy"))
	modes := []struct {
		path string
		mode os.FileMode
	}{
		{"hooks/install", 0751},
		{"empty", 0750},
		{"src/hello.c", 0614},
	}
	for _, m := range modes {
		err := os.Chmod(filepath.Join(srcPath, m.path), m.mode)
		c.Assert(err, gc.IsNil)
	}
	var haveSymlinks = true
	if err := os.Symlink("../target", filepath.Join(srcPath, "hooks/symlink")); err != nil {
		haveSymlinks = false
	}

	// CharmArchive and extract the charm to a new directory.
	archive := archiveDir(c, srcPath)
	path := c.MkDir()
	err := archive.ExpandTo(path)
	c.Assert(err, gc.IsNil)

	// Check sensible file modes once round-tripped.
	info, err := os.Stat(filepath.Join(path, "src", "hello.c"))
	c.Assert(err, gc.IsNil)
	c.Assert(info.Mode()&0777, gc.Equals, os.FileMode(0644))
	c.Assert(info.Mode()&os.ModeType, gc.Equals, os.FileMode(0))

	info, err = os.Stat(filepath.Join(path, "hooks", "install"))
	c.Assert(err, gc.IsNil)
	c.Assert(info.Mode()&0777, gc.Equals, os.FileMode(0755))
	c.Assert(info.Mode()&os.ModeType, gc.Equals, os.FileMode(0))

	info, err = os.Stat(filepath.Join(path, "empty"))
	c.Assert(err, gc.IsNil)
	c.Assert(info.Mode()&0777, gc.Equals, os.FileMode(0755))

	if haveSymlinks {
		target, err := os.Readlink(filepath.Join(path, "hooks", "symlink"))
		c.Assert(err, gc.IsNil)
		c.Assert(target, gc.Equals, "../target")
	}
}

func (s *CharmArchiveSuite) TestCharmArchiveRevisionFile(c *gc.C) {
	charmDir := cloneDir(c, charmDirPath(c, "dummy"))
	revPath := filepath.Join(charmDir, "revision")

	// Missing revision file
	err := os.Remove(revPath)
	c.Assert(err, gc.IsNil)

	archive := extCharmArchiveDir(c, charmDir)
	c.Assert(archive.Revision(), gc.Equals, 0)

	// Missing revision file with old revision in metadata
	file, err := os.OpenFile(filepath.Join(charmDir, "metadata.yaml"), os.O_WRONLY|os.O_APPEND, 0)
	c.Assert(err, gc.IsNil)
	_, err = file.Write([]byte("\nrevision: 1234\n"))
	c.Assert(err, gc.IsNil)

	archive = extCharmArchiveDir(c, charmDir)
	c.Assert(archive.Revision(), gc.Equals, 1234)

	// Revision file with bad content
	err = ioutil.WriteFile(revPath, []byte("garbage"), 0666)
	c.Assert(err, gc.IsNil)

	path := extCharmArchiveDirPath(c, charmDir)
	archive, err = charm.ReadCharmArchive(path)
	c.Assert(err, gc.ErrorMatches, "invalid revision file")
	c.Assert(archive, gc.IsNil)
}

func (s *CharmArchiveSuite) TestCharmArchiveSetRevision(c *gc.C) {
	archive, err := charm.ReadCharmArchive(s.archivePath)
	c.Assert(err, gc.IsNil)

	c.Assert(archive.Revision(), gc.Equals, 1)
	archive.SetRevision(42)
	c.Assert(archive.Revision(), gc.Equals, 42)

	path := filepath.Join(c.MkDir(), "charm")
	err = archive.ExpandTo(path)
	c.Assert(err, gc.IsNil)

	dir, err := charm.ReadCharmDir(path)
	c.Assert(err, gc.IsNil)
	c.Assert(dir.Revision(), gc.Equals, 42)
}

func (s *CharmArchiveSuite) TestExpandToWithBadLink(c *gc.C) {
	charmDir := cloneDir(c, charmDirPath(c, "dummy"))
	badLink := filepath.Join(charmDir, "hooks", "badlink")

	// Symlink targeting a path outside of the charm.
	err := os.Symlink("../../target", badLink)
	c.Assert(err, gc.IsNil)

	// NOTE: the stale re-assertion of err after extCharmArchiveDir has
	// been removed; the helper performs its own assertions.
	archive := extCharmArchiveDir(c, charmDir)

	path := filepath.Join(c.MkDir(), "charm")
	err = archive.ExpandTo(path)
	c.Assert(err, gc.ErrorMatches, `cannot extract "hooks/badlink": symlink "../../target" leads out of scope`)

	// Symlink targeting an absolute path.
	os.Remove(badLink)
	err = os.Symlink("/target", badLink)
	c.Assert(err, gc.IsNil)

	archive = extCharmArchiveDir(c, charmDir)

	path = filepath.Join(c.MkDir(), "charm")
	err = archive.ExpandTo(path)
	c.Assert(err, gc.ErrorMatches, `cannot extract "hooks/badlink": symlink "/target" is absolute`)
}

// extCharmArchiveDirPath zips dirpath with the external zip tool and returns
// the path of the resulting archive.
func extCharmArchiveDirPath(c *gc.C, dirpath string) string {
	path := filepath.Join(c.MkDir(), "archive.charm")
	cmd := exec.Command("/bin/sh", "-c", fmt.Sprintf("cd %s; zip --fifo --symlinks -r %s .", dirpath, path))
	output, err := cmd.CombinedOutput()
	c.Assert(err, gc.IsNil, gc.Commentf("Command output: %s", output))
	return path
}

// extCharmArchiveDir zips dirpath externally and reads it back as a
// CharmArchive.
func extCharmArchiveDir(c *gc.C, dirpath string) *charm.CharmArchive {
	path := extCharmArchiveDirPath(c, dirpath)
	archive, err := charm.ReadCharmArchive(path)
	c.Assert(err, gc.IsNil)
	return archive
}

// archiveDir archives dirpath in-process (via ArchiveTo) and reads the
// result back as a CharmArchive.
func archiveDir(c *gc.C, dirpath string) *charm.CharmArchive {
	dir, err := charm.ReadCharmDir(dirpath)
	c.Assert(err, gc.IsNil)
	buf := new(bytes.Buffer)
	err = dir.ArchiveTo(buf)
	c.Assert(err, gc.IsNil)
	archive, err := charm.ReadCharmArchiveBytes(buf.Bytes())
	c.Assert(err, gc.IsNil)
	return archive
}
charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/metrics.go0000664000175000017500000000577412672604527022327 0ustar marcomarco// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details. package charm import ( "fmt" "io" "io/ioutil" "strconv" "strings" goyaml "gopkg.in/yaml.v1" ) // MetricType is used to identify metric types supported by juju. type MetricType string const ( builtinMetricPrefix = "juju" // Supported metric types. MetricTypeGauge MetricType = "gauge" MetricTypeAbsolute MetricType = "absolute" ) // IsBuiltinMetric reports whether the given metric key is in the builtin metric namespace func IsBuiltinMetric(key string) bool { return strings.HasPrefix(key, builtinMetricPrefix) } func validateValue(value string) error { // The largest number of digits that can be returned by strconv.FormatFloat is 24, so // choose an arbitrary limit somewhat higher than that. if len(value) > 30 { return fmt.Errorf("metric value is too large") } fValue, err := strconv.ParseFloat(value, 64) if err != nil { return fmt.Errorf("invalid value type: expected float, got %q", value) } if fValue < 0 { return fmt.Errorf("invalid value: value must be greater or equal to zero, got %v", value) } return nil } // validateValue checks if the supplied metric value fits the requirements // of its expected type. func (m MetricType) validateValue(value string) error { switch m { case MetricTypeGauge, MetricTypeAbsolute: return validateValue(value) default: return fmt.Errorf("unknown metric type %q", m) } } // Metric represents a single metric definition type Metric struct { Type MetricType Description string } // Metrics contains the metrics declarations encoded in the metrics.yaml // file. type Metrics struct { Metrics map[string]Metric } // ReadMetrics reads a MetricsDeclaration in YAML format. 
func ReadMetrics(r io.Reader) (*Metrics, error) { data, err := ioutil.ReadAll(r) if err != nil { return nil, err } var metrics Metrics if err := goyaml.Unmarshal(data, &metrics); err != nil { return nil, err } if metrics.Metrics == nil { return &metrics, nil } for name, metric := range metrics.Metrics { if IsBuiltinMetric(name) { if metric.Type != MetricType("") || metric.Description != "" { return nil, fmt.Errorf("metric %q is using a prefix reserved for built-in metrics: it should not have type or description specification", name) } continue } switch metric.Type { case MetricTypeGauge, MetricTypeAbsolute: default: return nil, fmt.Errorf("invalid metrics declaration: metric %q has unknown type %q", name, metric.Type) } if metric.Description == "" { return nil, fmt.Errorf("invalid metrics declaration: metric %q lacks description", name) } } return &metrics, nil } // ValidateMetric validates the supplied metric name and value against the loaded // metric definitions. func (m Metrics) ValidateMetric(name, value string) error { metric, exists := m.Metrics[name] if !exists { return fmt.Errorf("metric %q not defined", name) } if IsBuiltinMetric(name) { return validateValue(value) } return metric.Type.validateValue(value) } charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/bundlearchive_test.go0000664000175000017500000000531312672604527024530 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. 
package charm_test

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	gc "gopkg.in/check.v1"

	"gopkg.in/juju/charm.v6-unstable"
)

var _ = gc.Suite(&BundleArchiveSuite{})

type BundleArchiveSuite struct {
	archivePath string
}

func (s *BundleArchiveSuite) SetUpSuite(c *gc.C) {
	s.archivePath = archivePath(c, readBundleDir(c, "wordpress-simple"))
}

func (s *BundleArchiveSuite) TestReadBundleArchive(c *gc.C) {
	archive, err := charm.ReadBundleArchive(s.archivePath)
	c.Assert(err, gc.IsNil)
	checkWordpressBundle(c, archive, s.archivePath)
}

func (s *BundleArchiveSuite) TestReadBundleArchiveBytes(c *gc.C) {
	data, err := ioutil.ReadFile(s.archivePath)
	c.Assert(err, gc.IsNil)

	archive, err := charm.ReadBundleArchiveBytes(data)
	c.Assert(err, gc.IsNil)
	checkWordpressBundle(c, archive, "")
}

func (s *BundleArchiveSuite) TestReadBundleArchiveFromReader(c *gc.C) {
	f, err := os.Open(s.archivePath)
	c.Assert(err, gc.IsNil)
	defer f.Close()
	info, err := f.Stat()
	c.Assert(err, gc.IsNil)

	archive, err := charm.ReadBundleArchiveFromReader(f, info.Size())
	c.Assert(err, gc.IsNil)
	checkWordpressBundle(c, archive, "")
}

func (s *BundleArchiveSuite) TestReadBundleArchiveWithoutBundleYAML(c *gc.C) {
	testReadBundleArchiveWithoutFile(c, "bundle.yaml")
}

func (s *BundleArchiveSuite) TestReadBundleArchiveWithoutREADME(c *gc.C) {
	testReadBundleArchiveWithoutFile(c, "README.md")
}

func testReadBundleArchiveWithoutFile(c *gc.C, fileToRemove string) {
	path := cloneDir(c, bundleDirPath(c, "wordpress-simple"))
	dir, err := charm.ReadBundleDir(path)
	c.Assert(err, gc.IsNil)

	// Remove the file from the bundle directory.
	// ArchiveTo just zips the contents of the directory as-is,
	// so the resulting bundle archive will not contain the file.
	err = os.Remove(filepath.Join(dir.Path, fileToRemove))
	c.Assert(err, gc.IsNil)

	archivePath := filepath.Join(c.MkDir(), "out.bundle")
	dstf, err := os.Create(archivePath)
	c.Assert(err, gc.IsNil)

	err = dir.ArchiveTo(dstf)
	dstf.Close()
	// This error was previously discarded without a check.
	c.Assert(err, gc.IsNil)

	archive, err := charm.ReadBundleArchive(archivePath)
	// Slightly dubious assumption: the quoted file name has no
	// regexp metacharacters worth worrying about.
	c.Assert(err, gc.ErrorMatches, fmt.Sprintf("archive file %q not found", fileToRemove))
	c.Assert(archive, gc.IsNil)
}

func (s *BundleArchiveSuite) TestExpandTo(c *gc.C) {
	dir := c.MkDir()
	archive, err := charm.ReadBundleArchive(s.archivePath)
	c.Assert(err, gc.IsNil)
	err = archive.ExpandTo(dir)
	c.Assert(err, gc.IsNil)
	bdir, err := charm.ReadBundleDir(dir)
	c.Assert(err, gc.IsNil)
	c.Assert(bdir.ReadMe(), gc.Equals, archive.ReadMe())
	c.Assert(bdir.Data(), gc.DeepEquals, archive.Data())
}
charm-2.1.1/src/gopkg.in/juju/charm.v6-unstable/bundlearchive.go0000664000175000017500000000445612672604527023474 0ustar marcomarco// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

package charm

import (
	"bytes"
	"io"
	"io/ioutil"

	ziputil "github.com/juju/utils/zip"
)

type BundleArchive struct {
	zopen zipOpener

	Path   string
	data   *BundleData
	readMe string
}

// ReadBundleArchive reads a bundle archive from the given file path.
func ReadBundleArchive(path string) (*BundleArchive, error) {
	a, err := readBundleArchive(newZipOpenerFromPath(path))
	if err != nil {
		return nil, err
	}
	a.Path = path
	return a, nil
}

// ReadBundleArchiveBytes reads a bundle archive from the given byte
// slice.
func ReadBundleArchiveBytes(data []byte) (*BundleArchive, error) {
	zopener := newZipOpenerFromReader(bytes.NewReader(data), int64(len(data)))
	return readBundleArchive(zopener)
}

// ReadBundleArchiveFromReader returns a BundleArchive that uses
// r to read the bundle. The given size must hold the number
// of available bytes in the file.
//
// Note that the caller is responsible for closing r - methods on
// the returned BundleArchive may fail after that.
func ReadBundleArchiveFromReader(r io.ReaderAt, size int64) (*BundleArchive, error) {
	return readBundleArchive(newZipOpenerFromReader(r, size))
}

// readBundleArchive loads the bundle.yaml data and README.md text from the
// zip exposed by zopen.
func readBundleArchive(zopen zipOpener) (*BundleArchive, error) {
	a := &BundleArchive{
		zopen: zopen,
	}
	zipr, err := zopen.openZip()
	if err != nil {
		return nil, err
	}
	defer zipr.Close()
	reader, err := zipOpenFile(zipr, "bundle.yaml")
	if err != nil {
		return nil, err
	}
	a.data, err = ReadBundleData(reader)
	reader.Close()
	if err != nil {
		return nil, err
	}
	reader, err = zipOpenFile(zipr, "README.md")
	if err != nil {
		return nil, err
	}
	readMe, err := ioutil.ReadAll(reader)
	// Close the README reader too; previously it was leaked.
	reader.Close()
	if err != nil {
		return nil, err
	}
	a.readMe = string(readMe)
	return a, nil
}

// Data implements Bundle.Data.
func (a *BundleArchive) Data() *BundleData {
	return a.data
}

// ReadMe implements Bundle.ReadMe.
func (a *BundleArchive) ReadMe() string {
	return a.readMe
}

// ExpandTo expands the bundle archive into dir, creating it if necessary.
// If any errors occur during the expansion procedure, the process will
// abort.
func (a *BundleArchive) ExpandTo(dir string) error {
	zipr, err := a.zopen.openZip()
	if err != nil {
		return err
	}
	defer zipr.Close()
	return ziputil.ExtractAll(zipr.Reader, dir)
}
charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/0000775000175000017500000000000012703461656021206 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/charmstore_going_away.go0000664000175000017500000000373112677511231026107 0ustar marcomarco// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package charmrepo // import "gopkg.in/juju/charmrepo.v2-unstable"

// This file may go away once Juju stops using anything here.
import ( "net/http" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" ) // URL returns the root endpoint URL of the charm store. func (s *CharmStore) URL() string { return s.client.ServerURL() } // Latest returns the most current revision for each of the identified // charms. The revision in the provided charm URLs is ignored. func (s *CharmStore) Latest(curls ...*charm.URL) ([]CharmRevision, error) { results, err := s.client.Latest(curls) if err != nil { return nil, err } var responses []CharmRevision for i, result := range results { response := CharmRevision{ Revision: result.Revision, Sha256: result.Sha256, Err: result.Err, } if errgo.Cause(result.Err) == params.ErrNotFound { curl := curls[i].WithRevision(-1) response.Err = CharmNotFound(curl.String()) } responses = append(responses, response) } return responses, nil } // WithTestMode returns a repository Interface where test mode is enabled, // meaning charm store download stats are not increased when charms are // retrieved. func (s *CharmStore) WithTestMode() *CharmStore { newRepo := *s newRepo.client.DisableStats() return &newRepo } // JujuMetadataHTTPHeader is the HTTP header name used to send Juju metadata // attributes to the charm store. const JujuMetadataHTTPHeader = csclient.JujuMetadataHTTPHeader // WithJujuAttrs returns a repository Interface with the Juju metadata // attributes set. func (s *CharmStore) WithJujuAttrs(attrs map[string]string) *CharmStore { newRepo := *s header := make(http.Header) for k, v := range attrs { header.Add(JujuMetadataHTTPHeader, k+"="+v) } newRepo.client.SetHTTPHeader(header) return &newRepo } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/local.go0000664000175000017500000001160112672604507022625 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package charmrepo // import "gopkg.in/juju/charmrepo.v2-unstable" import ( "fmt" "io/ioutil" "os" "path/filepath" "strings" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" ) // LocalRepository represents a local directory containing subdirectories // named after an Ubuntu series, each of which contains charms targeted for // that series. For example: // // /path/to/repository/oneiric/mongodb/ // /path/to/repository/precise/mongodb.charm // /path/to/repository/precise/wordpress/ type LocalRepository struct { Path string } var _ Interface = (*LocalRepository)(nil) // NewLocalRepository creates and return a new local Juju repository pointing // to the given local path. func NewLocalRepository(path string) (Interface, error) { if path == "" { return nil, errgo.New("path to local repository not specified") } return &LocalRepository{ Path: path, }, nil } // Resolve implements Interface.Resolve. func (r *LocalRepository) Resolve(ref *charm.URL) (*charm.URL, []string, error) { if ref.Series == "" { return nil, nil, errgo.Newf("no series specified for %s", ref) } if ref.Revision != -1 { return ref, nil, nil } if ref.Series == "bundle" { // Bundles do not have revision files and the revision is not included // in metadata. For this reason, local bundles always have revision 0. return ref.WithRevision(0), nil, nil } ch, err := r.Get(ref) if err != nil { return nil, nil, err } // This is strictly speaking unnecessary, but just in case a bad charm is // used locally, we'll check the series. _, err = charm.SeriesForCharm(ref.Series, ch.Meta().Series) if err != nil { return nil, nil, err } // We return nil for supported series because even though a charm in a local // repository may declare series, it doesn't make sense because charms are // expected to be for a single series only in the repository. The local // repository concept is deprecated for multi series charms. 
return ref.WithRevision(ch.Revision()), nil, nil } func mightBeCharm(info os.FileInfo) bool { if info.IsDir() { return !strings.HasPrefix(info.Name(), ".") } return strings.HasSuffix(info.Name(), ".charm") } // Get returns a charm matching curl, if one exists. If curl has a revision of // -1, it returns the latest charm that matches curl. If multiple candidates // satisfy the foregoing, the first one encountered will be returned. func (r *LocalRepository) Get(curl *charm.URL) (charm.Charm, error) { if err := r.checkUrlAndPath(curl); err != nil { return nil, err } if curl.Series == "bundle" { return nil, errgo.Newf("expected a charm URL, got bundle URL %q", curl) } path := filepath.Join(r.Path, curl.Series) infos, err := ioutil.ReadDir(path) if err != nil { return nil, entityNotFound(curl, r.Path) } var latest charm.Charm for _, info := range infos { chPath := filepath.Join(path, info.Name()) if info.Mode()&os.ModeSymlink != 0 { var err error if info, err = os.Stat(chPath); err != nil { return nil, err } } if !mightBeCharm(info) { continue } if ch, err := charm.ReadCharm(chPath); err != nil { logger.Warningf("failed to load charm at %q: %s", chPath, err) } else if ch.Meta().Name == curl.Name { if ch.Revision() == curl.Revision { return ch, nil } if latest == nil || ch.Revision() > latest.Revision() { latest = ch } } } if curl.Revision == -1 && latest != nil { return latest, nil } return nil, entityNotFound(curl, r.Path) } // GetBundle implements Interface.GetBundle. func (r *LocalRepository) GetBundle(curl *charm.URL) (charm.Bundle, error) { if err := r.checkUrlAndPath(curl); err != nil { return nil, err } if curl.Series != "bundle" { return nil, errgo.Newf("expected a bundle URL, got charm URL %q", curl) } // Note that the bundle does not inherently own a name different than the // directory name. Neither the name nor the revision are included in the // bundle metadata. // TODO frankban: handle bundle revisions, totally ignored for now. 
path := filepath.Join(r.Path, curl.Series, curl.Name) info, err := os.Stat(path) if err != nil { return nil, entityNotFound(curl, r.Path) } // Do not support bundle archives for the time being. What archive name // should we use? What's the use case for compressing bundles anyway? if !info.IsDir() { return nil, entityNotFound(curl, r.Path) } return charm.ReadBundleDir(path) } // checkUrlAndPath checks that the given URL represents a local entity and that // the repository path exists. func (r *LocalRepository) checkUrlAndPath(curl *charm.URL) error { if curl.Schema != "local" { return fmt.Errorf("local repository got URL with non-local schema: %q", curl) } info, err := os.Stat(r.Path) if err != nil { if isNotExistsError(err) { return repoNotFound(r.Path) } return err } if !info.IsDir() { return repoNotFound(r.Path) } return nil } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/local_test.go0000664000175000017500000002107312672604507023670 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package charmrepo_test import ( "io/ioutil" "os" "path/filepath" gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable" ) type LocalRepoSuite struct { gitjujutesting.FakeHomeSuite repo *charmrepo.LocalRepository charmsPath string bundlesPath string } var _ = gc.Suite(&LocalRepoSuite{}) func (s *LocalRepoSuite) SetUpTest(c *gc.C) { s.FakeHomeSuite.SetUpTest(c) root := c.MkDir() s.repo = &charmrepo.LocalRepository{Path: root} s.bundlesPath = filepath.Join(root, "bundle") s.charmsPath = filepath.Join(root, "quantal") c.Assert(os.Mkdir(s.bundlesPath, 0777), jc.ErrorIsNil) c.Assert(os.Mkdir(s.charmsPath, 0777), jc.ErrorIsNil) } func (s *LocalRepoSuite) addCharmArchive(name string) string { return TestCharms.CharmArchivePath(s.charmsPath, name) } func (s *LocalRepoSuite) addCharmDir(name string) string { return TestCharms.ClonedDirPath(s.charmsPath, name) } func (s *LocalRepoSuite) addBundleDir(name string) string { return TestCharms.ClonedBundleDirPath(s.bundlesPath, name) } func (s *LocalRepoSuite) checkNotFoundErr(c *gc.C, err error, charmURL *charm.URL) { expect := `entity not found in "` + s.repo.Path + `": ` + charmURL.String() c.Check(err, gc.ErrorMatches, expect) } func (s *LocalRepoSuite) TestMissingCharm(c *gc.C) { for i, str := range []string{ "local:quantal/zebra", "local:badseries/zebra", } { c.Logf("test %d: %s", i, str) charmURL := charm.MustParseURL(str) _, err := s.repo.Get(charmURL) s.checkNotFoundErr(c, err, charmURL) } } func (s *LocalRepoSuite) TestMissingRepo(c *gc.C) { c.Assert(os.RemoveAll(s.repo.Path), gc.IsNil) _, err := s.repo.Get(charm.MustParseURL("local:quantal/zebra")) c.Assert(err, gc.ErrorMatches, `no repository found at ".*"`) _, err = s.repo.GetBundle(charm.MustParseURL("local:bundle/wordpress-simple")) c.Assert(err, gc.ErrorMatches, `no repository found at ".*"`) c.Assert(ioutil.WriteFile(s.repo.Path, nil, 0666), 
gc.IsNil) _, err = s.repo.Get(charm.MustParseURL("local:quantal/zebra")) c.Assert(err, gc.ErrorMatches, `no repository found at ".*"`) _, err = s.repo.GetBundle(charm.MustParseURL("local:bundle/wordpress-simple")) c.Assert(err, gc.ErrorMatches, `no repository found at ".*"`) } func (s *LocalRepoSuite) TestCharmArchive(c *gc.C) { charmURL := charm.MustParseURL("local:quantal/dummy") s.addCharmArchive("dummy") ch, err := s.repo.Get(charmURL) c.Assert(err, gc.IsNil) c.Assert(ch.Revision(), gc.Equals, 1) } func (s *LocalRepoSuite) TestLogsErrors(c *gc.C) { err := ioutil.WriteFile(filepath.Join(s.charmsPath, "blah.charm"), nil, 0666) c.Assert(err, gc.IsNil) err = os.Mkdir(filepath.Join(s.charmsPath, "blah"), 0666) c.Assert(err, gc.IsNil) samplePath := s.addCharmDir("upgrade2") gibberish := []byte("don't parse me by") err = ioutil.WriteFile(filepath.Join(samplePath, "metadata.yaml"), gibberish, 0666) c.Assert(err, gc.IsNil) charmURL := charm.MustParseURL("local:quantal/dummy") s.addCharmDir("dummy") ch, err := s.repo.Get(charmURL) c.Assert(err, gc.IsNil) c.Assert(ch.Revision(), gc.Equals, 1) c.Assert(c.GetTestLog(), gc.Matches, ` .* WARNING juju.charm.charmrepo failed to load charm at ".*/quantal/blah": .* .* WARNING juju.charm.charmrepo failed to load charm at ".*/quantal/blah.charm": .* .* WARNING juju.charm.charmrepo failed to load charm at ".*/quantal/upgrade2": .* `[1:]) } func renameSibling(c *gc.C, path, name string) { c.Assert(os.Rename(path, filepath.Join(filepath.Dir(path), name)), gc.IsNil) } func (s *LocalRepoSuite) TestIgnoresUnpromisingNames(c *gc.C) { err := ioutil.WriteFile(filepath.Join(s.charmsPath, "blah.notacharm"), nil, 0666) c.Assert(err, gc.IsNil) err = os.Mkdir(filepath.Join(s.charmsPath, ".blah"), 0666) c.Assert(err, gc.IsNil) renameSibling(c, s.addCharmDir("dummy"), ".dummy") renameSibling(c, s.addCharmArchive("dummy"), "dummy.notacharm") charmURL := charm.MustParseURL("local:quantal/dummy") _, err = s.repo.Get(charmURL) s.checkNotFoundErr(c, 
err, charmURL) c.Assert(c.GetTestLog(), gc.Equals, "") } func (s *LocalRepoSuite) TestFindsSymlinks(c *gc.C) { realPath := TestCharms.ClonedDirPath(c.MkDir(), "dummy") linkPath := filepath.Join(s.charmsPath, "dummy") err := os.Symlink(realPath, linkPath) c.Assert(err, gc.IsNil) ch, err := s.repo.Get(charm.MustParseURL("local:quantal/dummy")) c.Assert(err, gc.IsNil) c.Assert(ch.Revision(), gc.Equals, 1) c.Assert(ch.Meta().Name, gc.Equals, "dummy") c.Assert(ch.Config().Options["title"].Default, gc.Equals, "My Title") c.Assert(ch.(*charm.CharmDir).Path, gc.Equals, linkPath) } func (s *LocalRepoSuite) TestResolve(c *gc.C) { // Add some charms to the local repo. s.addCharmDir("upgrade1") s.addCharmDir("upgrade2") s.addCharmDir("wordpress") s.addCharmDir("riak") s.addCharmDir("multi-series") s.addCharmDir("multi-series-bad") // Define the tests to be run. tests := []struct { id string url string series []string err string }{{ id: "local:quantal/upgrade", url: "local:quantal/upgrade-2", }, { id: "local:quantal/upgrade-1", url: "local:quantal/upgrade-1", }, { id: "local:quantal/wordpress", url: "local:quantal/wordpress-3", }, { id: "local:quantal/riak", url: "local:quantal/riak-7", }, { id: "local:quantal/wordpress-3", url: "local:quantal/wordpress-3", }, { id: "local:quantal/wordpress-2", url: "local:quantal/wordpress-2", }, { id: "local:quantal/new-charm-with-multi-series", url: "local:quantal/new-charm-with-multi-series-7", series: []string{}, }, { id: "local:quantal/multi-series-bad", err: `series \"quantal\" not supported by charm, supported series are: precise,trusty`, }, { id: "local:bundle/openstack", url: "local:bundle/openstack-0", }, { id: "local:bundle/openstack-42", url: "local:bundle/openstack-42", }, { id: "local:trusty/riak", err: "entity not found .*: local:trusty/riak", }, { id: "local:quantal/no-such", err: "entity not found .*: local:quantal/no-such", }, { id: "local:upgrade", err: "no series specified for local:upgrade", }} // Run the tests. 
for i, test := range tests { c.Logf("test %d: %s", i, test.id) ref, series, err := s.repo.Resolve(charm.MustParseURL(test.id)) if test.err != "" { c.Assert(err, gc.ErrorMatches, test.err) c.Assert(ref, gc.IsNil) continue } c.Assert(err, jc.ErrorIsNil) c.Assert(ref, jc.DeepEquals, charm.MustParseURL(test.url)) c.Assert(series, jc.DeepEquals, test.series) } } func (s *LocalRepoSuite) TestGetBundle(c *gc.C) { url := charm.MustParseURL("local:bundle/openstack") s.addBundleDir("openstack") b, err := s.repo.GetBundle(url) c.Assert(err, jc.ErrorIsNil) c.Assert(b.Data(), jc.DeepEquals, TestCharms.BundleDir("openstack").Data()) } func (s *LocalRepoSuite) TestGetBundleSymlink(c *gc.C) { realPath := TestCharms.ClonedBundleDirPath(c.MkDir(), "wordpress-simple") linkPath := filepath.Join(s.bundlesPath, "wordpress-simple") err := os.Symlink(realPath, linkPath) c.Assert(err, jc.ErrorIsNil) url := charm.MustParseURL("local:bundle/wordpress-simple") b, err := s.repo.GetBundle(url) c.Assert(err, jc.ErrorIsNil) c.Assert(b.Data(), jc.DeepEquals, TestCharms.BundleDir("wordpress-simple").Data()) } func (s *LocalRepoSuite) TestGetBundleErrorNotFound(c *gc.C) { url := charm.MustParseURL("local:bundle/no-such") b, err := s.repo.GetBundle(url) s.checkNotFoundErr(c, err, url) c.Assert(b, gc.IsNil) } var invalidURLTests = []struct { about string bundle bool url string err string }{{ about: "get charm: non-local schema", url: "cs:trusty/django-42", err: `local repository got URL with non-local schema: "cs:trusty/django-42"`, }, { about: "get bundle: non-local schema", bundle: true, url: "cs:bundle/django-scalable", err: `local repository got URL with non-local schema: "cs:bundle/django-scalable"`, }, { about: "get charm: bundle provided", url: "local:bundle/rails", err: `expected a charm URL, got bundle URL "local:bundle/rails"`, }, { about: "get bundle: charm provided", bundle: true, url: "local:trusty/rails", err: `expected a bundle URL, got charm URL "local:trusty/rails"`, }} func (s 
*LocalRepoSuite) TestInvalidURLTest(c *gc.C) { var err error var e interface{} for i, test := range invalidURLTests { c.Logf("test %d: %s", i, test.about) curl := charm.MustParseURL(test.url) if test.bundle { e, err = s.repo.GetBundle(curl) } else { e, err = s.repo.Get(curl) } c.Assert(e, gc.IsNil) c.Assert(err, gc.ErrorMatches, test.err) } } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/charmstore.go0000664000175000017500000002121112703461656023701 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package charmrepo // import "gopkg.in/juju/charmrepo.v2-unstable" import ( "crypto/sha512" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "path/filepath" "github.com/juju/utils" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/juju/charmrepo.v2-unstable/csclient" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" ) // CacheDir stores the charm cache directory path. var CacheDir string // CharmStore is a repository Interface that provides access to the public Juju // charm store. type CharmStore struct { client *csclient.Client } var _ Interface = (*CharmStore)(nil) // NewCharmStoreParams holds parameters for instantiating a new CharmStore. type NewCharmStoreParams struct { // URL holds the root endpoint URL of the charm store, // with no trailing slash, not including the version. // For example https://api.jujucharms.com/charmstore // If empty, the default charm store client location is used. URL string // BakeryClient holds the bakery client to use when making // requests to the store. This is used in preference to // HTTPClient. BakeryClient *httpbakery.Client // HTTPClient holds the HTTP client to use when making // requests to the store. If nil, httpbakery.NewHTTPClient will // be used. HTTPClient *http.Client // VisitWebPage is called when authorization requires that // the user visits a web page to authenticate themselves. 
// If nil, no interaction will be allowed. This field // is ignored if BakeryClient is provided. VisitWebPage func(url *url.URL) error // User holds the name to authenticate as for the client. If User is empty, // no credentials will be sent. User string // Password holds the password for the given user, for authenticating the // client. Password string } // NewCharmStore creates and returns a charm store repository. // The given parameters are used to instantiate the charm store. // // The errors returned from the interface methods will // preserve the causes returned from the underlying csclient // methods. func NewCharmStore(p NewCharmStoreParams) *CharmStore { client := csclient.New(csclient.Params{ URL: p.URL, BakeryClient: p.BakeryClient, HTTPClient: p.HTTPClient, VisitWebPage: p.VisitWebPage, User: p.User, Password: p.Password, }) return NewCharmStoreFromClient(client) } // NewCharmStoreFromClient creates and returns a charm store repository. // The provided client is used for charm store requests. func NewCharmStoreFromClient(client *csclient.Client) *CharmStore { return &CharmStore{ client: client, } } // Client returns the charmstore client that the CharmStore // implementation uses under the hood. func (s *CharmStore) Client() *csclient.Client { return s.client } // Get implements Interface.Get. func (s *CharmStore) Get(curl *charm.URL) (charm.Charm, error) { // The cache location must have been previously set. if CacheDir == "" { panic("charm cache directory path is empty") } if curl.Series == "bundle" { return nil, errgo.Newf("expected a charm URL, got bundle URL %q", curl) } path, err := s.archivePath(curl) if err != nil { return nil, errgo.Mask(err, errgo.Any) } return charm.ReadCharmArchive(path) } // GetBundle implements Interface.GetBundle. func (s *CharmStore) GetBundle(curl *charm.URL) (charm.Bundle, error) { // The cache location must have been previously set. 
if CacheDir == "" { panic("charm cache directory path is empty") } if curl.Series != "bundle" { return nil, errgo.Newf("expected a bundle URL, got charm URL %q", curl) } path, err := s.archivePath(curl) if err != nil { return nil, errgo.Mask(err, errgo.Any) } return charm.ReadBundleArchive(path) } // archivePath returns a local path to the downloaded archive of the given // charm or bundle URL, storing it in CacheDir, which it creates if necessary. // If an archive with a matching SHA hash already exists locally, it will use // the local version. func (s *CharmStore) archivePath(curl *charm.URL) (string, error) { // Prepare the cache directory and retrieve the entity archive. if err := os.MkdirAll(CacheDir, 0755); err != nil { return "", errgo.Notef(err, "cannot create the cache directory") } etype := "charm" if curl.Series == "bundle" { etype = "bundle" } r, id, expectHash, expectSize, err := s.client.GetArchive(curl) if err != nil { if errgo.Cause(err) == params.ErrNotFound { // Make a prettier error message for the user. return "", errgo.WithCausef(nil, params.ErrNotFound, "cannot retrieve %q: %s not found", curl, etype) } return "", errgo.NoteMask(err, fmt.Sprintf("cannot retrieve %s %q", etype, curl), errgo.Any) } defer r.Close() // Check if the archive already exists in the cache. path := filepath.Join(CacheDir, charm.Quote(id.String())+"."+etype) if verifyHash384AndSize(path, expectHash, expectSize) == nil { return path, nil } // Verify and save the new archive. 
f, err := ioutil.TempFile(CacheDir, "charm-download") if err != nil { return "", errgo.Notef(err, "cannot make temporary file") } defer f.Close() hash := sha512.New384() size, err := io.Copy(io.MultiWriter(hash, f), r) if err != nil { return "", errgo.Notef(err, "cannot read entity archive") } if size != expectSize { return "", errgo.Newf("size mismatch; network corruption?") } if fmt.Sprintf("%x", hash.Sum(nil)) != expectHash { return "", errgo.Newf("hash mismatch; network corruption?") } // Move the archive to the expected place, and return the charm. // Note that we need to close the temporary file before moving // it because otherwise Windows prohibits the rename. f.Close() if err := utils.ReplaceFile(f.Name(), path); err != nil { return "", errgo.Notef(err, "cannot move the entity archive") } return path, nil } func verifyHash384AndSize(path, expectHash string, expectSize int64) error { f, err := os.Open(path) if err != nil { return errgo.Mask(err) } defer f.Close() hash := sha512.New384() size, err := io.Copy(hash, f) if err != nil { return errgo.Mask(err) } if size != expectSize { logger.Debugf("size mismatch for %q", path) return errgo.Newf("size mismatch for %q", path) } if fmt.Sprintf("%x", hash.Sum(nil)) != expectHash { logger.Debugf("hash mismatch for %q", path) return errgo.Newf("hash mismatch for %q", path) } return nil } // Resolve implements Interface.Resolve. func (s *CharmStore) Resolve(ref *charm.URL) (*charm.URL, []string, error) { resolved, _, supportedSeries, err := s.ResolveWithChannel(ref) if err != nil { return nil, nil, errgo.Mask(err, errgo.Any) } return resolved, supportedSeries, nil } // ResolveWithChannel does the same thing as Resolve() but also returns // the best channel to use. 
func (s *CharmStore) ResolveWithChannel(ref *charm.URL) (*charm.URL, params.Channel, []string, error) { var result struct { Id params.IdResponse SupportedSeries params.SupportedSeriesResponse Published params.PublishedResponse } if _, err := s.client.Meta(ref, &result); err != nil { if errgo.Cause(err) == params.ErrNotFound { // Make a prettier error message for the user. etype := "charm" switch ref.Series { case "bundle": etype = "bundle" case "": etype = "charm or bundle" } return nil, params.NoChannel, nil, errgo.WithCausef(nil, params.ErrNotFound, "cannot resolve URL %q: %s not found", ref, etype) } return nil, params.NoChannel, nil, errgo.NoteMask(err, fmt.Sprintf("cannot resolve charm URL %q", ref), errgo.Any) } // TODO(ericsnow) Get this directly from the API. It has high risk // of getting stale. Perhaps add params.PublishedResponse.BestChannel // or, less desireably, have params.PublishedResponse.Info be // priority-ordered. channel := bestChannel(s.client, result.Published.Info) return result.Id.Id, channel, result.SupportedSeries.SupportedSeries, nil } // bestChannel determines the best channel to use for the given client // and published info. // // Note that this is equivalent to code on the server side. // See ReqHandler.entityChannel in internal/v5/auth.go. func bestChannel(client *csclient.Client, published []params.PublishedInfo) params.Channel { explicitChannel := client.Channel() if explicitChannel != params.NoChannel { return explicitChannel } bestChannel := params.UnpublishedChannel for _, info := range published { // TODO(ericsnow) Favor the one with info.Current == true? 
switch info.Channel { case params.StableChannel: bestChannel = info.Channel break case params.DevelopmentChannel: bestChannel = info.Channel default: panic(fmt.Sprintf("unknown channel %q", info.Channel)) } } return bestChannel } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/legacy_test.go0000664000175000017500000003430712703461656024047 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package charmrepo_test import ( "fmt" "io/ioutil" "os" "path/filepath" gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable" charmtesting "gopkg.in/juju/charmrepo.v2-unstable/testing" ) type legacyCharmStoreSuite struct { gitjujutesting.FakeHomeSuite server *charmtesting.MockStore store *charmrepo.LegacyCharmStore } var _ = gc.Suite(&legacyCharmStoreSuite{}) func (s *legacyCharmStoreSuite) SetUpSuite(c *gc.C) { s.FakeHomeSuite.SetUpSuite(c) s.server = charmtesting.NewMockStore(c, TestCharms, map[string]int{ "cs:series/good": 23, "cs:series/unwise": 23, "cs:series/better": 24, "cs:series/best": 25, }) } func (s *legacyCharmStoreSuite) SetUpTest(c *gc.C) { s.FakeHomeSuite.SetUpTest(c) s.PatchValue(&charmrepo.CacheDir, c.MkDir()) s.store = newLegacyStore(s.server.Address()) s.server.Downloads = nil s.server.Authorizations = nil s.server.Metadata = nil s.server.DownloadsNoStats = nil s.server.InfoRequestCount = 0 s.server.InfoRequestCountNoStats = 0 } func (s *legacyCharmStoreSuite) TearDownSuite(c *gc.C) { s.server.Close() s.FakeHomeSuite.TearDownSuite(c) } func (s *legacyCharmStoreSuite) TestMissing(c *gc.C) { charmURL := charm.MustParseURL("cs:series/missing") expect := `charm not found: cs:series/missing` revs, err := s.store.Latest(charmURL) c.Assert(err, jc.ErrorIsNil) c.Assert(revs, gc.HasLen, 1) c.Assert(revs[0].Err, gc.ErrorMatches, expect) _, err = s.store.Get(charmURL) c.Assert(err, 
gc.ErrorMatches, expect) } func (s *legacyCharmStoreSuite) TestError(c *gc.C) { charmURL := charm.MustParseURL("cs:series/borken") expect := `charm info errors for "cs:series/borken": badness` revs, err := s.store.Latest(charmURL) c.Assert(err, jc.ErrorIsNil) c.Assert(revs, gc.HasLen, 1) c.Assert(revs[0].Err, gc.ErrorMatches, expect) _, err = s.store.Get(charmURL) c.Assert(err, gc.ErrorMatches, expect) } func (s *legacyCharmStoreSuite) TestWarning(c *gc.C) { charmURL := charm.MustParseURL("cs:series/unwise") expect := `.* WARNING juju.charm.charmrepo charm store reports for "cs:series/unwise": foolishness` + "\n" revs, err := s.store.Latest(charmURL) c.Assert(err, jc.ErrorIsNil) c.Assert(revs, gc.HasLen, 1) c.Assert(revs[0].Revision, gc.Equals, 23) c.Assert(err, gc.IsNil) c.Assert(c.GetTestLog(), gc.Matches, expect) ch, err := s.store.Get(charmURL) c.Assert(ch, gc.NotNil) c.Assert(err, gc.IsNil) c.Assert(c.GetTestLog(), gc.Matches, expect+expect) } func (s *legacyCharmStoreSuite) TestLatest(c *gc.C) { urls := []*charm.URL{ charm.MustParseURL("cs:series/good"), charm.MustParseURL("cs:series/good-2"), charm.MustParseURL("cs:series/good-99"), } revInfo, err := s.store.Latest(urls...) 
c.Assert(err, gc.IsNil) c.Assert(revInfo, jc.DeepEquals, []charmrepo.CharmRevision{ {23, s.server.ArchiveSHA256, nil}, {23, s.server.ArchiveSHA256, nil}, {23, s.server.ArchiveSHA256, nil}, }) } func (s *legacyCharmStoreSuite) assertCached(c *gc.C, charmURL *charm.URL) { s.server.Downloads = nil ch, err := s.store.Get(charmURL) c.Assert(err, gc.IsNil) c.Assert(ch, gc.NotNil) c.Assert(s.server.Downloads, gc.IsNil) } func (s *legacyCharmStoreSuite) TestGetCacheImplicitRevision(c *gc.C) { base := "cs:series/good" charmURL := charm.MustParseURL(base) revCharmURL := charm.MustParseURL(base + "-23") ch, err := s.store.Get(charmURL) c.Assert(err, gc.IsNil) c.Assert(ch, gc.NotNil) c.Assert(s.server.Downloads, jc.DeepEquals, []*charm.URL{revCharmURL}) s.assertCached(c, charmURL) s.assertCached(c, revCharmURL) } func (s *legacyCharmStoreSuite) TestGetCacheExplicitRevision(c *gc.C) { base := "cs:series/good-12" charmURL := charm.MustParseURL(base) ch, err := s.store.Get(charmURL) c.Assert(err, gc.IsNil) c.Assert(ch, gc.NotNil) c.Assert(s.server.Downloads, jc.DeepEquals, []*charm.URL{charmURL}) s.assertCached(c, charmURL) } func (s *legacyCharmStoreSuite) TestGetBadCache(c *gc.C) { c.Assert(os.Mkdir(filepath.Join(charmrepo.CacheDir, "cache"), 0777), gc.IsNil) base := "cs:series/good" charmURL := charm.MustParseURL(base) revCharmURL := charm.MustParseURL(base + "-23") name := charm.Quote(revCharmURL.String()) + ".charm" err := ioutil.WriteFile(filepath.Join(charmrepo.CacheDir, "cache", name), nil, 0666) c.Assert(err, gc.IsNil) ch, err := s.store.Get(charmURL) c.Assert(err, gc.IsNil) c.Assert(ch, gc.NotNil) c.Assert(s.server.Downloads, jc.DeepEquals, []*charm.URL{revCharmURL}) s.assertCached(c, charmURL) s.assertCached(c, revCharmURL) } func (s *legacyCharmStoreSuite) TestGetTestModeFlag(c *gc.C) { base := "cs:series/good-12" charmURL := charm.MustParseURL(base) ch, err := s.store.Get(charmURL) c.Assert(err, gc.IsNil) c.Assert(ch, gc.NotNil) c.Assert(s.server.Downloads, 
jc.DeepEquals, []*charm.URL{charmURL}) c.Assert(s.server.DownloadsNoStats, gc.IsNil) c.Assert(s.server.InfoRequestCount, gc.Equals, 1) c.Assert(s.server.InfoRequestCountNoStats, gc.Equals, 0) storeInTestMode := s.store.WithTestMode(true) other := "cs:series/good-23" otherURL := charm.MustParseURL(other) ch, err = storeInTestMode.Get(otherURL) c.Assert(err, gc.IsNil) c.Assert(ch, gc.NotNil) c.Assert(s.server.Downloads, jc.DeepEquals, []*charm.URL{charmURL}) c.Assert(s.server.DownloadsNoStats, jc.DeepEquals, []*charm.URL{otherURL}) c.Assert(s.server.InfoRequestCount, gc.Equals, 1) c.Assert(s.server.InfoRequestCountNoStats, gc.Equals, 1) } // The following tests cover the low-level CharmStore-specific API. func (s *legacyCharmStoreSuite) TestInfo(c *gc.C) { charmURLs := []charm.Location{ charm.MustParseURL("cs:series/good"), charm.MustParseURL("cs:series/better"), charm.MustParseURL("cs:series/best"), } infos, err := s.store.Info(charmURLs...) c.Assert(err, gc.IsNil) c.Assert(infos, gc.HasLen, 3) expected := []int{23, 24, 25} for i, info := range infos { c.Assert(info.Errors, gc.IsNil) c.Assert(info.Revision, gc.Equals, expected[i]) } } func (s *legacyCharmStoreSuite) TestInfoNotFound(c *gc.C) { charmURL := charm.MustParseURL("cs:series/missing") info, err := s.store.Info(charmURL) c.Assert(err, gc.IsNil) c.Assert(info, gc.HasLen, 1) c.Assert(info[0].Errors, gc.HasLen, 1) c.Assert(info[0].Errors[0], gc.Matches, `charm not found: cs:series/missing`) } func (s *legacyCharmStoreSuite) TestInfoError(c *gc.C) { charmURL := charm.MustParseURL("cs:series/borken") info, err := s.store.Info(charmURL) c.Assert(err, gc.IsNil) c.Assert(info, gc.HasLen, 1) c.Assert(info[0].Errors, jc.DeepEquals, []string{"badness"}) } func (s *legacyCharmStoreSuite) TestInfoWarning(c *gc.C) { charmURL := charm.MustParseURL("cs:series/unwise") info, err := s.store.Info(charmURL) c.Assert(err, gc.IsNil) c.Assert(info, gc.HasLen, 1) c.Assert(info[0].Warnings, jc.DeepEquals, []string{"foolishness"}) } 
func (s *legacyCharmStoreSuite) TestInfoTestModeFlag(c *gc.C) { charmURL := charm.MustParseURL("cs:series/good") _, err := s.store.Info(charmURL) c.Assert(err, gc.IsNil) c.Assert(s.server.InfoRequestCount, gc.Equals, 1) c.Assert(s.server.InfoRequestCountNoStats, gc.Equals, 0) storeInTestMode, ok := s.store.WithTestMode(true).(*charmrepo.LegacyCharmStore) c.Assert(ok, gc.Equals, true) _, err = storeInTestMode.Info(charmURL) c.Assert(err, gc.IsNil) c.Assert(s.server.InfoRequestCount, gc.Equals, 1) c.Assert(s.server.InfoRequestCountNoStats, gc.Equals, 1) } func (s *legacyCharmStoreSuite) TestInfoDNSError(c *gc.C) { store := newLegacyStore("http://127.1.2.3") charmURL := charm.MustParseURL("cs:series/good") resp, err := store.Info(charmURL) c.Assert(resp, gc.IsNil) expect := `Cannot access the charm store. .*` c.Assert(err, gc.ErrorMatches, expect) } func (s *legacyCharmStoreSuite) TestEvent(c *gc.C) { charmURL := charm.MustParseURL("cs:series/good") event, err := s.store.Event(charmURL, "") c.Assert(err, gc.IsNil) c.Assert(event.Errors, gc.IsNil) c.Assert(event.Revision, gc.Equals, 23) c.Assert(event.Digest, gc.Equals, "the-digest") } func (s *legacyCharmStoreSuite) TestEventWithDigest(c *gc.C) { charmURL := charm.MustParseURL("cs:series/good") event, err := s.store.Event(charmURL, "the-digest") c.Assert(err, gc.IsNil) c.Assert(event.Errors, gc.IsNil) c.Assert(event.Revision, gc.Equals, 23) c.Assert(event.Digest, gc.Equals, "the-digest") } func (s *legacyCharmStoreSuite) TestEventNotFound(c *gc.C) { charmURL := charm.MustParseURL("cs:series/missing") event, err := s.store.Event(charmURL, "") c.Assert(err, gc.ErrorMatches, `charm event not found for "cs:series/missing"`) c.Assert(event, gc.IsNil) } func (s *legacyCharmStoreSuite) TestEventNotFoundDigest(c *gc.C) { charmURL := charm.MustParseURL("cs:series/good") event, err := s.store.Event(charmURL, "missing-digest") c.Assert(err, gc.ErrorMatches, `charm event not found for "cs:series/good" with digest 
"missing-digest"`) c.Assert(event, gc.IsNil) } func (s *legacyCharmStoreSuite) TestEventError(c *gc.C) { charmURL := charm.MustParseURL("cs:series/borken") event, err := s.store.Event(charmURL, "") c.Assert(err, gc.IsNil) c.Assert(event.Errors, jc.DeepEquals, []string{"badness"}) } func (s *legacyCharmStoreSuite) TestAuthorization(c *gc.C) { store := s.store.WithAuthAttrs("token=value") base := "cs:series/good" charmURL := charm.MustParseURL(base) _, err := store.Get(charmURL) c.Assert(err, gc.IsNil) c.Assert(s.server.Authorizations, gc.HasLen, 1) c.Assert(s.server.Authorizations[0], gc.Equals, "charmstore token=value") } func (s *legacyCharmStoreSuite) TestNilAuthorization(c *gc.C) { store := s.store.WithAuthAttrs("") base := "cs:series/good" charmURL := charm.MustParseURL(base) _, err := store.Get(charmURL) c.Assert(err, gc.IsNil) c.Assert(s.server.Authorizations, gc.HasLen, 0) } func (s *legacyCharmStoreSuite) TestMetadata(c *gc.C) { store := s.store.WithJujuAttrs("juju-metadata") base := "cs:series/good" charmURL := charm.MustParseURL(base) _, err := store.Get(charmURL) c.Assert(err, gc.IsNil) c.Assert(s.server.Metadata, gc.HasLen, 1) c.Assert(s.server.Metadata[0], gc.Equals, "juju-metadata") } func (s *legacyCharmStoreSuite) TestNilMetadata(c *gc.C) { base := "cs:series/good" charmURL := charm.MustParseURL(base) _, err := s.store.Get(charmURL) c.Assert(err, gc.IsNil) c.Assert(s.server.Metadata, gc.HasLen, 0) } func (s *legacyCharmStoreSuite) TestEventWarning(c *gc.C) { charmURL := charm.MustParseURL("cs:series/unwise") event, err := s.store.Event(charmURL, "") c.Assert(err, gc.IsNil) c.Assert(event.Warnings, jc.DeepEquals, []string{"foolishness"}) } func (s *legacyCharmStoreSuite) TestBranchLocation(c *gc.C) { charmURL := charm.MustParseURL("cs:series/name") location := s.store.BranchLocation(charmURL) c.Assert(location, gc.Equals, "lp:charms/series/name") charmURL = charm.MustParseURL("cs:~user/series/name") location = s.store.BranchLocation(charmURL) 
c.Assert(location, gc.Equals, "lp:~user/charms/series/name/trunk") } func (s *legacyCharmStoreSuite) TestCharmURL(c *gc.C) { tests := []struct{ url, loc string }{ {"cs:precise/wordpress", "lp:charms/precise/wordpress"}, {"cs:precise/wordpress", "http://launchpad.net/+branch/charms/precise/wordpress"}, {"cs:precise/wordpress", "https://launchpad.net/+branch/charms/precise/wordpress"}, {"cs:precise/wordpress", "http://code.launchpad.net/+branch/charms/precise/wordpress"}, {"cs:precise/wordpress", "https://code.launchpad.net/+branch/charms/precise/wordpress"}, {"cs:precise/wordpress", "bzr+ssh://bazaar.launchpad.net/+branch/charms/precise/wordpress"}, {"cs:~charmers/precise/wordpress", "lp:~charmers/charms/precise/wordpress/trunk"}, {"cs:~charmers/precise/wordpress", "http://launchpad.net/~charmers/charms/precise/wordpress/trunk"}, {"cs:~charmers/precise/wordpress", "https://launchpad.net/~charmers/charms/precise/wordpress/trunk"}, {"cs:~charmers/precise/wordpress", "http://code.launchpad.net/~charmers/charms/precise/wordpress/trunk"}, {"cs:~charmers/precise/wordpress", "https://code.launchpad.net/~charmers/charms/precise/wordpress/trunk"}, {"cs:~charmers/precise/wordpress", "http://launchpad.net/+branch/~charmers/charms/precise/wordpress/trunk"}, {"cs:~charmers/precise/wordpress", "https://launchpad.net/+branch/~charmers/charms/precise/wordpress/trunk"}, {"cs:~charmers/precise/wordpress", "http://code.launchpad.net/+branch/~charmers/charms/precise/wordpress/trunk"}, {"cs:~charmers/precise/wordpress", "https://code.launchpad.net/+branch/~charmers/charms/precise/wordpress/trunk"}, {"cs:~charmers/precise/wordpress", "bzr+ssh://bazaar.launchpad.net/~charmers/charms/precise/wordpress/trunk"}, {"cs:~charmers/precise/wordpress", "bzr+ssh://bazaar.launchpad.net/~charmers/charms/precise/wordpress/trunk/"}, {"cs:~charmers/precise/wordpress", "~charmers/charms/precise/wordpress/trunk"}, {"", "lp:~charmers/charms/precise/wordpress/whatever"}, {"", 
"lp:~charmers/whatever/precise/wordpress/trunk"}, {"", "lp:whatever/precise/wordpress"}, } for _, t := range tests { charmURL, err := s.store.CharmURL(t.loc) if t.url == "" { c.Assert(err, gc.ErrorMatches, fmt.Sprintf("unknown branch location: %q", t.loc)) } else { c.Assert(err, gc.IsNil) c.Assert(charmURL.String(), gc.Equals, t.url) } } } var legacyInferRepositoryTests = []struct { url string path string }{ {"cs:precise/wordpress", ""}, {"local:oneiric/wordpress", "/some/path"}, } func (s *legacyCharmStoreSuite) TestInferRepository(c *gc.C) { for i, t := range legacyInferRepositoryTests { c.Logf("test %d", i) ref, err := charm.ParseURL(t.url) c.Assert(err, gc.IsNil) repo, err := charmrepo.LegacyInferRepository(ref, "/some/path") c.Assert(err, gc.IsNil) switch repo := repo.(type) { case *charmrepo.LocalRepository: c.Assert(repo.Path, gc.Equals, t.path) default: c.Assert(repo, gc.Equals, charmrepo.LegacyStore) } } ref, err := charm.ParseURL("local:whatever") c.Assert(err, gc.IsNil) _, err = charmrepo.LegacyInferRepository(ref, "") c.Assert(err, gc.ErrorMatches, "path to local repository not specified") ref.Schema = "foo" _, err = charmrepo.LegacyInferRepository(ref, "") c.Assert(err, gc.ErrorMatches, "unknown schema for charm reference.*") } func newLegacyStore(url string) *charmrepo.LegacyCharmStore { return &charmrepo.LegacyCharmStore{BaseURL: url} } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/legacy.go0000664000175000017500000002637412677511231023010 0ustar marcomarco// Copyright 2012, 2013 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package charmrepo // import "gopkg.in/juju/charmrepo.v2-unstable" import ( "encoding/json" "errors" "fmt" "io" "io/ioutil" "net" "net/http" "net/url" "os" "path/filepath" "strings" "github.com/juju/utils" "gopkg.in/juju/charm.v6-unstable" ) // LegacyCharmStore is a repository Interface that provides access to the // legacy Juju charm store. 
type LegacyCharmStore struct { BaseURL string authAttrs string // a list of attr=value pairs, comma separated jujuAttrs string // a list of attr=value pairs, comma separated testMode bool } var _ Interface = (*LegacyCharmStore)(nil) var LegacyStore = &LegacyCharmStore{BaseURL: "https://store.juju.ubuntu.com"} // WithAuthAttrs return a repository Interface with the authentication token // list set. authAttrs is a list of attr=value pairs. func (s *LegacyCharmStore) WithAuthAttrs(authAttrs string) Interface { authCS := *s authCS.authAttrs = authAttrs return &authCS } // WithTestMode returns a repository Interface where testMode is set to value // passed to this method. func (s *LegacyCharmStore) WithTestMode(testMode bool) Interface { newRepo := *s newRepo.testMode = testMode return &newRepo } // WithJujuAttrs returns a repository Interface with the Juju metadata // attributes set. jujuAttrs is a list of attr=value pairs. func (s *LegacyCharmStore) WithJujuAttrs(jujuAttrs string) Interface { jujuCS := *s jujuCS.jujuAttrs = jujuAttrs return &jujuCS } // Perform an http get, adding custom auth header if necessary. func (s *LegacyCharmStore) get(url string) (resp *http.Response, err error) { req, err := http.NewRequest("GET", url, nil) if err != nil { return nil, err } if s.authAttrs != "" { // To comply with RFC 2617, we send the authentication data in // the Authorization header with a custom auth scheme // and the authentication attributes. req.Header.Add("Authorization", "charmstore "+s.authAttrs) } if s.jujuAttrs != "" { // The use of "X-" to prefix custom header values is deprecated. req.Header.Add("Juju-Metadata", s.jujuAttrs) } return http.DefaultClient.Do(req) } // Resolve canonicalizes charm URLs any implied series in the reference. 
func (s *LegacyCharmStore) Resolve(ref *charm.URL) (*charm.URL, []string, error) { infos, err := s.Info(ref) if err != nil { return nil, nil, err } if len(infos) == 0 { return nil, nil, fmt.Errorf("missing response when resolving charm URL: %q", ref) } if infos[0].CanonicalURL == "" { return nil, nil, fmt.Errorf("cannot resolve charm URL: %q", ref) } curl, err := charm.ParseURL(infos[0].CanonicalURL) if err != nil { return nil, nil, err } // Legacy store does not support returning the supported series. return curl, nil, nil } // Info returns details for all the specified charms in the charm store. func (s *LegacyCharmStore) Info(curls ...charm.Location) ([]*InfoResponse, error) { baseURL := s.BaseURL + "/charm-info?" queryParams := make([]string, len(curls), len(curls)+1) for i, curl := range curls { queryParams[i] = "charms=" + url.QueryEscape(curl.String()) } if s.testMode { queryParams = append(queryParams, "stats=0") } resp, err := s.get(baseURL + strings.Join(queryParams, "&")) if err != nil { if url_error, ok := err.(*url.Error); ok { switch url_error.Err.(type) { case *net.DNSError, *net.OpError: return nil, fmt.Errorf("Cannot access the charm store. Are you connected to the internet? Error details: %v", err) } } return nil, err } defer resp.Body.Close() if resp.StatusCode != 200 { errMsg := fmt.Errorf("Cannot access the charm store. 
Invalid response code: %q", resp.Status) body, readErr := ioutil.ReadAll(resp.Body) if err != nil { return nil, readErr } logger.Errorf("%v Response body: %s", errMsg, body) return nil, errMsg } body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } infos := make(map[string]*InfoResponse) if err = json.Unmarshal(body, &infos); err != nil { return nil, err } result := make([]*InfoResponse, len(curls)) for i, curl := range curls { key := curl.String() info, found := infos[key] if !found { return nil, fmt.Errorf("charm store returned response without charm %q", key) } if len(info.Errors) == 1 && info.Errors[0] == "entry not found" { info.Errors[0] = fmt.Sprintf("charm not found: %s", curl) } result[i] = info } return result, nil } // Event returns details for a charm event in the charm store. // // If digest is empty, the latest event is returned. func (s *LegacyCharmStore) Event(curl *charm.URL, digest string) (*EventResponse, error) { key := curl.String() query := key if digest != "" { query += "@" + digest } resp, err := s.get(s.BaseURL + "/charm-event?charms=" + url.QueryEscape(query)) if err != nil { return nil, err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } events := make(map[string]*EventResponse) if err = json.Unmarshal(body, &events); err != nil { return nil, err } event, found := events[key] if !found { return nil, fmt.Errorf("charm store returned response without charm %q", key) } if len(event.Errors) == 1 && event.Errors[0] == "entry not found" { if digest == "" { return nil, &NotFoundError{fmt.Sprintf("charm event not found for %q", curl)} } else { return nil, &NotFoundError{fmt.Sprintf("charm event not found for %q with digest %q", curl, digest)} } } return event, nil } // CharmRevision holds the revision number of a charm and any error // encountered in retrieving it. 
type CharmRevision struct { Revision int Sha256 string Err error } // revisions returns the revisions of the charms referenced by curls. func (s *LegacyCharmStore) revisions(curls ...charm.Location) (revisions []CharmRevision, err error) { infos, err := s.Info(curls...) if err != nil { return nil, err } revisions = make([]CharmRevision, len(infos)) for i, info := range infos { for _, w := range info.Warnings { logger.Warningf("charm store reports for %q: %s", curls[i], w) } if info.Errors == nil { revisions[i].Revision = info.Revision revisions[i].Sha256 = info.Sha256 } else { // If a charm is not found, we are more concise with the error message. if len(info.Errors) == 1 && strings.HasPrefix(info.Errors[0], "charm not found") { revisions[i].Err = fmt.Errorf(info.Errors[0]) } else { revisions[i].Err = fmt.Errorf("charm info errors for %q: %s", curls[i], strings.Join(info.Errors, "; ")) } } } return revisions, nil } // Latest returns the latest revision of the charms referenced by curls, regardless // of the revision set on each curl. func (s *LegacyCharmStore) Latest(curls ...*charm.URL) ([]CharmRevision, error) { baseCurls := make([]charm.Location, len(curls)) for i, curl := range curls { baseCurls[i] = curl.WithRevision(-1) } return s.revisions(baseCurls...) } // BranchLocation returns the location for the branch holding the charm at curl. 
func (s *LegacyCharmStore) BranchLocation(curl *charm.URL) string { if curl.User != "" { return fmt.Sprintf("lp:~%s/charms/%s/%s/trunk", curl.User, curl.Series, curl.Name) } return fmt.Sprintf("lp:charms/%s/%s", curl.Series, curl.Name) } var branchPrefixes = []string{ "lp:", "bzr+ssh://bazaar.launchpad.net/+branch/", "bzr+ssh://bazaar.launchpad.net/", "http://launchpad.net/+branch/", "http://launchpad.net/", "https://launchpad.net/+branch/", "https://launchpad.net/", "http://code.launchpad.net/+branch/", "http://code.launchpad.net/", "https://code.launchpad.net/+branch/", "https://code.launchpad.net/", } // CharmURL returns the charm URL for the branch at location. func (s *LegacyCharmStore) CharmURL(location string) (*charm.URL, error) { var l string if len(location) > 0 && location[0] == '~' { l = location } else { for _, prefix := range branchPrefixes { if strings.HasPrefix(location, prefix) { l = location[len(prefix):] break } } } if l != "" { for len(l) > 0 && l[len(l)-1] == '/' { l = l[:len(l)-1] } u := strings.Split(l, "/") if len(u) == 3 && u[0] == "charms" { return charm.ParseURL(fmt.Sprintf("cs:%s/%s", u[1], u[2])) } if len(u) == 4 && u[0] == "charms" && u[3] == "trunk" { return charm.ParseURL(fmt.Sprintf("cs:%s/%s", u[1], u[2])) } if len(u) == 5 && u[1] == "charms" && u[4] == "trunk" && len(u[0]) > 0 && u[0][0] == '~' { return charm.ParseURL(fmt.Sprintf("cs:%s/%s/%s", u[0], u[2], u[3])) } } return nil, fmt.Errorf("unknown branch location: %q", location) } // verify returns an error unless a file exists at path with a hex-encoded // SHA256 matching digest. func verify(path, digest string) error { hash, _, err := utils.ReadFileSHA256(path) if err != nil { return err } if hash != digest { return fmt.Errorf("bad SHA256 of %q", path) } return nil } // Get returns the charm referenced by curl. // CacheDir must have been set, otherwise Get will panic. 
func (s *LegacyCharmStore) Get(curl *charm.URL) (charm.Charm, error) { // The cache location must have been previously set. if CacheDir == "" { panic("charm cache directory path is empty") } if err := os.MkdirAll(CacheDir, os.FileMode(0755)); err != nil { return nil, err } revInfo, err := s.revisions(curl) if err != nil { return nil, err } if len(revInfo) != 1 { return nil, fmt.Errorf("expected 1 result, got %d", len(revInfo)) } if revInfo[0].Err != nil { return nil, revInfo[0].Err } rev, digest := revInfo[0].Revision, revInfo[0].Sha256 if curl.Revision == -1 { curl = curl.WithRevision(rev) } else if curl.Revision != rev { return nil, fmt.Errorf("store returned charm with wrong revision %d for %q", rev, curl.String()) } path := filepath.Join(CacheDir, charm.Quote(curl.String())+".charm") if verify(path, digest) != nil { store_url := s.BaseURL + "/charm/" + curl.Path() if s.testMode { store_url = store_url + "?stats=0" } resp, err := s.get(store_url) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("bad status from request for %q: %q", store_url, resp.Status) } f, err := ioutil.TempFile(CacheDir, "charm-download") if err != nil { return nil, err } dlPath := f.Name() _, err = io.Copy(f, resp.Body) if cerr := f.Close(); err == nil { err = cerr } if err != nil { os.Remove(dlPath) return nil, err } if err := utils.ReplaceFile(dlPath, path); err != nil { return nil, err } } if err := verify(path, digest); err != nil { return nil, err } return charm.ReadCharmArchive(path) } // GetBundle is only defined for implementing Interface. func (s *LegacyCharmStore) GetBundle(curl *charm.URL) (charm.Bundle, error) { return nil, errors.New("not implemented: legacy API does not support bundles") } // LegacyInferRepository returns a charm repository inferred from the provided // charm or bundle reference. Local references will use the provided path. 
func LegacyInferRepository(ref *charm.URL, localRepoPath string) (repo Interface, err error) { switch ref.Schema { case "cs": repo = LegacyStore case "local": if localRepoPath == "" { return nil, errors.New("path to local repository not specified") } repo = &LocalRepository{Path: localRepoPath} default: return nil, fmt.Errorf("unknown schema for charm reference %q", ref) } return } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/dependencies.tsv0000664000175000017500000000611612703461656024376 0ustar marcomarcogithub.com/ajstarks/svgo git 89e3ac64b5b3e403a5e7c35ea4f98d45db7b4518 2014-10-04T21:11:59Z github.com/juju/blobstore git 06056004b3d7b54bbb7984d830c537bad00fec21 2015-07-29T11:18:58Z github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z github.com/juju/gojsonreference git f0d24ac5ee330baa21721cdff56d45e4ee42628e 2015-02-04T19:46:33Z github.com/juju/gojsonschema git e1ad140384f254c82f89450d9a7c8dd38a632838 2015-03-12T17:00:16Z github.com/juju/httpprof git 14bf14c307672fd2456bdbf35d19cf0ccd3cf565 2014-12-17T16:00:36Z github.com/juju/httprequest git 89d547093c45e293599088cc63e805c6f1205dc0 2016-03-02T10:09:58Z github.com/juju/idmclient git 812a86ff450af958df6665839d93590f27961b08 2016-03-16T15:15:55Z github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z github.com/juju/mempool git 24974d6c264fe5a29716e7d56ea24c4bd904b7cc 2016-02-05T10:49:27Z github.com/juju/names git 8a0aa0963bbacdc790914892e9ff942e94d6f795 2016-03-30T15:05:33Z github.com/juju/schema git 1e25943f8c6fd6815282d6f1ac87091d21e14e19 2016-03-01T11:16:46Z github.com/juju/testing git 162fafccebf20a4207ab93d63b986c230e3f4d2e 2016-04-04T09:43:17Z github.com/juju/txn git 99ec629d0066a4d73c54d8e021a7fc1dc07df614 2015-06-09T16:58:27Z github.com/juju/utils git eb6cb958762135bb61aed1e0951f657c674d427f 2016-04-11T02:40:59Z 
github.com/juju/version git ef897ad7f130870348ce306f61332f5335355063 2015-11-27T20:34:00Z github.com/juju/webbrowser git 54b8c57083b4afb7dc75da7f13e2967b2606a507 2016-03-09T14:36:29Z github.com/juju/xml git eb759a627588d35166bc505fceb51b88500e291e 2015-04-13T13:11:21Z github.com/juju/zip git f6b1e93fa2e29a1d7d49b566b2b51efb060c982a 2016-02-05T10:52:21Z github.com/julienschmidt/httprouter git 77a895ad01ebc98a4dc95d8355bc825ce80a56f6 2015-10-13T22:55:20Z golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z golang.org/x/net git ea47fc708ee3e20177f3ca3716217c4ab75942cb 2015-08-29T23:03:18Z gopkg.in/check.v1 git 4f90aeace3a26ad7021961c297b22c42160c7b25 2016-01-05T16:49:36Z gopkg.in/errgo.v1 git 66cb46252b94c1f3d65646f54ee8043ab38d766c 2015-10-07T15:31:57Z gopkg.in/juju/charm.v6-unstable git 728a5ea3ff1c1ae8b4c3ac4779c0027f693d1ca5 2016-04-08T11:12:17Z gopkg.in/juju/charmstore.v5-unstable git 745fa1ca2260cdc9dd5a6df6282da51776baa59f 2016-04-12T11:34:55Z gopkg.in/juju/jujusvg.v1 git a60359df348ef2ca40ec3bcd58a01de54f05658e 2016-02-11T10:02:50Z gopkg.in/macaroon-bakery.v1 git fddb3dcd74806133259879d033fdfe92f9e67a8a 2016-04-01T12:14:21Z gopkg.in/macaroon.v1 git ab3940c6c16510a850e1c2dd628b919f0f3f1464 2015-01-21T11:42:31Z gopkg.in/mgo.v2 git 4d04138ffef2791c479c0c8bbffc30b34081b8d9 2015-10-26T16:34:53Z gopkg.in/natefinch/lumberjack.v2 git 514cbda263a734ae8caac038dadf05f8f3f9f738 2016-01-25T11:17:49Z gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z gopkg.in/yaml.v2 git a83829b6f1293c91addabc89d0571c246397bbf4 2016-03-01T20:40:22Z launchpad.net/tomb bzr gustavo@niemeyer.net-20140529072043-hzcrlnl3ygvg914q 18 charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/bundlepath.go0000664000175000017500000000257512672604507023673 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package charmrepo // import "gopkg.in/juju/charmrepo.v2-unstable" import ( "os" "path/filepath" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" ) // NewBundleAtPath creates and returns a bundle at a given path, // and a URL that describes it. func NewBundleAtPath(path string) (charm.Bundle, *charm.URL, error) { if path == "" { return nil, nil, errgo.New("path to bundle not specified") } _, err := os.Stat(path) if isNotExistsError(err) { return nil, nil, os.ErrNotExist } else if err == nil && !isValidCharmOrBundlePath(path) { return nil, nil, InvalidPath(path) } b, err := charm.ReadBundle(path) if err != nil { if isNotExistsError(err) { return nil, nil, BundleNotFound(path) } return nil, nil, err } absPath, err := filepath.Abs(path) if err != nil { return nil, nil, err } _, name := filepath.Split(absPath) url := &charm.URL{ Schema: "local", Name: name, Series: "bundle", Revision: 0, } return b, url, nil } // ReadBundleFile attempts to read the file at path // and interpret it as a bundle. func ReadBundleFile(path string) (*charm.BundleData, error) { f, err := os.Open(path) if err != nil { if isNotExistsError(err) { return nil, BundleNotFound(path) } return nil, err } defer f.Close() return charm.ReadBundleData(f) } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/charmstore_test.go0000664000175000017500000004342612703461656024754 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package charmrepo_test

import (
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	jujutesting "github.com/juju/testing"
	jc "github.com/juju/testing/checkers"
	gc "gopkg.in/check.v1"
	"gopkg.in/errgo.v1"
	"gopkg.in/juju/charm.v6-unstable"
	"gopkg.in/juju/charmstore.v5-unstable"

	"gopkg.in/juju/charmrepo.v2-unstable"
	"gopkg.in/juju/charmrepo.v2-unstable/csclient"
	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
	charmtesting "gopkg.in/juju/charmrepo.v2-unstable/testing"
)

// charmStoreSuite holds tests that need no running charm store server.
type charmStoreSuite struct {
	jujutesting.IsolationSuite
}

var _ = gc.Suite(&charmStoreSuite{})

// TestDefaultURL checks that a charm store repository created with empty
// parameters points at the production charm store URL.
func (s *charmStoreSuite) TestDefaultURL(c *gc.C) {
	repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{})
	c.Assert(repo.Client().ServerURL(), gc.Equals, csclient.ServerURL)
}

// charmStoreBaseSuite runs an in-process charm store server (backed by the
// suite's isolated MongoDB) and provides a client and repository wired to it.
type charmStoreBaseSuite struct {
	charmtesting.IsolatedMgoSuite
	srv     *httptest.Server              // HTTP front end for the test store
	client  *csclient.Client              // authenticated client for uploads
	handler charmstore.HTTPCloseHandler   // underlying store handler; closed in teardown
	repo    *charmrepo.CharmStore         // repository under test, pointing at srv
}

var _ = gc.Suite(&charmStoreBaseSuite{})

// SetUpTest starts a fresh charm store server and points the repository and
// the charm cache directory at per-test locations.
func (s *charmStoreBaseSuite) SetUpTest(c *gc.C) {
	s.IsolatedMgoSuite.SetUpTest(c)
	s.startServer(c)
	s.repo = charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{
		URL: s.srv.URL,
	})
	// Use a throwaway cache directory so tests do not touch the real cache.
	s.PatchValue(&charmrepo.CacheDir, c.MkDir())
}

// TearDownTest shuts down the HTTP server and store handler before the
// MongoDB fixture is torn down.
func (s *charmStoreBaseSuite) TearDownTest(c *gc.C) {
	s.srv.Close()
	s.handler.Close()
	s.IsolatedMgoSuite.TearDownTest(c)
}

// startServer creates the charm store server on the suite's MongoDB and a
// client authenticated with the server's admin credentials.
func (s *charmStoreBaseSuite) startServer(c *gc.C) {
	serverParams := charmstore.ServerParams{
		AuthUsername: "test-user",
		AuthPassword: "test-password",
	}
	db := s.Session.DB("charmstore")
	handler, err := charmstore.NewServer(db, nil, "", serverParams, charmstore.V5)
	c.Assert(err, gc.IsNil)
	s.handler = handler
	s.srv = httptest.NewServer(handler)
	s.client = csclient.New(csclient.Params{
		URL:      s.srv.URL,
		User:     serverParams.AuthUsername,
		Password: serverParams.AuthPassword,
	})
}

// addCharm uploads a charm with a promulgated revision to the testing charm
// store, and returns the resulting charm and charm URL.
func (s *charmStoreBaseSuite) addCharm(c *gc.C, urlStr, name string) (charm.Charm, *charm.URL) { id := charm.MustParseURL(urlStr) promulgatedRevision := -1 if id.User == "" { id.User = "who" promulgatedRevision = id.Revision } ch := TestCharms.CharmArchive(c.MkDir(), name) // Upload the charm. err := s.client.UploadCharmWithRevision(id, ch, promulgatedRevision) c.Assert(err, gc.IsNil) s.setPublic(c, id, params.StableChannel) return ch, id } // addCharmNoRevision uploads a charm to the testing charm store, and returns the // resulting charm and charm URL. func (s *charmStoreBaseSuite) addCharmNoRevision(c *gc.C, urlStr, name string) (charm.Charm, *charm.URL) { id := charm.MustParseURL(urlStr) if id.User == "" { id.User = "who" } ch := TestCharms.CharmArchive(c.MkDir(), name) // Upload the charm. url, err := s.client.UploadCharm(id, ch) c.Assert(err, gc.IsNil) s.setPublic(c, id, params.StableChannel) return ch, url } // addBundle uploads a bundle to the testing charm store, and returns the // resulting bundle and bundle URL. func (s *charmStoreBaseSuite) addBundle(c *gc.C, urlStr, name string) (charm.Bundle, *charm.URL) { id := charm.MustParseURL(urlStr) promulgatedRevision := -1 if id.User == "" { id.User = "who" promulgatedRevision = id.Revision } b := TestCharms.BundleArchive(c.MkDir(), name) // Upload the bundle. err := s.client.UploadBundleWithRevision(id, b, promulgatedRevision) c.Assert(err, gc.IsNil) s.setPublic(c, id, params.StableChannel) // Return the bundle and its URL. return b, id } func (s *charmStoreBaseSuite) setPublic(c *gc.C, id *charm.URL, channels ...params.Channel) { if len(channels) > 0 { err := s.client.WithChannel(params.UnpublishedChannel).Put("/"+id.Path()+"/publish", ¶ms.PublishRequest{ Channels: channels, }) c.Assert(err, jc.ErrorIsNil) } else { channels = []params.Channel{params.UnpublishedChannel} } for _, channel := range channels { // Allow read permissions to everyone. 
err := s.client.WithChannel(channel).Put("/"+id.Path()+"/meta/perm/read", []string{params.Everyone}) c.Assert(err, jc.ErrorIsNil) } } type charmStoreRepoSuite struct { charmStoreBaseSuite } var _ = gc.Suite(&charmStoreRepoSuite{}) // checkCharmDownloads checks that the charm represented by the given URL has // been downloaded the expected number of times. func (s *charmStoreRepoSuite) checkCharmDownloads(c *gc.C, url *charm.URL, expect int) { key := []string{params.StatsArchiveDownload, url.Series, url.Name, url.User, strconv.Itoa(url.Revision)} path := "/stats/counter/" + strings.Join(key, ":") var count int getDownloads := func() int { var result []params.Statistic err := s.client.Get(path, &result) c.Assert(err, jc.ErrorIsNil) return int(result[0].Count) } for retry := 0; retry < 10; retry++ { time.Sleep(100 * time.Millisecond) if count = getDownloads(); count == expect { if expect == 0 && retry < 2 { // Wait a bit to make sure. continue } return } } c.Errorf("downloads count for %s is %d, expected %d", url, count, expect) } func (s *charmStoreRepoSuite) TestNewCharmStoreFromClient(c *gc.C) { client := csclient.New(csclient.Params{URL: csclient.ServerURL}) repo := charmrepo.NewCharmStoreFromClient(client) c.Check(repo.Client().ServerURL(), gc.Equals, csclient.ServerURL) } func (s *charmStoreRepoSuite) TestGet(c *gc.C) { expect, url := s.addCharm(c, "cs:~who/trusty/mysql-0", "mysql") ch, err := s.repo.Get(url) c.Assert(err, jc.ErrorIsNil) checkCharm(c, ch, expect) } func (s *charmStoreRepoSuite) TestGetPromulgated(c *gc.C) { expect, url := s.addCharm(c, "trusty/mysql-42", "mysql") ch, err := s.repo.Get(url) c.Assert(err, jc.ErrorIsNil) checkCharm(c, ch, expect) } func (s *charmStoreRepoSuite) TestGetRevisions(c *gc.C) { s.addCharm(c, "~dalek/trusty/riak-0", "riak") expect1, url1 := s.addCharm(c, "~dalek/trusty/riak-1", "riak") expect2, _ := s.addCharm(c, "~dalek/trusty/riak-2", "riak") // Retrieve an old revision. 
ch, err := s.repo.Get(url1) c.Assert(err, jc.ErrorIsNil) checkCharm(c, ch, expect1) // Retrieve the latest revision. ch, err = s.repo.Get(charm.MustParseURL("cs:~dalek/trusty/riak")) c.Assert(err, jc.ErrorIsNil) checkCharm(c, ch, expect2) } func (s *charmStoreRepoSuite) TestGetCache(c *gc.C) { _, url := s.addCharm(c, "~who/trusty/mysql-42", "mysql") ch, err := s.repo.Get(url) c.Assert(err, jc.ErrorIsNil) path := ch.(*charm.CharmArchive).Path c.Assert(hashOfPath(c, path), gc.Equals, hashOfCharm(c, "mysql")) } func (s *charmStoreRepoSuite) TestGetSameCharm(c *gc.C) { _, url := s.addCharm(c, "precise/wordpress-47", "wordpress") getModTime := func(path string) time.Time { info, err := os.Stat(path) c.Assert(err, jc.ErrorIsNil) return info.ModTime() } // Retrieve a charm. ch1, err := s.repo.Get(url) c.Assert(err, jc.ErrorIsNil) // Retrieve its cache file modification time. path := ch1.(*charm.CharmArchive).Path modTime := getModTime(path) // Retrieve the same charm again. ch2, err := s.repo.Get(url.WithRevision(-1)) c.Assert(err, jc.ErrorIsNil) // Check this is the same charm, and its underlying cache file is the same. checkCharm(c, ch2, ch1) c.Assert(ch2.(*charm.CharmArchive).Path, gc.Equals, path) // Check the same file has been reused. c.Assert(modTime.Equal(getModTime(path)), jc.IsTrue) } func (s *charmStoreRepoSuite) TestGetInvalidCache(c *gc.C) { _, url := s.addCharm(c, "~who/trusty/mysql-1", "mysql") // Retrieve a charm. ch1, err := s.repo.Get(url) c.Assert(err, jc.ErrorIsNil) // Modify its cache file to make it invalid. path := ch1.(*charm.CharmArchive).Path err = ioutil.WriteFile(path, []byte("invalid"), 0644) c.Assert(err, jc.ErrorIsNil) // Retrieve the same charm again. _, err = s.repo.Get(url) c.Assert(err, jc.ErrorIsNil) // Check that the cache file have been properly rewritten. 
c.Assert(hashOfPath(c, path), gc.Equals, hashOfCharm(c, "mysql")) } func (s *charmStoreRepoSuite) TestGetIncreaseStats(c *gc.C) { if jujutesting.MgoServer.WithoutV8 { c.Skip("mongo javascript not enabled") } _, url := s.addCharm(c, "~who/precise/wordpress-2", "wordpress") // Retrieve the charm. _, err := s.repo.Get(url) c.Assert(err, jc.ErrorIsNil) s.checkCharmDownloads(c, url, 1) // Retrieve the charm again. _, err = s.repo.Get(url) c.Assert(err, jc.ErrorIsNil) s.checkCharmDownloads(c, url, 2) } func (s *charmStoreRepoSuite) TestGetErrorBundle(c *gc.C) { ch, err := s.repo.Get(charm.MustParseURL("cs:bundle/django")) c.Assert(err, gc.ErrorMatches, `expected a charm URL, got bundle URL "cs:bundle/django"`) c.Assert(ch, gc.IsNil) } func (s *charmStoreRepoSuite) TestGetErrorCacheDir(c *gc.C) { parentDir := c.MkDir() err := os.Chmod(parentDir, 0) c.Assert(err, jc.ErrorIsNil) defer os.Chmod(parentDir, 0755) s.PatchValue(&charmrepo.CacheDir, filepath.Join(parentDir, "cache")) ch, err := s.repo.Get(charm.MustParseURL("cs:trusty/django")) c.Assert(err, gc.ErrorMatches, `cannot create the cache directory: .*: permission denied`) c.Assert(ch, gc.IsNil) } func (s *charmStoreRepoSuite) TestGetErrorCharmNotFound(c *gc.C) { ch, err := s.repo.Get(charm.MustParseURL("cs:trusty/no-such")) c.Assert(err, gc.ErrorMatches, `cannot retrieve "cs:trusty/no-such": charm not found`) c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) c.Assert(ch, gc.IsNil) } func (s *charmStoreRepoSuite) TestGetErrorServer(c *gc.C) { // Set up a server always returning errors. srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { http.Error(w, `{"Message": "bad wolf", "Code": "bad request"}`, http.StatusBadRequest) })) defer srv.Close() // Try getting a charm from the server. 
	repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{
		URL: srv.URL,
	})
	ch, err := repo.Get(charm.MustParseURL("cs:trusty/django"))
	c.Assert(err, gc.ErrorMatches, `cannot retrieve charm "cs:trusty/django": cannot get archive: bad wolf`)
	c.Assert(errgo.Cause(err), gc.Equals, params.ErrBadRequest)
	c.Assert(ch, gc.IsNil)
}

func (s *charmStoreRepoSuite) TestGetErrorHashMismatch(c *gc.C) {
	_, url := s.addCharm(c, "trusty/riak-0", "riak")

	// Set up a proxy server that modifies the returned hash.
	// It records the real response, forwards the entity id header
	// unchanged, but replaces the content hash with a bogus value so
	// that the client's integrity check must fail.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		rec := httptest.NewRecorder()
		s.handler.ServeHTTP(rec, r)
		w.Header().Set(params.EntityIdHeader, rec.Header().Get(params.EntityIdHeader))
		w.Header().Set(params.ContentHashHeader, "invalid")
		w.Write(rec.Body.Bytes())
	}))
	defer srv.Close()

	// Try getting a charm from the server.
	repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{
		URL: srv.URL,
	})
	ch, err := repo.Get(url)
	c.Assert(err, gc.ErrorMatches, `hash mismatch; network corruption\?`)
	c.Assert(ch, gc.IsNil)
}

func (s *charmStoreRepoSuite) TestGetBundle(c *gc.C) {
	// Note that getting a bundle shares most of the logic with charm
	// retrieval. For this reason, only bundle specific code is tested.
s.addCharm(c, "cs:trusty/mysql-0", "mysql") s.addCharm(c, "cs:trusty/wordpress-0", "wordpress") expect, url := s.addBundle(c, "cs:~who/bundle/wordpress-simple-42", "wordpress-simple") b, err := s.repo.GetBundle(url) c.Assert(err, jc.ErrorIsNil) c.Assert(b.Data(), jc.DeepEquals, expect.Data()) c.Assert(b.ReadMe(), gc.Equals, expect.ReadMe()) } func (s *charmStoreRepoSuite) TestGetBundleErrorCharm(c *gc.C) { ch, err := s.repo.GetBundle(charm.MustParseURL("cs:trusty/django")) c.Assert(err, gc.ErrorMatches, `expected a bundle URL, got charm URL "cs:trusty/django"`) c.Assert(ch, gc.IsNil) } var resolveTests = []struct { id string url string supportedSeries []string err string }{{ id: "~who/mysql", url: "cs:~who/trusty/mysql-0", supportedSeries: []string{"trusty"}, }, { id: "~who/trusty/mysql", url: "cs:~who/trusty/mysql-0", supportedSeries: []string{"trusty"}, }, { id: "~who/wordpress", url: "cs:~who/precise/wordpress-2", supportedSeries: []string{"precise"}, }, { id: "~who/wordpress-2", err: `cannot resolve URL "cs:~who/wordpress-2": charm or bundle not found`, }, { id: "~dalek/riak", url: "cs:~dalek/utopic/riak-42", supportedSeries: []string{"utopic"}, }, { id: "~dalek/utopic/riak-42", url: "cs:~dalek/utopic/riak-42", supportedSeries: []string{"utopic"}, }, { id: "utopic/mysql", url: "cs:utopic/mysql-47", supportedSeries: []string{"utopic"}, }, { id: "utopic/mysql-47", url: "cs:utopic/mysql-47", supportedSeries: []string{"utopic"}, }, { id: "~who/multi-series", url: "cs:~who/multi-series-0", supportedSeries: []string{"trusty", "precise", "quantal"}, }, { id: "~dalek/utopic/riak-100", err: `cannot resolve URL "cs:~dalek/utopic/riak-100": charm not found`, }, { id: "bundle/no-such", err: `cannot resolve URL "cs:bundle/no-such": bundle not found`, }, { id: "no-such", err: `cannot resolve URL "cs:no-such": charm or bundle not found`, }} func (s *charmStoreRepoSuite) addResolveTestsCharms(c *gc.C) { // Add promulgated entities first so that the base entity // is marked as 
promulgated when it first gets inserted. s.addCharm(c, "utopic/mysql-47", "mysql") s.addCharmNoRevision(c, "multi-series", "multi-series") s.addCharm(c, "~who/trusty/mysql-0", "mysql") s.addCharm(c, "~who/precise/wordpress-2", "wordpress") s.addCharm(c, "~dalek/utopic/riak-42", "riak") } func (s *charmStoreRepoSuite) TestResolve(c *gc.C) { s.addResolveTestsCharms(c) client := s.repo.Client().WithChannel(params.StableChannel) repo := charmrepo.NewCharmStoreFromClient(client) for i, test := range resolveTests { c.Logf("test %d: %s", i, test.id) ref, supportedSeries, err := repo.Resolve(charm.MustParseURL(test.id)) if test.err != "" { c.Check(err.Error(), gc.Equals, test.err) c.Check(ref, gc.IsNil) continue } c.Assert(err, jc.ErrorIsNil) c.Check(ref, jc.DeepEquals, charm.MustParseURL(test.url)) c.Check(supportedSeries, jc.SameContents, test.supportedSeries) } } func (s *charmStoreRepoSuite) TestResolveWithChannelEquivalentToResolve(c *gc.C) { s.addResolveTestsCharms(c) client := s.repo.Client().WithChannel(params.StableChannel) repo := charmrepo.NewCharmStoreFromClient(client) for i, test := range resolveTests { c.Logf("test %d: %s", i, test.id) ref, channel, supportedSeries, err := repo.ResolveWithChannel(charm.MustParseURL(test.id)) if test.err != "" { c.Check(err.Error(), gc.Equals, test.err) c.Check(ref, gc.IsNil) continue } c.Assert(err, jc.ErrorIsNil) c.Check(ref, jc.DeepEquals, charm.MustParseURL(test.url)) c.Check(channel, gc.Equals, params.StableChannel) c.Check(supportedSeries, jc.SameContents, test.supportedSeries) } } func (s *charmStoreRepoSuite) TestResolveWithChannel(c *gc.C) { tests := []struct { clientChannel params.Channel published []params.Channel expected params.Channel }{{ clientChannel: params.StableChannel, expected: params.StableChannel, }, { clientChannel: params.DevelopmentChannel, expected: params.DevelopmentChannel, }, { clientChannel: params.UnpublishedChannel, expected: params.UnpublishedChannel, }, { clientChannel: params.NoChannel, 
expected: params.UnpublishedChannel, }, { published: []params.Channel{params.StableChannel}, expected: params.StableChannel, }, { published: []params.Channel{params.DevelopmentChannel}, expected: params.DevelopmentChannel, }, { published: []params.Channel{params.StableChannel, params.DevelopmentChannel}, expected: params.StableChannel, }, { published: []params.Channel{params.DevelopmentChannel, params.StableChannel}, expected: params.StableChannel, }, { clientChannel: params.StableChannel, published: []params.Channel{params.DevelopmentChannel, params.StableChannel}, expected: params.StableChannel, }, { clientChannel: params.DevelopmentChannel, published: []params.Channel{params.StableChannel, params.DevelopmentChannel}, expected: params.DevelopmentChannel, }, { clientChannel: params.UnpublishedChannel, published: []params.Channel{params.StableChannel}, expected: params.UnpublishedChannel, }} ch := TestCharms.CharmArchive(c.MkDir(), "mysql") cURL := charm.MustParseURL("~who/trusty/mysql") for i, test := range tests { c.Logf("test %d: %s/%v", i, test.clientChannel, test.published) cURL.Revision = i err := s.client.UploadCharmWithRevision(cURL, ch, cURL.Revision) c.Assert(err, gc.IsNil) s.setPublic(c, cURL) if len(test.published) > 0 { s.setPublic(c, cURL, test.published...) } else if test.clientChannel != params.NoChannel && test.clientChannel != params.UnpublishedChannel { s.setPublic(c, cURL, test.clientChannel) } repo := charmrepo.NewCharmStoreFromClient(s.client.WithChannel(test.clientChannel)) _, channel, _, err := repo.ResolveWithChannel(cURL) c.Assert(err, jc.ErrorIsNil) c.Check(channel, gc.Equals, test.expected) } } // hashOfCharm returns the SHA256 hash sum for the given charm name. func hashOfCharm(c *gc.C, name string) string { path := TestCharms.CharmArchivePath(c.MkDir(), name) return hashOfPath(c, path) } // hashOfPath returns the SHA256 hash sum for the given path. 
func hashOfPath(c *gc.C, path string) string { f, err := os.Open(path) c.Assert(err, jc.ErrorIsNil) defer f.Close() hash := sha256.New() _, err = io.Copy(hash, f) c.Assert(err, jc.ErrorIsNil) return fmt.Sprintf("%x", hash.Sum(nil)) } // checkCharm checks that the given charms have the same attributes. func checkCharm(c *gc.C, ch, expect charm.Charm) { c.Assert(ch.Actions(), jc.DeepEquals, expect.Actions()) c.Assert(ch.Config(), jc.DeepEquals, expect.Config()) c.Assert(ch.Meta(), jc.DeepEquals, expect.Meta()) } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/0000775000175000017500000000000012703461656023012 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/export_test.go0000664000175000017500000000027412672604507025723 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package csclient var ( Hyphenate = hyphenate UploadArchive = (*Client).uploadArchive ) charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/0000775000175000017500000000000012703461656024275 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/helpers.go0000664000175000017500000000351012672604507026264 0ustar marcomarco// Copyright 2016 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package params import ( "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable/resource" ) // Resource2API converts a charm resource into an API Resource struct. func Resource2API(res resource.Resource) Resource { return Resource{ Name: res.Name, Type: res.Type.String(), Path: res.Path, Description: res.Description, Origin: res.Origin.String(), Revision: res.Revision, Fingerprint: res.Fingerprint.Bytes(), Size: res.Size, } } // API2Resource converts an API Resource struct into // a charm resource. 
func API2Resource(apiInfo Resource) (resource.Resource, error) { var res resource.Resource rtype, err := resource.ParseType(apiInfo.Type) if err != nil { return res, errgo.Mask(err, errgo.Any) } origin, err := resource.ParseOrigin(apiInfo.Origin) if err != nil { return res, errgo.Mask(err, errgo.Any) } fp, err := deserializeFingerprint(apiInfo.Fingerprint) if err != nil { return res, errgo.Mask(err, errgo.Any) } res = resource.Resource{ Meta: resource.Meta{ Name: apiInfo.Name, Type: rtype, Path: apiInfo.Path, Description: apiInfo.Description, }, Origin: origin, Revision: apiInfo.Revision, Fingerprint: fp, Size: apiInfo.Size, } if err := res.Validate(); err != nil { return res, errgo.Mask(err, errgo.Any) } return res, nil } // deserializeFingerprint converts the serialized fingerprint back into // a Fingerprint. "zero" values are treated appropriately. func deserializeFingerprint(fpSum []byte) (resource.Fingerprint, error) { if len(fpSum) == 0 { return resource.Fingerprint{}, nil } fp, err := resource.NewFingerprint(fpSum) if err != nil { return fp, errgo.Mask(err, errgo.Any) } return fp, nil } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/params_test.go0000664000175000017500000000201512672604507027143 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package params_test import ( "encoding/json" "net/textproto" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" ) type suite struct{} var _ = gc.Suite(&suite{}) func (*suite) TestContentHashHeaderCanonicalized(c *gc.C) { // The header key should be canonicalized, because otherwise // the actually produced header will be different from that // specified. 
canon := textproto.CanonicalMIMEHeaderKey(params.ContentHashHeader) c.Assert(canon, gc.Equals, params.ContentHashHeader) } func (*suite) TestBakeryErrorCompatibility(c *gc.C) { err1 := httpbakery.Error{ Code: httpbakery.ErrBadRequest, Message: "some request", } err2 := params.Error{ Code: params.ErrBadRequest, Message: "some request", } data1, err := json.Marshal(err1) c.Assert(err, gc.IsNil) c.Assert(string(data1), jc.JSONEquals, err2) } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/error.go0000664000175000017500000000443012672604507025755 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package params // import "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" import ( "fmt" ) // ErrorCode holds the class of an error in machine-readable format. // It is also an error in its own right. type ErrorCode string func (code ErrorCode) Error() string { return string(code) } func (code ErrorCode) ErrorCode() ErrorCode { return code } const ( ErrNotFound ErrorCode = "not found" ErrMetadataNotFound ErrorCode = "metadata not found" ErrForbidden ErrorCode = "forbidden" ErrBadRequest ErrorCode = "bad request" // TODO change to ErrAlreadyExists ErrDuplicateUpload ErrorCode = "duplicate upload" ErrMultipleErrors ErrorCode = "multiple errors" ErrUnauthorized ErrorCode = "unauthorized" ErrMethodNotAllowed ErrorCode = "method not allowed" ErrServiceUnavailable ErrorCode = "service unavailable" ErrEntityIdNotAllowed ErrorCode = "charm or bundle id not allowed" ErrInvalidEntity ErrorCode = "invalid charm or bundle" // Note that these error codes sit in the same name space // as the bakery error codes defined in gopkg.in/macaroon-bakery.v0/httpbakery . // In particular, ErrBadRequest is a shared error code // which needs to share the message too. ) // Error represents an error - it is returned for any response that fails. 
// See https://github.com/juju/charmstore/blob/v4/docs/API.md#errors type Error struct { Message string Code ErrorCode Info map[string]*Error `json:",omitempty"` } // NewError returns a new *Error with the given error code // and message. func NewError(code ErrorCode, f string, a ...interface{}) error { return &Error{ Message: fmt.Sprintf(f, a...), Code: code, } } // Error implements error.Error. func (e *Error) Error() string { return e.Message } // ErrorCode holds the class of the error in // machine readable format. func (e *Error) ErrorCode() string { return e.Code.Error() } // ErrorInfo returns additional info on the error. // TODO(rog) rename this so that it more accurately // reflects its role. func (e *Error) ErrorInfo() map[string]*Error { return e.Info } // Cause implements errgo.Causer.Cause. func (e *Error) Cause() error { if e.Code != "" { return e.Code } return nil } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/helpers_test.go0000664000175000017500000001243312703461656027330 0ustar marcomarco// Copyright 2016 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package params_test import ( "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable/resource" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" ) const fingerprint = "123456789012345678901234567890123456789012345678" type HelpersSuite struct { testing.IsolationSuite } var _ = gc.Suite(&HelpersSuite{}) func (HelpersSuite) TestResource2API(c *gc.C) { fp, err := resource.NewFingerprint([]byte(fingerprint)) c.Assert(err, jc.ErrorIsNil) res := resource.Resource{ Meta: resource.Meta{ Name: "spam", Type: resource.TypeFile, Path: "spam.tgz", Description: "you need it", }, Origin: resource.OriginUpload, Revision: 0, Fingerprint: fp, Size: 10, } err = res.Validate() c.Assert(err, jc.ErrorIsNil) apiInfo := params.Resource2API(res) c.Check(apiInfo, jc.DeepEquals, params.Resource{ Name: "spam", Type: "file", Path: "spam.tgz", Description: "you need it", Origin: "upload", Revision: 0, Fingerprint: []byte(fingerprint), Size: 10, }) } func (HelpersSuite) TestAPI2ResourceFull(c *gc.C) { res, err := params.API2Resource(params.Resource{ Name: "spam", Type: "file", Path: "spam.tgz", Description: "you need it", Origin: "upload", Revision: 0, Fingerprint: []byte(fingerprint), Size: 10, }) c.Assert(err, jc.ErrorIsNil) fp, err := resource.NewFingerprint([]byte(fingerprint)) c.Assert(err, jc.ErrorIsNil) expected := resource.Resource{ Meta: resource.Meta{ Name: "spam", Type: resource.TypeFile, Path: "spam.tgz", Description: "you need it", }, Origin: resource.OriginUpload, Revision: 0, Fingerprint: fp, Size: 10, } err = expected.Validate() c.Assert(err, jc.ErrorIsNil) c.Check(res, jc.DeepEquals, expected) } func (HelpersSuite) TestAPI2ResourceBasic(c *gc.C) { res, err := params.API2Resource(params.Resource{ Name: "spam", Type: "file", Path: "spam.tgz", Origin: "upload", }) c.Assert(err, jc.ErrorIsNil) expected := resource.Resource{ Meta: resource.Meta{ Name: "spam", Type: resource.TypeFile, Path: "spam.tgz", Description: 
"", }, Origin: resource.OriginUpload, Revision: 0, Fingerprint: resource.Fingerprint{}, Size: 0, } err = expected.Validate() c.Assert(err, jc.ErrorIsNil) c.Check(res, jc.DeepEquals, expected) } func (HelpersSuite) TestAPI2ResourceNegativeRevision(c *gc.C) { _, err := params.API2Resource(params.Resource{ Name: "spam", Type: "file", Path: "spam.tgz", Fingerprint: []byte(fingerprint), Size: 20, Origin: "store", Revision: -1, }) c.Check(err, gc.ErrorMatches, `bad revision: must be non-negative, got -1`) } func (HelpersSuite) TestAPI2ResourceBadType(c *gc.C) { _, err := params.API2Resource(params.Resource{ Name: "spam", Type: "", Path: "spam.tgz", Origin: "upload", Revision: 0, Fingerprint: []byte(fingerprint), Size: 10, }) c.Check(err, gc.ErrorMatches, `unsupported resource type ""`) } func (HelpersSuite) TestAPI2ResourceBadOrigin(c *gc.C) { _, err := params.API2Resource(params.Resource{ Name: "spam", Type: "file", Path: "spam.tgz", Origin: "", Revision: 0, Fingerprint: []byte(fingerprint), Size: 10, }) c.Check(err, gc.ErrorMatches, `unknown origin ""`) } func (HelpersSuite) TestAPI2ResourceBadFingerprint(c *gc.C) { _, err := params.API2Resource(params.Resource{ Name: "spam", Type: "file", Path: "spam.tgz", Origin: "upload", Revision: 0, Fingerprint: []byte(fingerprint + "1"), Size: 10, }) c.Check(err, gc.ErrorMatches, `invalid fingerprint \(too big\)`) } func (HelpersSuite) TestAPI2ResourceEmptyFingerprintNoSize(c *gc.C) { res, err := params.API2Resource(params.Resource{ Name: "spam", Type: "file", Path: "spam.tgz", Origin: "upload", Revision: 0, Fingerprint: nil, Size: 0, }) c.Assert(err, jc.ErrorIsNil) expected := resource.Resource{ Meta: resource.Meta{ Name: "spam", Type: resource.TypeFile, Path: "spam.tgz", }, Origin: resource.OriginUpload, Revision: 0, Fingerprint: resource.Fingerprint{}, Size: 0, } err = expected.Validate() c.Assert(err, jc.ErrorIsNil) c.Check(res, jc.DeepEquals, expected) } func (HelpersSuite) TestAPI2ResourceEmptyFingerprintWithSize(c *gc.C) { 
_, err := params.API2Resource(params.Resource{ Name: "spam", Type: "file", Path: "spam.tgz", Origin: "upload", Revision: 0, Fingerprint: nil, Size: 10, }) c.Check(err, gc.ErrorMatches, `bad file info: missing fingerprint`) } func (HelpersSuite) TestAPI2ResourceValidateFailed(c *gc.C) { _, err := params.API2Resource(params.Resource{ Name: "", Type: "file", Path: "spam.tgz", Origin: "upload", Revision: 0, Fingerprint: []byte(fingerprint), Size: 10, }) c.Check(err, gc.ErrorMatches, `.*resource missing name`) } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/params.go0000664000175000017500000003227612703461656026121 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. // The params package holds types that are a part of the charm store's external // contract - they will be marshalled (or unmarshalled) as JSON // and delivered through the HTTP API. package params // import "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" import ( "encoding/json" "time" "github.com/juju/utils/debugstatus" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/macaroon.v1" ) const ( // ContentHashHeader specifies the header attribute // that will hold the content hash for archive GET responses. ContentHashHeader = "Content-Sha384" // EntityIdHeader specifies the header attribute that will hold the // id of the entity for archive GET responses. EntityIdHeader = "Entity-Id" ) // Special user/group names. const ( Everyone = "everyone" Admin = "admin" ) // Channel is the name of a channel in which an entity may be published. type Channel string const ( // DevelopmentChannel is the channel used for charms or bundles under development. DevelopmentChannel Channel = "development" // StableChannel is the channel used for stable charms or bundles. StableChannel Channel = "stable" // UnpublishedChannel is the default channel to which charms are uploaded. 
UnpublishedChannel Channel = "unpublished" // NoChannel represents where no channel has been specifically requested. NoChannel Channel = "" ) // MetaAnyResponse holds the result of a meta/any request. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaany type MetaAnyResponse EntityResult // ArchiveUploadResponse holds the result of a post or a put to /id/archive. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#post-idarchive type ArchiveUploadResponse struct { Id *charm.URL PromulgatedId *charm.URL `json:",omitempty"` } // Constants for the StatsUpdateRequest type StatsUpdateType string const ( UpdateDownload StatsUpdateType = "download" // Accesses with non listed clients and web browsers. UpdateTraffic StatsUpdateType = "traffic" // Bots and unknown clients. UpdateDeploy StatsUpdateType = "deploy" // known clients like juju client. ) // StatsUpdateRequest holds the parameters for a put to /stats/update. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#stats-update type StatsUpdateRequest struct { Entries []StatsUpdateEntry } // StatsUpdateEntry holds an entry of the StatsUpdateRequest for a put to /stats/update. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#stats-update type StatsUpdateEntry struct { Timestamp time.Time // Time when the update did happen. Type StatsUpdateType // One of the constant Download, Traffic or Deploy. CharmReference *charm.URL // The charm to be updated. } // ExpandedId holds a charm or bundle fully qualified id. // A slice of ExpandedId is used as response for // id/expand-id GET requests. type ExpandedId struct { Id string } // ArchiveSizeResponse holds the result of an // id/meta/archive-size GET request. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaarchive-size type ArchiveSizeResponse struct { Size int64 } // HashResponse holds the result of id/meta/hash and id/meta/hash256 GET // requests. 
// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetahash
// and https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetahash256
type HashResponse struct {
	Sum string
}

// ManifestFile holds information about a charm or bundle file.
// A slice of ManifestFile is used as response for
// id/meta/manifest GET requests.
// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetamanifest
type ManifestFile struct {
	Name string
	Size int64
}

// ArchiveUploadTimeResponse holds the result of an id/meta/archive-upload-time
// GET request. See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaarchive-upload-time
type ArchiveUploadTimeResponse struct {
	UploadTime time.Time
}

// RelatedResponse holds the result of an id/meta/charm-related GET request.
// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-related
type RelatedResponse struct {
	// Requires holds an entry for each interface provided by
	// the charm, containing all charms that require that interface.
	Requires map[string][]EntityResult `json:",omitempty"`
	// Provides holds an entry for each interface required by
	// the charm, containing all charms that provide that interface.
	Provides map[string][]EntityResult `json:",omitempty"`
}

// RevisionInfoResponse holds the result of an id/meta/revision-info GET
// request. See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetarevision-info
type RevisionInfoResponse struct {
	Revisions []*charm.URL
}

// SupportedSeriesResponse holds the result of an id/meta/supported-series GET
// request. See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetasupported-series
type SupportedSeriesResponse struct {
	SupportedSeries []string
}

// BundleCount holds the result of an id/meta/bundle-unit-count
// or bundle-machine-count GET request.
// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundle-unit-count // and https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundle-machine-count type BundleCount struct { Count int } // TagsResponse holds the result of an id/meta/tags GET request. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetatags type TagsResponse struct { Tags []string } // Published holds the result of a changes/published GET request. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-changespublished type Published struct { Id *charm.URL PublishTime time.Time } // DebugStatus holds the result of the status checks. // This is defined for backward compatibility: new clients should use // debugstatus.CheckResult directly. type DebugStatus debugstatus.CheckResult // EntityResult holds a the resolved entity ID along with any requested metadata. type EntityResult struct { Id *charm.URL // Meta holds at most one entry for each meta value // specified in the include flags, holding the // data that would be returned by reading /meta/meta?id=id. // Metadata not relevant to a particular result will not // be included. Meta map[string]interface{} `json:",omitempty"` } // SearchResponse holds the response from a search operation. type SearchResponse struct { SearchTime time.Duration Total int Results []EntityResult } // ListResponse holds the response from a list operation. type ListResponse struct { Results []EntityResult } // IdUserResponse holds the result of an id/meta/id-user GET request. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-user type IdUserResponse struct { User string } // IdSeriesResponse holds the result of an id/meta/id-series GET request. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-series type IdSeriesResponse struct { Series string } // IdNameResponse holds the result of an id/meta/id-name GET request. 
// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-name type IdNameResponse struct { Name string } // IdRevisionResponse holds the result of an id/meta/id-revision GET request. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-revision type IdRevisionResponse struct { Revision int } // IdResponse holds the result of an id/meta/id GET request. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid type IdResponse struct { Id *charm.URL User string `json:",omitempty"` Series string `json:",omitempty"` Name string Revision int } // PermResponse holds the result of an id/meta/perm GET request. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaperm type PermResponse struct { Read []string Write []string } // PermRequest holds the request of an id/meta/perm PUT request. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmetaperm type PermRequest struct { Read []string Write []string } // PromulgatedResponse holds the result of an id/meta/promulgated GET request. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetapromulgated type PromulgatedResponse struct { Promulgated bool } // PromulgateRequest holds the request of an id/promulgate PUT request. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idpromulgate type PromulgateRequest struct { Promulgated bool } // PublishRequest holds the request of an id/publish PUT request. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idpublish type PublishRequest struct { Channels []Channel // Resources defines the resource revisions to use for the charm. // Each resource in the charm's metadata.yaml (if any) must have its // name mapped to a revision. That revision must be one of the // existing revisions for that resource. Resources map[string]int `json:",omitempty"` } // PublishResponse holds the result of an id/publish PUT request. 
// See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idpublish type PublishResponse struct { Id *charm.URL PromulgatedId *charm.URL `json:",omitempty"` } // PublishedResponse holds the result of an id/meta/published GET request. type PublishedResponse struct { // Channels holds an entry for each channel that the // entity has been published to. Info []PublishedInfo } // PublishedInfo holds information on a channel that an entity // has been published to. type PublishedInfo struct { // Channel holds the value of the channel that // the entity has been published to. // This will never be "unpublished" as entities // cannot be published to that channel. Channel Channel // Current holds whether the entity is the most // recently published member of the channel. Current bool } // WhoAmIResponse holds the result of a whoami GET request. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#whoami type WhoAmIResponse struct { User string Groups []string } // Resource describes a resource in the charm store. type Resource struct { // Name identifies the resource. Name string // Type is the name of the resource type. Type string // Path is where the resource will be stored. Path string // Description contains user-facing info about the resource. Description string `json:",omitempty"` // Origin is where the resource has come from. // This is not set in the charmstore response. Origin string `json: ",omitempty"` // Revision is the revision, if applicable. Revision int // Fingerprint is the SHA-384 checksum for the resource blob. Fingerprint []byte // Size is the size of the resource, in bytes. Size int64 } // ResourceUploadResponse holds the result of a post or a put to /id/resources/name. type ResourceUploadResponse struct { Revision int } // CharmRevision holds the revision number of a charm and any error // encountered in retrieving it. 
type CharmRevision struct { Revision int Sha256 string Err error } const ( // BzrDigestKey is the extra-info key used to store the Bazaar digest BzrDigestKey = "bzr-digest" // LegacyDownloadStats is the extra-info key used to store the legacy // download counts, and to retrieve them when // charmstore.LegacyDownloadCountsEnabled is set to true. // TODO (frankban): remove this constant when removing the legacy counts // logic. LegacyDownloadStats = "legacy-download-stats" ) // Log holds the representation of a log message. // This is used by clients to store log events in the charm store. type Log struct { // Data holds the log message as a JSON-encoded value. Data *json.RawMessage // Level holds the log level as a string. Level LogLevel // Type holds the log type as a string. Type LogType // URLs holds a slice of entity URLs associated with the log message. URLs []*charm.URL `json:",omitempty"` } // LogResponse represents a single log message and is used in the responses // to /log GET requests. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-log type LogResponse struct { // Data holds the log message as a JSON-encoded value. Data json.RawMessage // Level holds the log level as a string. Level LogLevel // Type holds the log type as a string. Type LogType // URLs holds a slice of entity URLs associated with the log message. URLs []*charm.URL `json:",omitempty"` // Time holds the time of the log. Time time.Time } // LogLevel defines log levels (e.g. "info" or "error") to be used in log // requests and responses. type LogLevel string const ( InfoLevel LogLevel = "info" WarningLevel LogLevel = "warning" ErrorLevel LogLevel = "error" ) // LogType defines log types (e.g. "ingestion") to be used in log requests and // responses. 
type LogType string const ( IngestionType LogType = "ingestion" LegacyStatisticsType LogType = "legacyStatistics" IngestionStart = "ingestion started" IngestionComplete = "ingestion completed" LegacyStatisticsImportStart = "legacy statistics import started" LegacyStatisticsImportComplete = "legacy statistics import completed" ) // SetAuthCookie holds the parameters used to make a set-auth-cookie request // to the charm store. type SetAuthCookie struct { // Macaroons holds a slice of macaroons. Macaroons macaroon.Slice } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/stats.go0000664000175000017500000000333112672604507025761 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package params // import "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" // Define the kinds to be included in stats keys. const ( StatsArchiveDownload = "archive-download" StatsArchiveDownloadPromulgated = "archive-download-promulgated" StatsArchiveDelete = "archive-delete" StatsArchiveFailedUpload = "archive-failed-upload" StatsArchiveUpload = "archive-upload" // The following kinds are in use in the legacy API. StatsCharmInfo = "charm-info" StatsCharmMissing = "charm-missing" StatsCharmEvent = "charm-event" ) // Statistic holds one element of a stats/counter response. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-statscounter type Statistic struct { Key string `json:",omitempty"` Date string `json:",omitempty"` Count int64 } // StatsResponse holds the result of an id/meta/stats GET request. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetastats type StatsResponse struct { // ArchiveDownloadCount is superceded by ArchiveDownload but maintained for // backward compatibility. ArchiveDownloadCount int64 // ArchiveDownload holds the downloads count for a specific revision of the // entity. 
ArchiveDownload StatsCount // ArchiveDownloadAllRevisions holds the downloads count for all revisions // of the entity. ArchiveDownloadAllRevisions StatsCount } // StatsCount holds stats counts and is used as part of StatsResponse. type StatsCount struct { Total int64 // Total count over all time. Day int64 // Count over the last day. Week int64 // Count over the last week. Month int64 // Count over the last month. } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/package_test.go0000664000175000017500000000032612672604507027256 0ustar marcomarco// Copyright 2014 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package params_test import ( "testing" gc "gopkg.in/check.v1" ) func TestPackage(t *testing.T) { gc.TestingT(t) } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/archive.go0000664000175000017500000000560112672604507024763 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package csclient // import "gopkg.in/juju/charmrepo.v2-unstable/csclient" import ( "crypto/sha512" "fmt" "io" "io/ioutil" "os" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" ) // ReadSeekCloser implements io.ReadSeeker and io.Closer. type ReadSeekCloser interface { io.ReadSeeker io.Closer } // openArchive is used to turn the current charm or bundle implementations // into readers for their corresponding archive. // It returns the corresponding archive reader, its hex-encoded SHA384 hash // and size. func openArchive(entity interface{}) (r ReadSeekCloser, hash string, size int64, err error) { var path string switch entity := entity.(type) { case archiverTo: // For example: charm.CharmDir or charm.BundleDir. 
file, err := newRemoveOnCloseTempFile("entity-archive") if err != nil { return nil, "", 0, errgo.Notef(err, "cannot make temporary file") } if err := entity.ArchiveTo(file); err != nil { file.Close() return nil, "", 0, errgo.Notef(err, "cannot create entity archive") } if _, err := file.Seek(0, 0); err != nil { file.Close() return nil, "", 0, errgo.Notef(err, "cannot seek") } hash, size, err = readerHashAndSize(file) if err != nil { file.Close() return nil, "", 0, errgo.Mask(err) } return file, hash, size, nil case *charm.BundleArchive: path = entity.Path case *charm.CharmArchive: path = entity.Path default: return nil, "", 0, errgo.Newf("cannot get the archive for entity type %T", entity) } file, err := os.Open(path) if err != nil { return nil, "", 0, errgo.Mask(err) } hash, size, err = readerHashAndSize(file) if err != nil { file.Close() return nil, "", 0, errgo.Mask(err) } return file, hash, size, nil } // readerHashAndSize returns the hex-encoded SHA384 hash and size of // the data included in the given reader. func readerHashAndSize(r io.ReadSeeker) (hash string, size int64, err error) { h := sha512.New384() size, err = io.Copy(h, r) if err != nil { return "", 0, errgo.Notef(err, "cannot calculate hash") } if _, err := r.Seek(0, 0); err != nil { return "", 0, errgo.Notef(err, "cannot seek") } return fmt.Sprintf("%x", h.Sum(nil)), size, nil } type archiverTo interface { ArchiveTo(io.Writer) error } // newRemoveOnCloseTempFile creates a new temporary file in the default // directory for temporary files with a name beginning with prefix. // The resulting file is removed when the file is closed. func newRemoveOnCloseTempFile(prefix string) (*removeOnCloseFile, error) { file, err := ioutil.TempFile("", prefix) if err != nil { return nil, err } return &removeOnCloseFile{file}, nil } // removeOnCloseFile represents a file which is removed when closed. 
type removeOnCloseFile struct { *os.File } func (r *removeOnCloseFile) Close() error { r.File.Close() return os.Remove(r.File.Name()) } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/csclient.go0000664000175000017500000007150112703461656025151 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. // The csclient package provides access to the charm store API. // // Errors returned from the remote API server with an associated error // code will have a cause of type params.ErrorCode holding that code. // // If a call to the API returns an error because authorization has been // denied, an error with a cause satisfying IsAuthorizationError will be // returned. Note that these errors can also include errors returned by // httpbakery when it attempts to discharge macaroons. package csclient // import "gopkg.in/juju/charmrepo.v2-unstable/csclient" import ( "bytes" "encoding/base64" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" "reflect" "strconv" "strings" "unicode" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charm.v6-unstable/resource" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" ) const apiVersion = "v5" // ServerURL holds the default location of the global charm store. // An alternate location can be configured by changing the URL field in the // Params struct. // For live testing or QAing the application, a different charm store // location should be used, for instance "https://api.staging.jujucharms.com". var ServerURL = "https://api.jujucharms.com/charmstore" // Client represents the client side of a charm store. type Client struct { params Params bclient httpClient header http.Header statsDisabled bool channel params.Channel } // Params holds parameters for creating a new charm store client. 
type Params struct { // URL holds the root endpoint URL of the charmstore, // with no trailing slash, not including the version. // For example https://api.jujucharms.com/charmstore // If empty, the default charm store client location is used. URL string // User holds the name to authenticate as for the client. If User is empty, // no credentials will be sent. User string // Password holds the password for the given user, for authenticating the // client. Password string // BakeryClient holds the bakery client to use when making // requests to the store. This is used in preference to // HTTPClient. BakeryClient *httpbakery.Client // HTTPClient holds the HTTP client to use when making // requests to the store. If nil, httpbakery.NewHTTPClient will // be used. HTTPClient *http.Client // VisitWebPage is called when authorization requires that // the user visits a web page to authenticate themselves. // If nil, no interaction will be allowed. This field // is ignored if BakeryClient is provided. VisitWebPage func(url *url.URL) error // Auth holds a list of macaroons that will be added to the cookie jar of // the HTTP Client that is used by this client. Auth macaroon.Slice } type httpClient interface { DoWithBody(*http.Request, io.ReadSeeker) (*http.Response, error) } // New returns a new charm store client. func New(p Params) *Client { if p.URL == "" { p.URL = ServerURL } bclient := p.BakeryClient if bclient == nil { if p.HTTPClient == nil { p.HTTPClient = httpbakery.NewHTTPClient() } bclient = &httpbakery.Client{ Client: p.HTTPClient, VisitWebPage: p.VisitWebPage, } } if len(p.Auth) > 0 { url, err := url.Parse(p.URL) // A non-nil error here will get caught at request time when we try // to parse the URL, and without a valid URL, the macaroons don't matter // anyway. if err == nil { httpbakery.SetCookie(bclient.Jar, url, p.Auth) } } return &Client{ bclient: bclient, params: p, } } // ServerURL returns the charm store URL used by the client. 
func (c *Client) ServerURL() string { return c.params.URL } // DisableStats disables incrementing download stats when retrieving archives // from the charm store. func (c *Client) DisableStats() { c.statsDisabled = true } // WithChannel returns a new client whose requests are done using the // given channel. func (c *Client) WithChannel(channel params.Channel) *Client { client := *c client.channel = channel return &client } // Channel returns the currently set channel. func (c *Client) Channel() params.Channel { return c.channel } // SetHTTPHeader sets custom HTTP headers that will be sent to the charm store // on each request. func (c *Client) SetHTTPHeader(header http.Header) { c.header = header } // GetArchive retrieves the archive for the given charm or bundle, returning a // reader its data can be read from, the fully qualified id of the // corresponding entity, the hex-encoded SHA384 hash of the data and its size. func (c *Client) GetArchive(id *charm.URL) (r io.ReadCloser, eid *charm.URL, hash string, size int64, err error) { // Create the request. req, err := http.NewRequest("GET", "", nil) if err != nil { return nil, nil, "", 0, errgo.Notef(err, "cannot make new request") } // Send the request. v := url.Values{} if c.statsDisabled { v.Set("stats", "0") } u := url.URL{ Path: "/" + id.Path() + "/archive", RawQuery: v.Encode(), } resp, err := c.Do(req, u.String()) if err != nil { return nil, nil, "", 0, errgo.NoteMask(err, "cannot get archive", isAPIError) } // Validate the response headers. entityId := resp.Header.Get(params.EntityIdHeader) if entityId == "" { resp.Body.Close() return nil, nil, "", 0, errgo.Newf("no %s header found in response", params.EntityIdHeader) } eid, err = charm.ParseURL(entityId) if err != nil { // The server did not return a valid id. resp.Body.Close() return nil, nil, "", 0, errgo.Notef(err, "invalid entity id found in response") } if eid.Revision == -1 { // The server did not return a fully qualified entity id. 
resp.Body.Close() return nil, nil, "", 0, errgo.Newf("archive get returned not fully qualified entity id %q", eid) } hash = resp.Header.Get(params.ContentHashHeader) if hash == "" { resp.Body.Close() return nil, nil, "", 0, errgo.Newf("no %s header found in response", params.ContentHashHeader) } // Validate the response contents. if resp.ContentLength < 0 { // TODO frankban: handle the case the contents are chunked. resp.Body.Close() return nil, nil, "", 0, errgo.Newf("no content length found in response") } return resp.Body, eid, hash, resp.ContentLength, nil } // ListResources retrieves the metadata about resources for the given charms. // It returns a slice with an element for each of the given ids, holding the // resources for the respective id. func (c *Client) ListResources(id *charm.URL) ([]params.Resource, error) { var result []params.Resource if err := c.Get("/"+id.Path()+"/meta/resources", &result); err != nil { return nil, errgo.NoteMask(err, "cannot get resource metadata from the charm store", isAPIError) } // Set all the Origin fields appropriately. for i := range result { result[i].Origin = resource.OriginStore.String() } return result, nil } // UploadResource uploads the bytes for a resource. func (c *Client) UploadResource(id *charm.URL, name, path string, file io.ReadSeeker) (revision int, err error) { hash, size, err := readerHashAndSize(file) if err != nil { return -1, errgo.Mask(err) } // Prepare the request. req, err := http.NewRequest("POST", "", nil) if err != nil { return -1, errgo.Notef(err, "cannot make new request") } req.Header.Set("Content-Type", "application/octet-stream") req.ContentLength = size hash = url.QueryEscape(hash) path = url.QueryEscape(path) url := fmt.Sprintf("/%s/resource/%s?hash=%s&filename=%s", id.Path(), name, hash, path) // Send the request. resp, err := c.DoWithBody(req, url, file) if err != nil { return -1, errgo.NoteMask(err, "cannot post resource", isAPIError) } defer resp.Body.Close() // Parse the response. 
var result params.ResourceUploadResponse if err := parseResponseBody(resp.Body, &result); err != nil { return -1, errgo.Mask(err) } return result.Revision, nil } // Publish tells the charmstore to mark the given charm as published with the // given resource revisions to the given channels. func (s *Client) Publish(id *charm.URL, channels []params.Channel, resources map[string]int) error { if len(channels) == 0 { return nil } val := ¶ms.PublishRequest{ Resources: resources, Channels: channels, } if err := s.Put("/"+id.Path()+"/publish", val); err != nil { return errgo.Mask(err, isAPIError) } return nil } // ResourceData holds information about a resource. // It must be closed after use. type ResourceData struct { io.ReadCloser Hash string Size int64 } // GetResource retrieves byes of the resource with the given name and revision // for the given charm, returning a reader its data can be read from, the // SHA384 hash of the data and its size. // // Note that the result must be closed after use. func (c *Client) GetResource(id *charm.URL, name string, revision int) (result ResourceData, err error) { if revision < 0 { return result, errgo.New("revision must be a non-negative integer") } // Create the request. req, err := http.NewRequest("GET", "", nil) if err != nil { return result, errgo.Notef(err, "cannot make new request") } url := "/" + id.Path() + "/resource/" + name if revision >= 0 { url += "/" + strconv.Itoa(revision) } resp, err := c.Do(req, url) if err != nil { return result, errgo.NoteMask(err, "cannot get resource", isAPIError) } defer func() { if err != nil { resp.Body.Close() } }() // Validate the response headers. hash := resp.Header.Get(params.ContentHashHeader) if hash == "" { return result, errgo.Newf("no %s header found in response", params.ContentHashHeader) } // Validate the response contents. 
if resp.ContentLength < 0 { return result, errgo.Newf("no content length found in response") } return ResourceData{ ReadCloser: resp.Body, Hash: hash, Size: resp.ContentLength, }, nil } // ResourceMeta returns the metadata for the resource on charm id with the // given name and revision. func (c *Client) ResourceMeta(id *charm.URL, name string, revision int) (params.Resource, error) { path := fmt.Sprintf("/%s/meta/resource/%s/%d", id.Path(), name, revision) var result params.Resource if err := c.Get(path, &result); err != nil { return result, errgo.NoteMask(err, fmt.Sprintf("cannot get %q", path), isAPIError) } return result, nil } // StatsUpdate updates the download stats for the given id and specific time. func (c *Client) StatsUpdate(req params.StatsUpdateRequest) error { return c.Put("/stats/update", req) } // UploadCharm uploads the given charm to the charm store with the given id, // which must not specify a revision. // The accepted charm implementations are charm.CharmDir and // charm.CharmArchive. // // UploadCharm returns the id that the charm has been given in the // store - this will be the same as id except the revision. func (c *Client) UploadCharm(id *charm.URL, ch charm.Charm) (*charm.URL, error) { if id.Revision != -1 { return nil, errgo.Newf("revision specified in %q, but should not be specified", id) } r, hash, size, err := openArchive(ch) if err != nil { return nil, errgo.Notef(err, "cannot open charm archive") } defer r.Close() return c.uploadArchive(id, r, hash, size, -1) } // UploadCharmWithRevision uploads the given charm to the // given id in the charm store, which must contain a revision. // If promulgatedRevision is not -1, it specifies that the charm // should be marked as promulgated with that revision. // // This method is provided only for testing and should not // generally be used otherwise. 
func (c *Client) UploadCharmWithRevision(id *charm.URL, ch charm.Charm, promulgatedRevision int) error { if id.Revision == -1 { return errgo.Newf("revision not specified in %q", id) } r, hash, size, err := openArchive(ch) if err != nil { return errgo.Notef(err, "cannot open charm archive") } defer r.Close() _, err = c.uploadArchive(id, r, hash, size, promulgatedRevision) return errgo.Mask(err, isAPIError) } // UploadBundle uploads the given charm to the charm store with the given id, // which must not specify a revision. // The accepted bundle implementations are charm.BundleDir and // charm.BundleArchive. // // UploadBundle returns the id that the bundle has been given in the // store - this will be the same as id except the revision. func (c *Client) UploadBundle(id *charm.URL, b charm.Bundle) (*charm.URL, error) { if id.Revision != -1 { return nil, errgo.Newf("revision specified in %q, but should not be specified", id) } r, hash, size, err := openArchive(b) if err != nil { return nil, errgo.Notef(err, "cannot open bundle archive") } defer r.Close() return c.uploadArchive(id, r, hash, size, -1) } // UploadBundleWithRevision uploads the given bundle to the // given id in the charm store, which must contain a revision. // If promulgatedRevision is not -1, it specifies that the charm // should be marked as promulgated with that revision. // // This method is provided only for testing and should not // generally be used otherwise. func (c *Client) UploadBundleWithRevision(id *charm.URL, b charm.Bundle, promulgatedRevision int) error { if id.Revision == -1 { return errgo.Newf("revision not specified in %q", id) } r, hash, size, err := openArchive(b) if err != nil { return errgo.Notef(err, "cannot open charm archive") } defer r.Close() _, err = c.uploadArchive(id, r, hash, size, promulgatedRevision) return errgo.Mask(err, isAPIError) } // uploadArchive pushes the archive for the charm or bundle represented by // the given body, its hex-encoded SHA384 hash and its size. 
It returns // the resulting entity reference. The given id should include the series // and should not include the revision. func (c *Client) uploadArchive(id *charm.URL, body io.ReadSeeker, hash string, size int64, promulgatedRevision int) (*charm.URL, error) { // When uploading archives, it can be a problem that the // an error response is returned while we are still writing // the body data. // To avoid this, we log in first so that we don't need to // do the macaroon exchange after POST. // Unfortunately this won't help matters if the user is logged in but // doesn't have privileges to write to the stated charm. // A better solution would be to fix https://github.com/golang/go/issues/3665 // and use the 100-Continue client functionality. // // We only need to do this when basic auth credentials are not provided. if c.params.User == "" { if err := c.Login(); err != nil { return nil, errgo.NoteMask(err, "cannot log in", isAPIError) } } method := "POST" promulgatedArg := "" if id.Revision != -1 { method = "PUT" if promulgatedRevision != -1 { pr := *id pr.User = "" pr.Revision = promulgatedRevision promulgatedArg = "&promulgated=" + pr.Path() } } // Prepare the request. req, err := http.NewRequest(method, "", nil) if err != nil { return nil, errgo.Notef(err, "cannot make new request") } req.Header.Set("Content-Type", "application/zip") req.ContentLength = size // Send the request. resp, err := c.DoWithBody( req, "/"+id.Path()+"/archive?hash="+hash+promulgatedArg, body, ) if err != nil { return nil, errgo.NoteMask(err, "cannot post archive", isAPIError) } defer resp.Body.Close() // Parse the response. var result params.ArchiveUploadResponse if err := parseResponseBody(resp.Body, &result); err != nil { return nil, errgo.Mask(err) } return result.Id, nil } // PutExtraInfo puts extra-info data for the given id. // Each entry in the info map causes a value in extra-info with // that key to be set to the associated value. // Entries not set in the map will be unchanged. 
func (c *Client) PutExtraInfo(id *charm.URL, info map[string]interface{}) error { return c.Put("/"+id.Path()+"/meta/extra-info", info) } // PutCommonInfo puts common-info data for the given id. // Each entry in the info map causes a value in common-info with // that key to be set to the associated value. // Entries not set in the map will be unchanged. func (c *Client) PutCommonInfo(id *charm.URL, info map[string]interface{}) error { return c.Put("/"+id.Path()+"/meta/common-info", info) } // Meta fetches metadata on the charm or bundle with the // given id. The result value provides a value // to be filled in with the result, which must be // a pointer to a struct containing members corresponding // to possible metadata include parameters // (see https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmeta). // // It returns the fully qualified id of the entity. // // The name of the struct member is translated to // a lower case hyphen-separated form; for example, // ArchiveSize becomes "archive-size", and BundleMachineCount // becomes "bundle-machine-count", but may also // be specified in the field's tag // // This example will fill in the result structure with information // about the given id, including information on its archive // size (include archive-size), upload time (include archive-upload-time) // and digest (include extra-info/digest). 
// // var result struct { // ArchiveSize params.ArchiveSizeResponse // ArchiveUploadTime params.ArchiveUploadTimeResponse // Digest string `csclient:"extra-info/digest"` // } // id, err := client.Meta(id, &result) func (c *Client) Meta(id *charm.URL, result interface{}) (*charm.URL, error) { if result == nil { return nil, fmt.Errorf("expected valid result pointer, not nil") } resultv := reflect.ValueOf(result) resultt := resultv.Type() if resultt.Kind() != reflect.Ptr { return nil, fmt.Errorf("expected pointer, not %T", result) } resultt = resultt.Elem() if resultt.Kind() != reflect.Struct { return nil, fmt.Errorf("expected pointer to struct, not %T", result) } resultv = resultv.Elem() // At this point, resultv refers to the struct value pointed // to by result, and resultt is its type. numField := resultt.NumField() includes := make([]string, 0, numField) // results holds an entry for each field in the result value, // pointing to the value for that field. results := make(map[string]reflect.Value) for i := 0; i < numField; i++ { field := resultt.Field(i) if field.PkgPath != "" { // Field is private; ignore it. continue } if field.Anonymous { // At some point in the future, it might be nice to // support anonymous fields, but for now the // additional complexity doesn't seem worth it. return nil, fmt.Errorf("anonymous fields not supported") } apiName := field.Tag.Get("csclient") if apiName == "" { apiName = hyphenate(field.Name) } includes = append(includes, "include="+apiName) results[apiName] = resultv.FieldByName(field.Name).Addr() } // We unmarshal into rawResult, then unmarshal each field // separately into its place in the final result value. // Note that we can't use params.MetaAnyResponse because // that will unpack all the values inside the Meta field, // but we want to keep them raw so that we can unmarshal // them ourselves. 
var rawResult struct { Id *charm.URL Meta map[string]json.RawMessage } path := "/" + id.Path() + "/meta/any" if len(includes) > 0 { path += "?" + strings.Join(includes, "&") } if err := c.Get(path, &rawResult); err != nil { return nil, errgo.NoteMask(err, fmt.Sprintf("cannot get %q", path), isAPIError) } // Note that the server is not required to send back values // for all fields. "If there is no metadata for the given meta path, the // element will be omitted" // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaany for name, r := range rawResult.Meta { v, ok := results[name] if !ok { // The server has produced a result that we // don't know about. Ignore it. continue } // Unmarshal the raw JSON into the final struct field. err := json.Unmarshal(r, v.Interface()) if err != nil { return nil, errgo.Notef(err, "cannot unmarshal %s", name) } } return rawResult.Id, nil } // hyphenate returns the hyphenated version of the given // field name, as specified in the Client.Meta method. func hyphenate(s string) string { // TODO hyphenate FooHTTPBar as foo-http-bar? var buf bytes.Buffer var prevLower bool for _, r := range s { if !unicode.IsUpper(r) { prevLower = true buf.WriteRune(r) continue } if prevLower { buf.WriteRune('-') } buf.WriteRune(unicode.ToLower(r)) prevLower = false } return buf.String() } // Get makes a GET request to the given path in the charm store (not // including the host name or version prefix but including a leading /), // parsing the result as JSON into the given result value, which should // be a pointer to the expected data, but may be nil if no result is // desired. func (c *Client) Get(path string, result interface{}) error { req, err := http.NewRequest("GET", "", nil) if err != nil { return errgo.Notef(err, "cannot make new request") } resp, err := c.Do(req, path) if err != nil { return errgo.Mask(err, isAPIError) } defer resp.Body.Close() // Parse the response. 
if err := parseResponseBody(resp.Body, result); err != nil { return errgo.Mask(err) } return nil } // Put makes a PUT request to the given path in the charm store // (not including the host name or version prefix, but including a leading /), // marshaling the given value as JSON to use as the request body. func (c *Client) Put(path string, val interface{}) error { return c.PutWithResponse(path, val, nil) } // PutWithResponse makes a PUT request to the given path in the charm store // (not including the host name or version prefix, but including a leading /), // marshaling the given value as JSON to use as the request body. Additionally, // this method parses the result as JSON into the given result value, which // should be a pointer to the expected data, but may be nil if no result is // desired. func (c *Client) PutWithResponse(path string, val, result interface{}) error { req, _ := http.NewRequest("PUT", "", nil) req.Header.Set("Content-Type", "application/json") data, err := json.Marshal(val) if err != nil { return errgo.Notef(err, "cannot marshal PUT body") } body := bytes.NewReader(data) resp, err := c.DoWithBody(req, path, body) if err != nil { return errgo.Mask(err, isAPIError) } defer resp.Body.Close() // Parse the response. if err := parseResponseBody(resp.Body, result); err != nil { return errgo.Mask(err) } return nil } func parseResponseBody(body io.Reader, result interface{}) error { data, err := ioutil.ReadAll(body) if err != nil { return errgo.Notef(err, "cannot read response body") } if result == nil { // The caller doesn't care about the response body. return nil } if err := json.Unmarshal(data, result); err != nil { return errgo.Notef(err, "cannot unmarshal response %q", sizeLimit(data)) } return nil } // DoWithBody is like Do except that the given body is used // as the body of the HTTP request. // // Any error returned from the underlying httpbakery.DoWithBody // request will have an unchanged error cause. 
func (c *Client) DoWithBody(req *http.Request, path string, body io.ReadSeeker) (*http.Response, error) { if c.params.User != "" { userPass := c.params.User + ":" + c.params.Password authBasic := base64.StdEncoding.EncodeToString([]byte(userPass)) req.Header.Set("Authorization", "Basic "+authBasic) } // Prepare the request. if !strings.HasPrefix(path, "/") { return nil, errgo.Newf("path %q is not absolute", path) } for k, vv := range c.header { req.Header[k] = append(req.Header[k], vv...) } u, err := url.Parse(c.params.URL + "/" + apiVersion + path) if err != nil { return nil, errgo.Mask(err) } if c.channel != params.NoChannel { values := u.Query() values.Set("channel", string(c.channel)) u.RawQuery = values.Encode() } req.URL = u // Send the request. resp, err := c.bclient.DoWithBody(req, body) if err != nil { return nil, errgo.Mask(err, isAPIError) } if resp.StatusCode == http.StatusOK { return resp, nil } defer resp.Body.Close() // Parse the response error. data, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, errgo.Notef(err, "cannot read response body") } var perr params.Error if err := json.Unmarshal(data, &perr); err != nil { return nil, errgo.Notef(err, "cannot unmarshal error response %q", sizeLimit(data)) } if perr.Message == "" { return nil, errgo.Newf("error response with empty message %s", sizeLimit(data)) } return nil, &perr } // Do makes an arbitrary request to the charm store. // It adds appropriate headers to the given HTTP request, // sends it to the charm store, and returns the resulting // response. Do never returns a response with a status // that is not http.StatusOK. // // The URL field in the request is ignored and overwritten. // // This is a low level method - more specific Client methods // should be used when possible. // // For requests with a body (for example PUT or POST) use DoWithBody // instead. 
func (c *Client) Do(req *http.Request, path string) (*http.Response, error) { if req.Body != nil { return nil, errgo.New("body unexpectedly provided in http request - use DoWithBody") } return c.DoWithBody(req, path, nil) } func sizeLimit(data []byte) []byte { const max = 1024 if len(data) < max { return data } return append(data[0:max], fmt.Sprintf(" ... [%d bytes omitted]", len(data)-max)...) } // Log sends a log message to the charmstore's log database. func (cs *Client) Log(typ params.LogType, level params.LogLevel, message string, urls ...*charm.URL) error { b, err := json.Marshal(message) if err != nil { return errgo.Notef(err, "cannot marshal log message") } // Prepare and send the log. // TODO (frankban): we might want to buffer logs in order to reduce // requests. logs := []params.Log{{ Data: (*json.RawMessage)(&b), Level: level, Type: typ, URLs: urls, }} b, err = json.Marshal(logs) if err != nil { return errgo.Notef(err, "cannot marshal log message") } req, err := http.NewRequest("POST", "", nil) if err != nil { return errgo.Notef(err, "cannot create log request") } req.Header.Set("Content-Type", "application/json") resp, err := cs.DoWithBody(req, "/log", bytes.NewReader(b)) if err != nil { return errgo.NoteMask(err, "cannot send log message", isAPIError) } resp.Body.Close() return nil } // Login explicitly obtains authorization credentials for the charm store // and stores them in the client's cookie jar. If there was an error // perfoming a login interaction then the error will have a cause of type // *httpbakery.InteractionError. func (cs *Client) Login() error { if err := cs.Get("/delegatable-macaroon", &struct{}{}); err != nil { return errgo.NoteMask(err, "cannot retrieve the authentication macaroon", isAPIError) } return nil } // WhoAmI returns the user and list of groups associated with the macaroon // used to authenticate. 
func (cs *Client) WhoAmI() (*params.WhoAmIResponse, error) { var response params.WhoAmIResponse if err := cs.Get("/whoami", &response); err != nil { return nil, errgo.Mask(err, isAPIError) } return &response, nil } // Latest returns the most current revision for each of the identified // charms. The revision in the provided charm URLs is ignored. func (cs *Client) Latest(curls []*charm.URL) ([]params.CharmRevision, error) { if len(curls) == 0 { return nil, nil } // Prepare the request to the charm store. urls := make([]string, len(curls)) values := url.Values{} // Include the ignore-auth flag so that non-public results do not generate // an error for the whole request. values.Add("ignore-auth", "1") values.Add("include", "id-revision") values.Add("include", "hash256") for i, curl := range curls { url := curl.WithRevision(-1).String() urls[i] = url values.Add("id", url) } u := url.URL{ Path: "/meta/any", RawQuery: values.Encode(), } // Execute the request and retrieve results. var results map[string]struct { Meta struct { IdRevision params.IdRevisionResponse `json:"id-revision"` Hash256 params.HashResponse `json:"hash256"` } } if err := cs.Get(u.String(), &results); err != nil { return nil, errgo.NoteMask(err, "cannot get metadata from the charm store", isAPIError) } // Build the response. responses := make([]params.CharmRevision, len(curls)) for i, url := range urls { result, found := results[url] if !found { responses[i] = params.CharmRevision{ Err: params.ErrNotFound, } continue } responses[i] = params.CharmRevision{ Revision: result.Meta.IdRevision.Revision, Sha256: result.Meta.Hash256.Sum, } } return responses, nil } // JujuMetadataHTTPHeader is the HTTP header name used to send Juju metadata // attributes to the charm store. const JujuMetadataHTTPHeader = "Juju-Metadata" // IsAuthorizationError reports whether the given error // was returned because authorization was denied for a // charmstore request. 
func IsAuthorizationError(err error) bool { err = errgo.Cause(err) switch { case httpbakery.IsDischargeError(err): return true case httpbakery.IsInteractionError(err): return true case err == params.ErrUnauthorized: return true } return false } func isAPIError(err error) bool { if err == nil { return false } err = errgo.Cause(err) if _, ok := err.(params.ErrorCode); ok { return true } return IsAuthorizationError(err) } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/package_test.go0000664000175000017500000000037312672604507025775 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package csclient_test import ( "testing" jujutesting "github.com/juju/testing" ) func TestPackage(t *testing.T) { jujutesting.MgoTestPackage(t, nil) } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/csclient_test.go0000664000175000017500000015333312703461656026214 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package csclient_test import ( "bytes" "crypto/sha256" "crypto/sha512" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/http/httptest" neturl "net/url" "os" "reflect" "strings" "time" jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charm.v6-unstable/resource" "gopkg.in/juju/charmstore.v5-unstable" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/bakerytest" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" "gopkg.in/mgo.v2" "gopkg.in/juju/charmrepo.v2-unstable/csclient" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" charmtesting "gopkg.in/juju/charmrepo.v2-unstable/testing" ) var charmRepo = charmtesting.NewRepo("../internal/test-charm-repo", "quantal") // Define fake attributes to be used in tests. 
var fakeContent, fakeHash, fakeSize = func() (string, string, int64) { content := "fake content" h := sha512.New384() h.Write([]byte(content)) return content, fmt.Sprintf("%x", h.Sum(nil)), int64(len(content)) }() type suite struct { jujutesting.IsolatedMgoSuite client *csclient.Client srv *httptest.Server handler charmstore.HTTPCloseHandler serverParams charmstore.ServerParams discharge func(cond, arg string) ([]checkers.Caveat, error) } var _ = gc.Suite(&suite{}) func (s *suite) SetUpTest(c *gc.C) { s.IsolatedMgoSuite.SetUpTest(c) s.startServer(c, s.Session) s.client = csclient.New(csclient.Params{ URL: s.srv.URL, User: s.serverParams.AuthUsername, Password: s.serverParams.AuthPassword, }) } func (s *suite) TearDownTest(c *gc.C) { s.srv.Close() s.handler.Close() s.IsolatedMgoSuite.TearDownTest(c) } func (s *suite) startServer(c *gc.C, session *mgo.Session) { s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { return nil, fmt.Errorf("no discharge") } discharger := bakerytest.NewDischarger(nil, func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { return s.discharge(cond, arg) }) serverParams := charmstore.ServerParams{ AuthUsername: "test-user", AuthPassword: "test-password", IdentityLocation: discharger.Service.Location(), PublicKeyLocator: discharger, } db := session.DB("charmstore") handler, err := charmstore.NewServer(db, nil, "", serverParams, charmstore.V5) c.Assert(err, gc.IsNil) s.handler = handler s.srv = httptest.NewServer(handler) s.serverParams = serverParams } func (s *suite) TestNewWithBakeryClient(c *gc.C) { // Make a csclient.Client with a custom bakery client that // enables us to tell if that's really being used. 
bclient := httpbakery.NewClient() acquired := false bclient.DischargeAcquirer = dischargeAcquirerFunc(func(firstPartyLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { acquired = true return bclient.AcquireDischarge(firstPartyLocation, cav) }) client := csclient.New(csclient.Params{ URL: s.srv.URL, BakeryClient: bclient, }) s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { return []checkers.Caveat{checkers.DeclaredCaveat("username", "bob")}, nil } err := client.UploadCharmWithRevision( charm.MustParseURL("~bob/precise/wordpress-0"), charmRepo.CharmDir("wordpress"), 42, ) c.Assert(err, gc.IsNil) c.Assert(acquired, gc.Equals, true) } func (s *suite) TestIsAuthorizationError(c *gc.C) { bclient := httpbakery.NewClient() client := csclient.New(csclient.Params{ URL: s.srv.URL, BakeryClient: bclient, }) doSomething := func() error { // Make a request that requires a discharge, which will be denied. err := client.UploadCharmWithRevision( charm.MustParseURL("~bob/precise/wordpress-0"), charmRepo.CharmDir("wordpress"), 42, ) return errgo.Mask(err, errgo.Any) } err := doSomething() c.Assert(err, gc.ErrorMatches, `cannot log in: cannot retrieve the authentication macaroon: cannot get discharge from "https://.*": third party refused discharge: cannot discharge: no discharge`) c.Assert(err, jc.Satisfies, csclient.IsAuthorizationError, gc.Commentf("cause type %T", errgo.Cause(err))) // Make a request that requires an interaction, which will also be denied. 
s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { return nil, &httpbakery.Error{ Code: httpbakery.ErrInteractionRequired, Message: "get out more", Info: &httpbakery.ErrorInfo{ VisitURL: "http://0.1.2.3/", WaitURL: "http://0.1.2.3/", }, } } err = doSomething() c.Assert(err, gc.ErrorMatches, `cannot log in: cannot retrieve the authentication macaroon: cannot get discharge from "https://.*": cannot start interactive session: interaction required but not possible`) c.Assert(err, jc.Satisfies, csclient.IsAuthorizationError) // Make a request that is denied because it's with the wrong user. s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { return []checkers.Caveat{checkers.DeclaredCaveat("username", "alice")}, nil } err = doSomething() c.Assert(err, gc.ErrorMatches, `cannot post archive: unauthorized: access denied for user "alice"`) c.Assert(err, jc.Satisfies, csclient.IsAuthorizationError) err = ¶ms.Error{ Message: "hello", Code: params.ErrForbidden, } c.Assert(err, gc.Not(jc.Satisfies), csclient.IsAuthorizationError) } func (s *suite) TestDefaultServerURL(c *gc.C) { // Add a charm used for tests. url := charm.MustParseURL("~charmers/vivid/testing-wordpress-42") err := s.client.UploadCharmWithRevision( url, charmRepo.CharmDir("wordpress"), 42, ) c.Assert(err, gc.IsNil) s.setPublic(c, url) // Patch the default server URL. s.PatchValue(&csclient.ServerURL, s.srv.URL) // Instantiate a client using the default server URL. client := csclient.New(csclient.Params{ User: s.serverParams.AuthUsername, Password: s.serverParams.AuthPassword, }) c.Assert(client.ServerURL(), gc.Equals, s.srv.URL) // Check that the request succeeds. 
err = client.Get("/vivid/testing-wordpress-42/expand-id", nil) c.Assert(err, gc.IsNil) } func (s *suite) TestSetHTTPHeader(c *gc.C) { var header http.Header srv := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, req *http.Request) { header = req.Header })) defer srv.Close() sendRequest := func(client *csclient.Client) { req, err := http.NewRequest("GET", "", nil) c.Assert(err, jc.ErrorIsNil) _, err = client.Do(req, "/") c.Assert(err, jc.ErrorIsNil) } client := csclient.New(csclient.Params{ URL: srv.URL, }) // Make a first request without custom headers. sendRequest(client) defaultHeaderLen := len(header) // Make a second request adding a couple of custom headers. h := make(http.Header) h.Set("k1", "v1") h.Add("k2", "v2") h.Add("k2", "v3") client.SetHTTPHeader(h) sendRequest(client) c.Assert(header, gc.HasLen, defaultHeaderLen+len(h)) c.Assert(header.Get("k1"), gc.Equals, "v1") c.Assert(header[http.CanonicalHeaderKey("k2")], jc.DeepEquals, []string{"v2", "v3"}) // Make a third request without custom headers. 
client.SetHTTPHeader(nil) sendRequest(client) c.Assert(header, gc.HasLen, defaultHeaderLen) } var getTests = []struct { about string path string nilResult bool expectResult interface{} expectError string expectErrorCode params.ErrorCode }{{ about: "success", path: "/wordpress/expand-id", expectResult: []params.ExpandedId{{ Id: "cs:utopic/wordpress-42", }}, }, { about: "success with nil result", path: "/wordpress/expand-id", nilResult: true, }, { about: "non-absolute path", path: "wordpress", expectError: `path "wordpress" is not absolute`, }, { about: "URL parse error", path: "/wordpress/%zz", expectError: `parse .*: invalid URL escape "%zz"`, }, { about: "result with error code", path: "/blahblah", expectError: "not found", expectErrorCode: params.ErrNotFound, }} func (s *suite) TestGet(c *gc.C) { ch := charmRepo.CharmDir("wordpress") url := charm.MustParseURL("~charmers/utopic/wordpress-42") err := s.client.UploadCharmWithRevision(url, ch, 42) c.Assert(err, gc.IsNil) s.setPublic(c, url) for i, test := range getTests { c.Logf("test %d: %s", i, test.about) // Send the request. var result json.RawMessage var resultPtr interface{} if !test.nilResult { resultPtr = &result } err = s.client.Get(test.path, resultPtr) // Check the response. 
if test.expectError != "" { c.Assert(err, gc.ErrorMatches, test.expectError, gc.Commentf("error is %T; %#v", err, err)) c.Assert(result, gc.IsNil) cause := errgo.Cause(err) if code, ok := cause.(params.ErrorCode); ok { c.Assert(code, gc.Equals, test.expectErrorCode) } else { c.Assert(test.expectErrorCode, gc.Equals, params.ErrorCode("")) } continue } c.Assert(err, gc.IsNil) if test.expectResult != nil { c.Assert(string(result), jc.JSONEquals, test.expectResult) } } } var putErrorTests = []struct { about string path string val interface{} expectError string expectErrorCode params.ErrorCode }{{ about: "bad JSON val", path: "/~charmers/utopic/wordpress-42/meta/extra-info/foo", val: make(chan int), expectError: `cannot marshal PUT body: json: unsupported type: chan int`, }, { about: "non-absolute path", path: "wordpress", expectError: `path "wordpress" is not absolute`, }, { about: "URL parse error", path: "/wordpress/%zz", expectError: `parse .*: invalid URL escape "%zz"`, }, { about: "result with error code", path: "/blahblah", expectError: "not found", expectErrorCode: params.ErrNotFound, }} func (s *suite) TestPutError(c *gc.C) { url := charm.MustParseURL("~charmers/utopic/wordpress-42") err := s.client.UploadCharmWithRevision(url, charmRepo.CharmDir("wordpress"), 42) c.Assert(err, gc.IsNil) s.setPublic(c, url) checkErr := func(err error, expectError string, expectErrorCode params.ErrorCode) { c.Assert(err, gc.ErrorMatches, expectError) cause := errgo.Cause(err) if code, ok := cause.(params.ErrorCode); ok { c.Assert(code, gc.Equals, expectErrorCode) } else { c.Assert(expectErrorCode, gc.Equals, params.ErrorCode("")) } } var result string for i, test := range putErrorTests { c.Logf("test %d: %s", i, test.about) err := s.client.Put(test.path, test.val) checkErr(err, test.expectError, test.expectErrorCode) err = s.client.PutWithResponse(test.path, test.val, &result) checkErr(err, test.expectError, test.expectErrorCode) c.Assert(result, gc.Equals, "") } } func (s 
*suite) TestPutSuccess(c *gc.C) { url := charm.MustParseURL("~charmers/utopic/wordpress-42") err := s.client.UploadCharmWithRevision(url, charmRepo.CharmDir("wordpress"), 42) c.Assert(err, gc.IsNil) s.setPublic(c, url) perms := []string{"bob"} err = s.client.Put("/~charmers/utopic/wordpress-42/meta/perm/read", perms) c.Assert(err, gc.IsNil) var got []string err = s.client.Get("/~charmers/utopic/wordpress-42/meta/perm/read", &got) c.Assert(err, gc.IsNil) c.Assert(got, jc.DeepEquals, perms) } func (s *suite) TestPutWithResponseSuccess(c *gc.C) { // There are currently no endpoints that return a response // on PUT, so we'll create a fake server just to test // the PutWithResponse method. handler := func(w http.ResponseWriter, req *http.Request) { io.Copy(w, req.Body) } srv := httptest.NewServer(http.HandlerFunc(handler)) defer srv.Close() client := csclient.New(csclient.Params{ URL: srv.URL, }) sendBody := "hello" var result string err := client.PutWithResponse("/somewhere", sendBody, &result) c.Assert(err, gc.IsNil) c.Assert(result, gc.Equals, sendBody) // Check that the method accepts a nil result. err = client.PutWithResponse("/somewhere", sendBody, nil) c.Assert(err, gc.IsNil) } func (s *suite) TestGetArchive(c *gc.C) { if jujutesting.MgoServer.WithoutV8 { c.Skip("mongo javascript not enabled") } key := s.checkGetArchive(c) // Check that the downloads count for the entity has been updated. s.checkCharmDownloads(c, key, 1) } func (s *suite) TestGetArchiveWithStatsDisabled(c *gc.C) { s.client.DisableStats() key := s.checkGetArchive(c) // Check that the downloads count for the entity has not been updated. 
s.checkCharmDownloads(c, key, 0) } func (s *suite) TestStatsUpdate(c *gc.C) { if jujutesting.MgoServer.WithoutV8 { c.Skip("mongo javascript not enabled") } key := s.checkGetArchive(c) s.checkCharmDownloads(c, key, 1) err := s.client.StatsUpdate(params.StatsUpdateRequest{ Entries: []params.StatsUpdateEntry{{ CharmReference: charm.MustParseURL("~charmers/utopic/wordpress-42"), Timestamp: time.Now(), Type: params.UpdateDeploy, }}, }) c.Assert(err, gc.IsNil) s.checkCharmDownloads(c, key, 2) } var checkDownloadsAttempt = utils.AttemptStrategy{ Total: 1 * time.Second, Delay: 100 * time.Millisecond, } func (s *suite) checkCharmDownloads(c *gc.C, key string, expect int64) { stableCount := 0 for a := checkDownloadsAttempt.Start(); a.Next(); { count := s.statsForKey(c, key) if count == expect { // Wait for a couple of iterations to make sure that it's stable. if stableCount++; stableCount >= 2 { return } } else { stableCount = 0 } if !a.HasNext() { c.Errorf("unexpected download count for %s, got %d, want %d", key, count, expect) } } } func (s *suite) statsForKey(c *gc.C, key string) int64 { var result []params.Statistic err := s.client.Get("/stats/counter/"+key, &result) c.Assert(err, gc.IsNil) c.Assert(result, gc.HasLen, 1) return result[0].Count } func (s *suite) checkGetArchive(c *gc.C) string { ch := charmRepo.CharmArchive(c.MkDir(), "wordpress") // Open the archive and calculate its hash and size. 
r, expectHash, expectSize := archiveHashAndSize(c, ch.Path) r.Close() url := charm.MustParseURL("~charmers/utopic/wordpress-42") err := s.client.UploadCharmWithRevision(url, ch, 42) c.Assert(err, gc.IsNil) s.setPublic(c, url) rb, id, hash, size, err := s.client.GetArchive(url) c.Assert(err, gc.IsNil) defer rb.Close() c.Assert(id, jc.DeepEquals, url) c.Assert(hash, gc.Equals, expectHash) c.Assert(size, gc.Equals, expectSize) h := sha512.New384() size, err = io.Copy(h, rb) c.Assert(err, gc.IsNil) c.Assert(size, gc.Equals, expectSize) c.Assert(fmt.Sprintf("%x", h.Sum(nil)), gc.Equals, expectHash) // Return the stats key for the archive download. keys := []string{params.StatsArchiveDownload, "utopic", "wordpress", "charmers", "42"} return strings.Join(keys, ":") } func (s *suite) TestGetArchiveErrorNotFound(c *gc.C) { url := charm.MustParseURL("no-such") r, id, hash, size, err := s.client.GetArchive(url) c.Assert(err, gc.ErrorMatches, `cannot get archive: no matching charm or bundle for cs:no-such`) c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) c.Assert(r, gc.IsNil) c.Assert(id, gc.IsNil) c.Assert(hash, gc.Equals, "") c.Assert(size, gc.Equals, int64(0)) } var getArchiveWithBadResponseTests = []struct { about string response *http.Response error error expectError string }{{ about: "http client Get failure", error: errgo.New("round trip failure"), expectError: "cannot get archive: Get .*: round trip failure", }, { about: "no entity id header", response: &http.Response{ Status: "200 OK", StatusCode: 200, Proto: "HTTP/1.0", ProtoMajor: 1, ProtoMinor: 0, Header: http.Header{ params.ContentHashHeader: {fakeHash}, }, Body: ioutil.NopCloser(strings.NewReader("")), ContentLength: fakeSize, }, expectError: "no " + params.EntityIdHeader + " header found in response", }, { about: "invalid entity id header", response: &http.Response{ Status: "200 OK", StatusCode: 200, Proto: "HTTP/1.0", ProtoMajor: 1, ProtoMinor: 0, Header: http.Header{ params.ContentHashHeader: 
{fakeHash}, params.EntityIdHeader: {"no:such"}, }, Body: ioutil.NopCloser(strings.NewReader("")), ContentLength: fakeSize, }, expectError: `invalid entity id found in response: charm or bundle URL has invalid schema: "no:such"`, }, { about: "partial entity id header", response: &http.Response{ Status: "200 OK", StatusCode: 200, Proto: "HTTP/1.0", ProtoMajor: 1, ProtoMinor: 0, Header: http.Header{ params.ContentHashHeader: {fakeHash}, params.EntityIdHeader: {"django"}, }, Body: ioutil.NopCloser(strings.NewReader("")), ContentLength: fakeSize, }, expectError: `archive get returned not fully qualified entity id "cs:django"`, }, { about: "no hash header", response: &http.Response{ Status: "200 OK", StatusCode: 200, Proto: "HTTP/1.0", ProtoMajor: 1, ProtoMinor: 0, Header: http.Header{ params.EntityIdHeader: {"cs:utopic/django-42"}, }, Body: ioutil.NopCloser(strings.NewReader("")), ContentLength: fakeSize, }, expectError: "no " + params.ContentHashHeader + " header found in response", }, { about: "no content length", response: &http.Response{ Status: "200 OK", StatusCode: 200, Proto: "HTTP/1.0", ProtoMajor: 1, ProtoMinor: 0, Header: http.Header{ params.ContentHashHeader: {fakeHash}, params.EntityIdHeader: {"cs:utopic/django-42"}, }, Body: ioutil.NopCloser(strings.NewReader("")), ContentLength: -1, }, expectError: "no content length found in response", }} func (s *suite) TestGetArchiveWithBadResponse(c *gc.C) { id := charm.MustParseURL("wordpress") for i, test := range getArchiveWithBadResponseTests { c.Logf("test %d: %s", i, test.about) cl := badResponseClient(test.response, test.error) _, _, _, _, err := cl.GetArchive(id) c.Assert(err, gc.ErrorMatches, test.expectError) } } func (s *suite) TestUploadArchiveWithCharm(c *gc.C) { path := charmRepo.CharmArchivePath(c.MkDir(), "wordpress") // Post the archive. 
s.checkUploadArchive(c, path, "~charmers/utopic/wordpress", "cs:~charmers/utopic/wordpress-0") // Posting the same archive a second time does not change its resulting id. s.checkUploadArchive(c, path, "~charmers/utopic/wordpress", "cs:~charmers/utopic/wordpress-0") // Posting a different archive to the same URL increases the resulting id // revision. path = charmRepo.CharmArchivePath(c.MkDir(), "mysql") s.checkUploadArchive(c, path, "~charmers/utopic/wordpress", "cs:~charmers/utopic/wordpress-1") } func (s *suite) prepareBundleCharms(c *gc.C) { // Add the charms required by the wordpress-simple bundle to the store. err := s.client.UploadCharmWithRevision( charm.MustParseURL("~charmers/utopic/wordpress-42"), charmRepo.CharmArchive(c.MkDir(), "wordpress"), 42, ) c.Assert(err, gc.IsNil) s.setPublic(c, charm.MustParseURL("~charmers/utopic/wordpress-42")) err = s.client.UploadCharmWithRevision( charm.MustParseURL("~charmers/utopic/mysql-47"), charmRepo.CharmArchive(c.MkDir(), "mysql"), 47, ) c.Assert(err, gc.IsNil) s.setPublic(c, charm.MustParseURL("~charmers/utopic/mysql-47")) } func (s *suite) TestUploadArchiveWithBundle(c *gc.C) { s.prepareBundleCharms(c) path := charmRepo.BundleArchivePath(c.MkDir(), "wordpress-simple") // Post the archive. 
s.checkUploadArchive(c, path, "~charmers/bundle/wordpress-simple", "cs:~charmers/bundle/wordpress-simple-0") } var uploadArchiveWithBadResponseTests = []struct { about string response *http.Response error error expectError string }{{ about: "http client Post failure", error: errgo.New("round trip failure"), expectError: "cannot post archive: Post .*: round trip failure", }, { about: "invalid JSON in body", response: &http.Response{ Status: "200 OK", StatusCode: 200, Proto: "HTTP/1.0", ProtoMajor: 1, ProtoMinor: 0, Body: ioutil.NopCloser(strings.NewReader("no id here")), ContentLength: 0, }, expectError: `cannot unmarshal response "no id here": .*`, }} func (s *suite) TestUploadArchiveWithBadResponse(c *gc.C) { id := charm.MustParseURL("trusty/wordpress") for i, test := range uploadArchiveWithBadResponseTests { c.Logf("test %d: %s", i, test.about) cl := badResponseClient(test.response, test.error) id, err := csclient.UploadArchive(cl, id, strings.NewReader(fakeContent), fakeHash, fakeSize, -1) c.Assert(id, gc.IsNil) c.Assert(err, gc.ErrorMatches, test.expectError) } } func (s *suite) TestUploadMultiSeriesArchive(c *gc.C) { path := charmRepo.CharmArchivePath(c.MkDir(), "multi-series") s.checkUploadArchive(c, path, "~charmers/wordpress", "cs:~charmers/wordpress-0") } func (s *suite) TestUploadArchiveWithServerError(c *gc.C) { path := charmRepo.CharmArchivePath(c.MkDir(), "wordpress") body, hash, size := archiveHashAndSize(c, path) defer body.Close() // Send an invalid hash so that the server returns an error. url := charm.MustParseURL("~charmers/trusty/wordpress") id, err := csclient.UploadArchive(s.client, url, body, hash+"mismatch", size, -1) c.Assert(id, gc.IsNil) c.Assert(err, gc.ErrorMatches, "cannot post archive: cannot put archive blob: hash mismatch") } func (s *suite) checkUploadArchive(c *gc.C, path, url, expectId string) { // Open the archive and calculate its hash and size. 
body, hash, size := archiveHashAndSize(c, path) defer body.Close() // Post the archive. id, err := csclient.UploadArchive(s.client, charm.MustParseURL(url), body, hash, size, -1) c.Assert(err, gc.IsNil) c.Assert(id.String(), gc.Equals, expectId) // Ensure the entity has been properly added to the db. r, resultingId, resultingHash, resultingSize, err := s.client.GetArchive(id) c.Assert(err, gc.IsNil) defer r.Close() c.Assert(resultingId, gc.DeepEquals, id) c.Assert(resultingHash, gc.Equals, hash) c.Assert(resultingSize, gc.Equals, size) } func archiveHashAndSize(c *gc.C, path string) (r csclient.ReadSeekCloser, hash string, size int64) { f, err := os.Open(path) c.Assert(err, gc.IsNil) h := sha512.New384() size, err = io.Copy(h, f) c.Assert(err, gc.IsNil) _, err = f.Seek(0, 0) c.Assert(err, gc.IsNil) return f, fmt.Sprintf("%x", h.Sum(nil)), size } func (s *suite) TestUploadCharmDir(c *gc.C) { ch := charmRepo.CharmDir("wordpress") id, err := s.client.UploadCharm(charm.MustParseURL("~charmers/utopic/wordpress"), ch) c.Assert(err, gc.IsNil) c.Assert(id.String(), gc.Equals, "cs:~charmers/utopic/wordpress-0") s.checkUploadCharm(c, id, ch) } func (s *suite) TestUploadCharmArchive(c *gc.C) { ch := charmRepo.CharmArchive(c.MkDir(), "wordpress") id, err := s.client.UploadCharm(charm.MustParseURL("~charmers/trusty/wordpress"), ch) c.Assert(err, gc.IsNil) c.Assert(id.String(), gc.Equals, "cs:~charmers/trusty/wordpress-0") s.checkUploadCharm(c, id, ch) } func (s *suite) TestUploadCharmArchiveWithRevision(c *gc.C) { id := charm.MustParseURL("~charmers/trusty/wordpress-42") err := s.client.UploadCharmWithRevision( id, charmRepo.CharmDir("wordpress"), 10, ) c.Assert(err, gc.IsNil) s.setPublic(c, id) ch := charmRepo.CharmArchive(c.MkDir(), "wordpress") s.checkUploadCharm(c, id, ch) id.User = "" id.Revision = 10 s.checkUploadCharm(c, id, ch) } func (s *suite) TestUploadCharmArchiveWithUnwantedRevision(c *gc.C) { ch := charmRepo.CharmDir("wordpress") _, err := 
s.client.UploadCharm(charm.MustParseURL("~charmers/bundle/wp-20"), ch) c.Assert(err, gc.ErrorMatches, `revision specified in "cs:~charmers/bundle/wp-20", but should not be specified`) } func (s *suite) TestUploadCharmErrorUnknownType(c *gc.C) { ch := charmRepo.CharmDir("wordpress") unknown := struct { charm.Charm }{ch} id, err := s.client.UploadCharm(charm.MustParseURL("~charmers/trusty/wordpress"), unknown) c.Assert(err, gc.ErrorMatches, `cannot open charm archive: cannot get the archive for entity type .*`) c.Assert(id, gc.IsNil) } func (s *suite) TestUploadCharmErrorOpenArchive(c *gc.C) { // Since the internal code path is shared between charms and bundles, just // using a charm for this test also exercises the same failure for bundles. ch := charmRepo.CharmArchive(c.MkDir(), "wordpress") ch.Path = "no-such-file" id, err := s.client.UploadCharm(charm.MustParseURL("trusty/wordpress"), ch) c.Assert(err, gc.ErrorMatches, `cannot open charm archive: open no-such-file: no such file or directory`) c.Assert(id, gc.IsNil) } func (s *suite) TestUploadCharmErrorArchiveTo(c *gc.C) { // Since the internal code path is shared between charms and bundles, just // using a charm for this test also exercises the same failure for bundles. id, err := s.client.UploadCharm(charm.MustParseURL("trusty/wordpress"), failingArchiverTo{}) c.Assert(err, gc.ErrorMatches, `cannot open charm archive: cannot create entity archive: bad wolf`) c.Assert(id, gc.IsNil) } type failingArchiverTo struct { charm.Charm } func (failingArchiverTo) ArchiveTo(io.Writer) error { return errgo.New("bad wolf") } func (s *suite) checkUploadCharm(c *gc.C, id *charm.URL, ch charm.Charm) { r, _, _, _, err := s.client.GetArchive(id) c.Assert(err, gc.IsNil) data, err := ioutil.ReadAll(r) c.Assert(err, gc.IsNil) result, err := charm.ReadCharmArchiveBytes(data) c.Assert(err, gc.IsNil) // Comparing the charm metadata is sufficient for ensuring the result is // the same charm previously uploaded. 
c.Assert(result.Meta(), jc.DeepEquals, ch.Meta()) } func (s *suite) TestUploadBundleDir(c *gc.C) { s.prepareBundleCharms(c) b := charmRepo.BundleDir("wordpress-simple") id, err := s.client.UploadBundle(charm.MustParseURL("~charmers/bundle/wordpress-simple"), b) c.Assert(err, gc.IsNil) c.Assert(id.String(), gc.Equals, "cs:~charmers/bundle/wordpress-simple-0") s.checkUploadBundle(c, id, b) } func (s *suite) TestUploadBundleArchive(c *gc.C) { s.prepareBundleCharms(c) path := charmRepo.BundleArchivePath(c.MkDir(), "wordpress-simple") b, err := charm.ReadBundleArchive(path) c.Assert(err, gc.IsNil) id, err := s.client.UploadBundle(charm.MustParseURL("~charmers/bundle/wp"), b) c.Assert(err, gc.IsNil) c.Assert(id.String(), gc.Equals, "cs:~charmers/bundle/wp-0") s.checkUploadBundle(c, id, b) } func (s *suite) TestUploadBundleArchiveWithUnwantedRevision(c *gc.C) { s.prepareBundleCharms(c) path := charmRepo.BundleArchivePath(c.MkDir(), "wordpress-simple") b, err := charm.ReadBundleArchive(path) c.Assert(err, gc.IsNil) _, err = s.client.UploadBundle(charm.MustParseURL("~charmers/bundle/wp-20"), b) c.Assert(err, gc.ErrorMatches, `revision specified in "cs:~charmers/bundle/wp-20", but should not be specified`) } func (s *suite) TestUploadBundleArchiveWithRevision(c *gc.C) { s.prepareBundleCharms(c) path := charmRepo.BundleArchivePath(c.MkDir(), "wordpress-simple") b, err := charm.ReadBundleArchive(path) c.Assert(err, gc.IsNil) id := charm.MustParseURL("~charmers/bundle/wp-22") err = s.client.UploadBundleWithRevision(id, b, 34) c.Assert(err, gc.IsNil) s.checkUploadBundle(c, id, b) id.User = "" id.Revision = 34 s.checkUploadBundle(c, id, b) } func (s *suite) TestUploadBundleErrorUploading(c *gc.C) { // Uploading without specifying the series should return an error. // Note that the possible upload errors are already extensively exercised // as part of the client.uploadArchive tests. 
id, err := s.client.UploadBundle( charm.MustParseURL("~charmers/wordpress-simple"), charmRepo.BundleDir("wordpress-simple"), ) c.Assert(err, gc.ErrorMatches, `cannot post archive: cannot read charm archive: archive file "metadata.yaml" not found`) c.Assert(id, gc.IsNil) } func (s *suite) TestUploadBundleErrorUnknownType(c *gc.C) { b := charmRepo.BundleDir("wordpress-simple") unknown := struct { charm.Bundle }{b} id, err := s.client.UploadBundle(charm.MustParseURL("bundle/wordpress"), unknown) c.Assert(err, gc.ErrorMatches, `cannot open bundle archive: cannot get the archive for entity type .*`) c.Assert(id, gc.IsNil) } func (s *suite) checkUploadBundle(c *gc.C, id *charm.URL, b charm.Bundle) { r, _, _, _, err := s.client.GetArchive(id) c.Assert(err, gc.IsNil) data, err := ioutil.ReadAll(r) c.Assert(err, gc.IsNil) result, err := charm.ReadBundleArchiveBytes(data) c.Assert(err, gc.IsNil) // Comparing the bundle data is sufficient for ensuring the result is // the same bundle previously uploaded. c.Assert(result.Data(), jc.DeepEquals, b.Data()) } func (s *suite) TestDoAuthorization(c *gc.C) { // Add a charm to be deleted. err := s.client.UploadCharmWithRevision( charm.MustParseURL("~charmers/utopic/wordpress-42"), charmRepo.CharmArchive(c.MkDir(), "wordpress"), 42, ) c.Assert(err, gc.IsNil) // Check that when we use incorrect authorization, // we get an error trying to set the charm's extra-info. client := csclient.New(csclient.Params{ URL: s.srv.URL, User: s.serverParams.AuthUsername, Password: "bad password", }) req, err := http.NewRequest("PUT", "", nil) c.Assert(err, gc.IsNil) _, err = client.Do(req, "/~charmers/utopic/wordpress-42/meta/extra-info/foo") c.Assert(err, gc.ErrorMatches, "invalid user name or password") c.Assert(errgo.Cause(err), gc.Equals, params.ErrUnauthorized) client = csclient.New(csclient.Params{ URL: s.srv.URL, User: s.serverParams.AuthUsername, Password: s.serverParams.AuthPassword, }) // Check that the charm is still there. 
err = client.Get("/~charmers/utopic/wordpress-42/expand-id", nil) c.Assert(err, gc.IsNil) // Then check that when we use the correct authorization, // the delete succeeds. req, err = http.NewRequest("PUT", "", nil) c.Assert(err, gc.IsNil) req.Header.Set("Content-Type", "application/json") resp, err := client.DoWithBody(req, "/~charmers/utopic/wordpress-42/meta/extra-info/foo", strings.NewReader(`"hello"`)) c.Assert(err, gc.IsNil) resp.Body.Close() // Check that it's really changed. var val string err = client.Get("/utopic/wordpress-42/meta/extra-info/foo", &val) c.Assert(err, gc.IsNil) c.Assert(val, gc.Equals, "hello") } var getWithBadResponseTests = []struct { about string error error response *http.Response responseErr error expectError string }{{ about: "http client Get failure", error: errgo.New("round trip failure"), expectError: "Get .*: round trip failure", }, { about: "body read error", response: &http.Response{ Status: "200 OK", StatusCode: 200, Proto: "HTTP/1.0", ProtoMajor: 1, ProtoMinor: 0, Body: ioutil.NopCloser(&errorReader{"body read error"}), ContentLength: -1, }, expectError: "cannot read response body: body read error", }, { about: "badly formatted json response", response: &http.Response{ Status: "200 OK", StatusCode: 200, Proto: "HTTP/1.0", ProtoMajor: 1, ProtoMinor: 0, Body: ioutil.NopCloser(strings.NewReader("bad")), ContentLength: -1, }, expectError: `cannot unmarshal response "bad": .*`, }, { about: "badly formatted json error", response: &http.Response{ Status: "404 Not found", StatusCode: 404, Proto: "HTTP/1.0", ProtoMajor: 1, ProtoMinor: 0, Body: ioutil.NopCloser(strings.NewReader("bad")), ContentLength: -1, }, expectError: `cannot unmarshal error response "bad": .*`, }, { about: "error response with empty message", response: &http.Response{ Status: "404 Not found", StatusCode: 404, Proto: "HTTP/1.0", ProtoMajor: 1, ProtoMinor: 0, Body: ioutil.NopCloser(bytes.NewReader(mustMarshalJSON(¶ms.Error{ Code: "foo", }))), ContentLength: -1, }, 
expectError: "error response with empty message .*", }} func (s *suite) TestGetWithBadResponse(c *gc.C) { for i, test := range getWithBadResponseTests { c.Logf("test %d: %s", i, test.about) cl := badResponseClient(test.response, test.error) var result interface{} err := cl.Get("/foo", &result) c.Assert(err, gc.ErrorMatches, test.expectError) } } func badResponseClient(resp *http.Response, err error) *csclient.Client { client := httpbakery.NewHTTPClient() client.Transport = &cannedRoundTripper{ resp: resp, error: err, } return csclient.New(csclient.Params{ URL: "http://0.1.2.3", User: "bob", HTTPClient: client, }) } var hyphenateTests = []struct { val string expect string }{{ val: "Hello", expect: "hello", }, { val: "HelloThere", expect: "hello-there", }, { val: "HelloHTTP", expect: "hello-http", }, { val: "helloHTTP", expect: "hello-http", }, { val: "hellothere", expect: "hellothere", }, { val: "Long4Camel32WithDigits45", expect: "long4-camel32-with-digits45", }, { // The result here is equally dubious, but Go identifiers // should not contain underscores. val: "With_Dubious_Underscore", expect: "with_-dubious_-underscore", }} func (s *suite) TestHyphenate(c *gc.C) { for i, test := range hyphenateTests { c.Logf("test %d. %q", i, test.val) c.Assert(csclient.Hyphenate(test.val), gc.Equals, test.expect) } } func (s *suite) TestDo(c *gc.C) { // Do is tested fairly comprehensively (but indirectly) // in TestGet, so just a trivial smoke test here. 
url := charm.MustParseURL("~charmers/utopic/wordpress-42") err := s.client.UploadCharmWithRevision( url, charmRepo.CharmArchive(c.MkDir(), "wordpress"), 42, ) c.Assert(err, gc.IsNil) s.setPublic(c, url) err = s.client.PutExtraInfo(url, map[string]interface{}{ "foo": "bar", }) c.Assert(err, gc.IsNil) req, _ := http.NewRequest("GET", "", nil) resp, err := s.client.Do(req, "/wordpress/meta/extra-info/foo") c.Assert(err, gc.IsNil) defer resp.Body.Close() data, err := ioutil.ReadAll(resp.Body) c.Assert(err, gc.IsNil) c.Assert(string(data), gc.Equals, `"bar"`) } func (s *suite) TestWithChannel(c *gc.C) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { fmt.Fprint(w, req.URL.Query().Encode()) })) client := csclient.New(csclient.Params{ URL: srv.URL, }) makeRequest := func(client *csclient.Client) string { req, err := http.NewRequest("GET", "", nil) c.Assert(err, jc.ErrorIsNil) resp, err := client.DoWithBody(req, "/", nil) c.Assert(err, jc.ErrorIsNil) c.Assert(resp.StatusCode, gc.Equals, http.StatusOK) b, err := ioutil.ReadAll(resp.Body) c.Assert(err, jc.ErrorIsNil) return string(b) } c.Assert(makeRequest(client), gc.Equals, "") devClient := client.WithChannel(params.DevelopmentChannel) c.Assert(makeRequest(devClient), gc.Equals, "channel=development") // Ensure the original client has not been mutated. 
c.Assert(makeRequest(client), gc.Equals, "") } var metaBadTypeTests = []struct { result interface{} expectError string }{{ result: "", expectError: "expected pointer, not string", }, { result: new(string), expectError: `expected pointer to struct, not \*string`, }, { result: new(struct{ Embed }), expectError: "anonymous fields not supported", }, { expectError: "expected valid result pointer, not nil", }} func (s *suite) TestMetaBadType(c *gc.C) { id := charm.MustParseURL("wordpress") for _, test := range metaBadTypeTests { _, err := s.client.Meta(id, test.result) c.Assert(err, gc.ErrorMatches, test.expectError) } } type Embed struct{} type embed struct{} func (s *suite) TestMeta(c *gc.C) { ch := charmRepo.CharmDir("wordpress") url := charm.MustParseURL("~charmers/utopic/wordpress-42") purl := charm.MustParseURL("utopic/wordpress-42") err := s.client.UploadCharmWithRevision(url, ch, 42) c.Assert(err, gc.IsNil) s.setPublic(c, url) // Put some extra-info. err = s.client.PutExtraInfo(url, map[string]interface{}{ "attr": "value", }) c.Assert(err, gc.IsNil) tests := []struct { about string id string expectResult interface{} expectError string expectErrorCode params.ErrorCode }{{ about: "no fields", id: "utopic/wordpress", expectResult: &struct{}{}, }, { about: "single field", id: "utopic/wordpress", expectResult: &struct { CharmMetadata *charm.Meta }{ CharmMetadata: ch.Meta(), }, }, { about: "three fields", id: "wordpress", expectResult: &struct { CharmMetadata *charm.Meta CharmConfig *charm.Config ExtraInfo map[string]string }{ CharmMetadata: ch.Meta(), CharmConfig: ch.Config(), ExtraInfo: map[string]string{"attr": "value"}, }, }, { about: "tagged field", id: "wordpress", expectResult: &struct { Foo *charm.Meta `csclient:"charm-metadata"` Attr string `csclient:"extra-info/attr"` }{ Foo: ch.Meta(), Attr: "value", }, }, { about: "id not found", id: "bogus", expectResult: &struct{}{}, expectError: `cannot get "/bogus/meta/any": no matching charm or bundle for cs:bogus`, 
expectErrorCode: params.ErrNotFound, }, { about: "unmarshal into invalid type", id: "wordpress", expectResult: new(struct { CharmMetadata []string }), expectError: `cannot unmarshal charm-metadata: json: cannot unmarshal object into Go value of type \[]string`, }, { about: "unmarshal into struct with unexported fields", id: "wordpress", expectResult: &struct { unexported int CharmMetadata *charm.Meta // Embedded anonymous fields don't get tagged as unexported // due to https://code.google.com/p/go/issues/detail?id=7247 // TODO fix in go 1.5. // embed }{ CharmMetadata: ch.Meta(), }, }, { about: "metadata not appropriate for charm", id: "wordpress", expectResult: &struct { CharmMetadata *charm.Meta BundleMetadata *charm.BundleData }{ CharmMetadata: ch.Meta(), }, }} for i, test := range tests { c.Logf("test %d: %s", i, test.about) // Make a result value of the same type as the expected result, // but empty. result := reflect.New(reflect.TypeOf(test.expectResult).Elem()).Interface() id, err := s.client.Meta(charm.MustParseURL(test.id), result) if test.expectError != "" { c.Assert(err, gc.ErrorMatches, test.expectError) if code, ok := errgo.Cause(err).(params.ErrorCode); ok { c.Assert(code, gc.Equals, test.expectErrorCode) } else { c.Assert(test.expectErrorCode, gc.Equals, params.ErrorCode("")) } c.Assert(id, gc.IsNil) continue } c.Assert(err, gc.IsNil) c.Assert(id, jc.DeepEquals, purl) c.Assert(result, jc.DeepEquals, test.expectResult) } } func (s *suite) TestPutExtraInfo(c *gc.C) { s.checkPutInfo(c, false) } func (s *suite) TestPutCommonInfo(c *gc.C) { s.checkPutInfo(c, true) } func (s *suite) checkPutInfo(c *gc.C, common bool) { ch := charmRepo.CharmDir("wordpress") url := charm.MustParseURL("~charmers/utopic/wordpress-42") err := s.client.UploadCharmWithRevision(url, ch, 42) c.Assert(err, gc.IsNil) s.setPublic(c, url) // Put some info in. 
info := map[string]interface{}{ "attr1": "value1", "attr2": []interface{}{"one", "two"}, } if common { err = s.client.PutCommonInfo(url, info) c.Assert(err, gc.IsNil) } else { err = s.client.PutExtraInfo(url, info) c.Assert(err, gc.IsNil) } // Verify that we get it back OK. var valExtraInfo struct { ExtraInfo map[string]interface{} } var valCommonInfo struct { CommonInfo map[string]interface{} } if common { _, err = s.client.Meta(url, &valCommonInfo) c.Assert(err, gc.IsNil) c.Assert(valCommonInfo.CommonInfo, jc.DeepEquals, info) } else { _, err = s.client.Meta(url, &valExtraInfo) c.Assert(err, gc.IsNil) c.Assert(valExtraInfo.ExtraInfo, jc.DeepEquals, info) } // Put some more in. if common { err = s.client.PutCommonInfo(url, map[string]interface{}{ "attr3": "three", }) c.Assert(err, gc.IsNil) } else { err = s.client.PutExtraInfo(url, map[string]interface{}{ "attr3": "three", }) c.Assert(err, gc.IsNil) } // Verify that we get all the previous results and the new value. info["attr3"] = "three" if common { _, err = s.client.Meta(url, &valCommonInfo) c.Assert(err, gc.IsNil) c.Assert(valCommonInfo.CommonInfo, jc.DeepEquals, info) } else { _, err = s.client.Meta(url, &valExtraInfo) c.Assert(err, gc.IsNil) c.Assert(valExtraInfo.ExtraInfo, jc.DeepEquals, info) } } func (s *suite) TestPutExtraInfoWithError(c *gc.C) { err := s.client.PutExtraInfo(charm.MustParseURL("wordpress"), map[string]interface{}{"attr": "val"}) c.Assert(err, gc.ErrorMatches, `no matching charm or bundle for cs:wordpress`) c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) } func (s *suite) TestPutCommonInfoWithError(c *gc.C) { err := s.client.PutCommonInfo(charm.MustParseURL("wordpress"), map[string]interface{}{"homepage": "val"}) c.Assert(err, gc.ErrorMatches, `no matching charm or bundle for cs:wordpress`) c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) } type errorReader struct { error string } func (e *errorReader) Read(buf []byte) (int, error) { return 0, errgo.New(e.error) } type 
cannedRoundTripper struct { resp *http.Response error error } func (r *cannedRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { return r.resp, r.error } func mustMarshalJSON(x interface{}) []byte { data, err := json.Marshal(x) if err != nil { panic(err) } return data } func (s *suite) TestLog(c *gc.C) { logs := []struct { typ params.LogType level params.LogLevel message string urls []*charm.URL }{{ typ: params.IngestionType, level: params.InfoLevel, message: "ingestion info", urls: nil, }, { typ: params.LegacyStatisticsType, level: params.ErrorLevel, message: "statistics error", urls: []*charm.URL{ charm.MustParseURL("cs:mysql"), charm.MustParseURL("cs:wordpress"), }, }} for _, log := range logs { err := s.client.Log(log.typ, log.level, log.message, log.urls...) c.Assert(err, gc.IsNil) } var result []*params.LogResponse err := s.client.Get("/log", &result) c.Assert(err, gc.IsNil) c.Assert(result, gc.HasLen, len(logs)) for i, l := range result { c.Assert(l.Type, gc.Equals, logs[len(logs)-(1+i)].typ) c.Assert(l.Level, gc.Equals, logs[len(logs)-(1+i)].level) var msg string err := json.Unmarshal([]byte(l.Data), &msg) c.Assert(err, gc.IsNil) c.Assert(msg, gc.Equals, logs[len(logs)-(1+i)].message) c.Assert(l.URLs, jc.DeepEquals, logs[len(logs)-(1+i)].urls) } } func (s *suite) TestMacaroonAuthorization(c *gc.C) { ch := charmRepo.CharmDir("wordpress") curl := charm.MustParseURL("~charmers/utopic/wordpress-42") purl := charm.MustParseURL("utopic/wordpress-42") err := s.client.UploadCharmWithRevision(curl, ch, 42) c.Assert(err, gc.IsNil) err = s.client.Put("/"+curl.Path()+"/meta/perm/read", []string{"bob"}) c.Assert(err, gc.IsNil) // Create a client without basic auth credentials client := csclient.New(csclient.Params{ URL: s.srv.URL, }) var result struct{ IdRevision struct{ Revision int } } // TODO 2015-01-23: once supported, rewrite the test using POST requests. 
_, err = client.Meta(purl, &result) c.Assert(err, gc.ErrorMatches, `cannot get "/utopic/wordpress-42/meta/any\?include=id-revision": cannot get discharge from ".*": third party refused discharge: cannot discharge: no discharge`) c.Assert(httpbakery.IsDischargeError(errgo.Cause(err)), gc.Equals, true) s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { return []checkers.Caveat{checkers.DeclaredCaveat("username", "bob")}, nil } _, err = client.Meta(curl, &result) c.Assert(err, gc.IsNil) c.Assert(result.IdRevision.Revision, gc.Equals, curl.Revision) visitURL := "http://0.1.2.3/visitURL" s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { return nil, &httpbakery.Error{ Code: httpbakery.ErrInteractionRequired, Message: "interaction required", Info: &httpbakery.ErrorInfo{ VisitURL: visitURL, WaitURL: "http://0.1.2.3/waitURL", }} } client = csclient.New(csclient.Params{ URL: s.srv.URL, VisitWebPage: func(vurl *neturl.URL) error { c.Check(vurl.String(), gc.Equals, visitURL) return fmt.Errorf("stopping interaction") }}) _, err = client.Meta(purl, &result) c.Assert(err, gc.ErrorMatches, `cannot get "/utopic/wordpress-42/meta/any\?include=id-revision": cannot get discharge from ".*": cannot start interactive session: stopping interaction`) c.Assert(result.IdRevision.Revision, gc.Equals, curl.Revision) c.Assert(httpbakery.IsInteractionError(errgo.Cause(err)), gc.Equals, true) } func (s *suite) TestLogin(c *gc.C) { ch := charmRepo.CharmDir("wordpress") url := charm.MustParseURL("~charmers/utopic/wordpress-42") purl := charm.MustParseURL("utopic/wordpress-42") err := s.client.UploadCharmWithRevision(url, ch, 42) c.Assert(err, gc.IsNil) err = s.client.Put("/"+url.Path()+"/meta/perm/read", []string{"bob"}) c.Assert(err, gc.IsNil) httpClient := httpbakery.NewHTTPClient() client := csclient.New(csclient.Params{ URL: s.srv.URL, HTTPClient: httpClient, }) var result struct{ IdRevision struct{ Revision int } } _, err = client.Meta(purl, &result) 
c.Assert(err, gc.NotNil) // Try logging in when the discharger fails. err = client.Login() c.Assert(err, gc.ErrorMatches, `cannot retrieve the authentication macaroon: cannot get discharge from ".*": third party refused discharge: cannot discharge: no discharge`) // Allow the discharge. s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { return []checkers.Caveat{checkers.DeclaredCaveat("username", "bob")}, nil } err = client.Login() c.Assert(err, gc.IsNil) // Change discharge so that we're sure the cookies are being // used rather than the discharge mechanism. s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { return nil, fmt.Errorf("no discharge") } // Check that the request still works. _, err = client.Meta(purl, &result) c.Assert(err, gc.IsNil) c.Assert(result.IdRevision.Revision, gc.Equals, url.Revision) // Check that we've got one cookie. srvURL, err := neturl.Parse(s.srv.URL) c.Assert(err, gc.IsNil) c.Assert(httpClient.Jar.Cookies(srvURL), gc.HasLen, 1) // Log in again. err = client.Login() c.Assert(err, gc.IsNil) // Check that we still only have one cookie. c.Assert(httpClient.Jar.Cookies(srvURL), gc.HasLen, 1) } func (s *suite) TestWhoAmI(c *gc.C) { httpClient := httpbakery.NewHTTPClient() client := csclient.New(csclient.Params{ URL: s.srv.URL, HTTPClient: httpClient, }) response, err := client.WhoAmI() c.Assert(err, gc.ErrorMatches, `cannot get discharge from ".*": third party refused discharge: cannot discharge: no discharge`) s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { return []checkers.Caveat{checkers.DeclaredCaveat("username", "bob")}, nil } response, err = client.WhoAmI() c.Assert(err, gc.IsNil) c.Assert(response.User, gc.Equals, "bob") } func (s *suite) TestPublish(c *gc.C) { id := charm.MustParseURL("cs:~who/trusty/mysql") ch := charmRepo.CharmArchive(c.MkDir(), "mysql") // Upload the charm. 
url, err := s.client.UploadCharm(id, ch) c.Assert(err, gc.IsNil) // have to make a new repo from the client, since the embedded repo is not // authenticated. err = s.client.Publish(url, []params.Channel{params.DevelopmentChannel}, nil) c.Assert(err, jc.ErrorIsNil) client := s.client.WithChannel(params.DevelopmentChannel) err = client.Get("/"+url.Path()+"/meta/id", nil) c.Assert(err, jc.ErrorIsNil) client = s.client.WithChannel(params.StableChannel) err = client.Get("/"+url.Path()+"/meta/id", nil) c.Assert(err, gc.ErrorMatches, ".*not found in stable channel") } func (s *suite) TestPublishNoChannel(c *gc.C) { id := charm.MustParseURL("cs:~who/trusty/mysql") err := s.client.Publish(id, nil, nil) c.Assert(err, jc.ErrorIsNil) } func (s *suite) TestUploadResource(c *gc.C) { ch := charmtesting.NewCharmMeta(&charm.Meta{ Resources: map[string]resource.Meta{ "resname": { Name: "resname", Path: "foo.zip", }, }, }) url, err := s.client.UploadCharm(charm.MustParseURL("cs:~who/trusty/mysql"), ch) c.Assert(err, gc.IsNil) for i := 0; i < 3; i++ { // Upload the resource. data := fmt.Sprintf("boo!%d", i) rev, err := s.client.UploadResource(url, "resname", "data.zip", strings.NewReader(data)) c.Assert(err, jc.ErrorIsNil) c.Assert(rev, gc.Equals, i) // Check that we can download it OK. 
getResult, err := s.client.GetResource(url, "resname", i) c.Assert(err, jc.ErrorIsNil) defer getResult.Close() expectHash := fmt.Sprintf("%x", sha512.Sum384([]byte(data))) c.Assert(getResult.Hash, gc.Equals, expectHash) c.Assert(getResult.Size, gc.Equals, int64(len(data))) gotData, err := ioutil.ReadAll(getResult) c.Assert(err, jc.ErrorIsNil) c.Assert(string(gotData), gc.Equals, data) } } func (s *suite) TestListResources(c *gc.C) { ch := charmtesting.NewCharmMeta(&charm.Meta{ Resources: map[string]resource.Meta{ "r1": { Name: "r1", Path: "foo.zip", Description: "r1 description", }, "r2": { Name: "r2", Path: "bar", Description: "r2 description", }, "r3": { Name: "r3", Path: "missing", Description: "r3 description", }, }, }) url := charm.MustParseURL("cs:~who/trusty/mysql") url, err := s.client.UploadCharm(url, ch) c.Assert(err, gc.IsNil) r1content := "r1 content" rev, err := s.client.UploadResource(url, "r1", "data.zip", strings.NewReader(r1content)) c.Assert(err, jc.ErrorIsNil) c.Assert(rev, gc.Equals, 0) r2content := "r2 content" rev, err = s.client.UploadResource(url, "r2", "data", strings.NewReader(r2content)) c.Assert(err, jc.ErrorIsNil) c.Assert(rev, gc.Equals, 0) result, err := s.client.WithChannel(params.UnpublishedChannel).ListResources(url) c.Assert(err, jc.ErrorIsNil) c.Assert(result, jc.DeepEquals, []params.Resource{{ Name: "r1", Type: "file", Path: "foo.zip", Origin: "store", Description: "r1 description", Revision: 0, Fingerprint: resourceHash(r1content), Size: int64(len(r1content)), }, { Name: "r2", Type: "file", Path: "bar", Origin: "store", Description: "r2 description", Revision: 0, Fingerprint: resourceHash(r2content), Size: int64(len(r2content)), }, { Name: "r3", Type: "file", Origin: "store", Path: "missing", Description: "r3 description", Revision: -1, }}) } func resourceHash(s string) []byte { fp, err := resource.GenerateFingerprint(strings.NewReader(s)) if err != nil { panic(err) } return fp.Bytes() } func (s *suite) setPublic(c *gc.C, id 
*charm.URL) { // Publish to stable. err := s.client.WithChannel(params.UnpublishedChannel).Put("/"+id.Path()+"/publish", ¶ms.PublishRequest{ Channels: []params.Channel{params.StableChannel}, }) c.Assert(err, jc.ErrorIsNil) // Allow read permissions to everyone. err = s.client.WithChannel(params.StableChannel).Put("/"+id.Path()+"/meta/perm/read", []string{params.Everyone}) c.Assert(err, jc.ErrorIsNil) } func (s *suite) TestLatest(c *gc.C) { // Add some charms to the charm store. s.addCharm(c, "~who/trusty/mysql-0", "mysql") s.addCharm(c, "~who/precise/wordpress-1", "wordpress") s.addCharm(c, "~dalek/trusty/riak-0", "riak") s.addCharm(c, "~dalek/trusty/riak-1", "riak") s.addCharm(c, "~dalek/trusty/riak-3", "riak") _, url := s.addCharm(c, "~who/utopic/varnish-0", "varnish") // Change permissions on one of the charms so that it is not readable by // anyone. err := s.client.Put("/"+url.Path()+"/meta/perm/read", []string{"dalek"}) c.Assert(err, jc.ErrorIsNil) // Calculate and store the expected hashes for the uploaded charms. mysqlHash := hashOfCharm(c, "mysql") wordpressHash := hashOfCharm(c, "wordpress") riakHash := hashOfCharm(c, "riak") // Define the tests to be run. 
tests := []struct { about string urls []*charm.URL revs []params.CharmRevision }{{ about: "no urls", }, { about: "charm not found", urls: []*charm.URL{charm.MustParseURL("cs:trusty/no-such-42")}, revs: []params.CharmRevision{{ Err: params.ErrNotFound, }}, }, { about: "resolve", urls: []*charm.URL{ charm.MustParseURL("cs:~who/trusty/mysql-42"), charm.MustParseURL("cs:~who/trusty/mysql-0"), charm.MustParseURL("cs:~who/trusty/mysql"), }, revs: []params.CharmRevision{{ Revision: 0, Sha256: mysqlHash, }, { Revision: 0, Sha256: mysqlHash, }, { Revision: 0, Sha256: mysqlHash, }}, }, { about: "multiple charms", urls: []*charm.URL{ charm.MustParseURL("cs:~who/precise/wordpress"), charm.MustParseURL("cs:~who/trusty/mysql-47"), charm.MustParseURL("cs:~dalek/trusty/no-such"), charm.MustParseURL("cs:~dalek/trusty/riak-0"), }, revs: []params.CharmRevision{{ Revision: 1, Sha256: wordpressHash, }, { Revision: 0, Sha256: mysqlHash, }, { Err: params.ErrNotFound, }, { Revision: 3, Sha256: riakHash, }}, }, { about: "unauthorized", urls: []*charm.URL{ charm.MustParseURL("cs:~who/precise/wordpress"), url, }, revs: []params.CharmRevision{{ Revision: 1, Sha256: wordpressHash, }, { Err: params.ErrNotFound, }}, }} // Run the tests. client := csclient.New(csclient.Params{ URL: s.srv.URL, }) for i, test := range tests { c.Logf("test %d: %s", i, test.about) revs, err := client.Latest(test.urls) c.Assert(err, jc.ErrorIsNil) c.Check(revs, jc.DeepEquals, test.revs) } } // addCharm uploads a charm a promulgated revision to the testing charm store func (s *suite) addCharm(c *gc.C, urlStr, name string) (charm.Charm, *charm.URL) { id := charm.MustParseURL(urlStr) promulgatedRevision := -1 if id.User == "" { id.User = "who" promulgatedRevision = id.Revision } ch := charmRepo.CharmArchive(c.MkDir(), name) // Upload the charm. err := s.client.UploadCharmWithRevision(id, ch, promulgatedRevision) c.Assert(err, gc.IsNil) // Allow read permissions to everyone. 
s.setPublic(c, id) return ch, id } // hashOfCharm returns the SHA256 hash sum for the given charm name. func hashOfCharm(c *gc.C, name string) string { path := charmRepo.CharmArchivePath(c.MkDir(), name) return hashOfPath(c, path) } // hashOfPath returns the SHA256 hash sum for the given path. func hashOfPath(c *gc.C, path string) string { f, err := os.Open(path) c.Assert(err, jc.ErrorIsNil) defer f.Close() hash := sha256.New() _, err = io.Copy(hash, f) c.Assert(err, jc.ErrorIsNil) return fmt.Sprintf("%x", hash.Sum(nil)) } type dischargeAcquirerFunc func(firstPartyLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error) func (f dischargeAcquirerFunc) AcquireDischarge(firstPartyLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { return f(firstPartyLocation, cav) } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/csclient_resources_test.go0000664000175000017500000000757512703461656030314 0ustar marcomarco// Copyright 2016 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. 
package csclient import ( "bytes" "encoding/json" "io" "io/ioutil" "net/http" "net/url" "strings" "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charm.v6-unstable/resource" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" ) type ResourceSuite struct{} var _ = gc.Suite(ResourceSuite{}) func (ResourceSuite) TestUploadResource(c *gc.C) { data := []byte("boo!") reader := bytes.NewReader(data) result := params.ResourceUploadResponse{Revision: 1} b, err := json.Marshal(result) c.Assert(err, jc.ErrorIsNil) f := &fakeClient{ Stub: &testing.Stub{}, ReturnDoWithBody: &http.Response{ StatusCode: 200, Body: ioutil.NopCloser(bytes.NewReader(b)), ContentLength: int64(len(b)), }, } client := Client{bclient: f} id := charm.MustParseURL("cs:quantal/starsay") rev, err := client.UploadResource(id, "resname", "data.zip", reader) c.Assert(err, jc.ErrorIsNil) c.Assert(rev, gc.Equals, 1) f.CheckCallNames(c, "DoWithBody") req := f.Calls()[0].Args[0].(*http.Request) body := f.Calls()[0].Args[1].(io.ReadSeeker) hash, size, err := readerHashAndSize(reader) c.Assert(err, jc.ErrorIsNil) c.Assert(req.URL.String(), gc.Equals, "/v5/quantal/starsay/resource/resname?hash="+hash+"&filename=data.zip") c.Assert(req.ContentLength, gc.Equals, size) c.Assert(body, gc.DeepEquals, reader) } func (ResourceSuite) TestGetResource(c *gc.C) { data := []byte("boo!") fp, err := resource.GenerateFingerprint(bytes.NewReader(data)) c.Assert(err, jc.ErrorIsNil) body := ioutil.NopCloser(bytes.NewReader(data)) resp := &http.Response{ StatusCode: 200, Body: body, Header: http.Header{ params.ContentHashHeader: []string{fp.String()}, }, ContentLength: int64(len(data)), } f := &fakeClient{ Stub: &testing.Stub{}, ReturnDoWithBody: resp, } client := Client{bclient: f} id := charm.MustParseURL("cs:quantal/starsay") resdata, err := client.GetResource(id, "data", 1) 
c.Assert(err, jc.ErrorIsNil) c.Check(resdata, gc.DeepEquals, ResourceData{ ReadCloser: body, Hash: fp.String(), Size: int64(len(data)), }) } func (ResourceSuite) TestResourceMeta(c *gc.C) { data := "somedata" fp, err := resource.GenerateFingerprint(strings.NewReader(data)) c.Assert(err, jc.ErrorIsNil) result := params.Resource{ Name: "data", Type: "file", Origin: "store", Path: "data.zip", Description: "some zip file", Revision: 1, Fingerprint: fp.Bytes(), Size: int64(len(data)), } b, err := json.Marshal(result) c.Assert(err, jc.ErrorIsNil) f := &fakeClient{ Stub: &testing.Stub{}, ReturnDoWithBody: &http.Response{ StatusCode: 200, Body: ioutil.NopCloser(bytes.NewReader(b)), }, } client := Client{bclient: f} id := charm.MustParseURL("cs:quantal/starsay") resdata, err := client.ResourceMeta(id, "data", 1) c.Assert(err, jc.ErrorIsNil) c.Assert(resdata, gc.DeepEquals, result) } type InternalSuite struct{} var _ = gc.Suite(InternalSuite{}) func (s InternalSuite) TestMacaroon(c *gc.C) { var m macaroon.Macaroon macs := macaroon.Slice{&m} client := New(Params{ URL: "https://foo.com", Auth: macs, }) u, err := url.Parse("https://foo.com") c.Assert(err, jc.ErrorIsNil) bc := client.bclient.(*httpbakery.Client) cookies := bc.Jar.Cookies(u) expected, err := httpbakery.NewCookie(macs) c.Assert(err, jc.ErrorIsNil) c.Assert(cookies, gc.DeepEquals, []*http.Cookie{expected}) } type fakeClient struct { *testing.Stub ReturnDoWithBody *http.Response } func (f *fakeClient) DoWithBody(req *http.Request, r io.ReadSeeker) (*http.Response, error) { f.AddCall("DoWithBody", req, r) return f.ReturnDoWithBody, f.NextErr() } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/README.md0000664000175000017500000000007612672604507022467 0ustar marcomarco# charmrepo Charm repositories and charmstore client packages charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/repo_test.go0000664000175000017500000000276012672604507023545 0ustar marcomarco// Copyright 2012, 2013 Canonical Ltd. 
// Licensed under the AGPLv3, see LICENCE file for details. package charmrepo_test import ( jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient" charmtesting "gopkg.in/juju/charmrepo.v2-unstable/testing" ) var TestCharms = charmtesting.NewRepo("internal/test-charm-repo", "quantal") type inferRepoSuite struct{} var _ = gc.Suite(&inferRepoSuite{}) var inferRepositoryTests = []struct { url string localRepoPath string err string }{{ url: "cs:trusty/django", }, { url: "local:precise/wordpress", err: "path to local repository not specified", }, { url: "local:precise/haproxy-47", localRepoPath: "/tmp/repo-path", }} func (s *inferRepoSuite) TestInferRepository(c *gc.C) { for i, test := range inferRepositoryTests { c.Logf("test %d: %s", i, test.url) ref := charm.MustParseURL(test.url) repo, err := charmrepo.InferRepository( ref, charmrepo.NewCharmStoreParams{}, test.localRepoPath) if test.err != "" { c.Assert(err, gc.ErrorMatches, test.err) c.Assert(repo, gc.IsNil) continue } c.Assert(err, jc.ErrorIsNil) switch store := repo.(type) { case *charmrepo.LocalRepository: c.Assert(store.Path, gc.Equals, test.localRepoPath) case *charmrepo.CharmStore: c.Assert(store.URL(), gc.Equals, csclient.ServerURL) default: c.Fatal("unknown repository type") } } } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/0000775000175000017500000000000012672604507023021 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/0000775000175000017500000000000012672604507026033 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/series/0000775000175000017500000000000012672604507027325 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/series/format2/0000775000175000017500000000000012672604507030677 5ustar 
marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/series/format2/hooks/0000775000175000017500000000000012672604507032022 5ustar marcomarco././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/series/format2/hooks/symlinkcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/series/format2/hooks/sy0000777000175000017500000000000012672604507034017 2../targetustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/series/format2/build/0000775000175000017500000000000012672604507031776 5ustar marcomarco././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/series/format2/build/ignoredcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/series/format2/build/ig0000664000175000017500000000000012672604507032306 0ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/0000775000175000017500000000000012672604507027304 5ustar marcomarco././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-simple/charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-simple0000775000175000017500000000000012672604507032544 5ustar marcomarco././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-simple/bundle.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-simple0000664000175000017500000000025612672604507032551 0ustar marcomarcoservices: wordpress: charm: wordpress num_units: 1 mysql: charm: mysql num_units: 1 relations: - 
["wordpress:db", "mysql:server"] ././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-simple/README.mdcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-simple0000664000175000017500000000001712672604507032544 0ustar marcomarcoA dummy bundle ././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-with-l0000775000175000017500000000000012672604507032457 5ustar marcomarco././@LongLink0000644000000000000000000000016700000000000011607 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/bundle.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-with-l0000664000175000017500000000045212672604507032462 0ustar marcomarcoservices: wordpress: charm: wordpress num_units: 1 mysql: charm: mysql num_units: 1 logging: charm: logging relations: - ["wordpress:db", "mysql:server"] - ["wordpress:juju-info", "logging:info"] - ["mysql:juju-info", "logging:info"] ././@LongLink0000644000000000000000000000016500000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/README.mdcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-with-l0000664000175000017500000000001712672604507032457 0ustar marcomarcoA dummy bundle charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/openstack/0000775000175000017500000000000012672604507031273 5ustar marcomarco././@LongLink0000644000000000000000000000015200000000000011601 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/openstack/bundle.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/openstack/bundle0000664000175000017500000001221012672604507032463 0ustar marcomarcoseries: precise services: mysql: charm: cs:precise/mysql constraints: mem=1G options: dataset-size: 50% rabbitmq-server: charm: cs:precise/rabbitmq-server constraints: mem=1G ceph: charm: cs:precise/ceph num_units: 3 constraints: mem=1G options: monitor-count: 3 fsid: 6547bd3e-1397-11e2-82e5-53567c8d32dc monitor-secret: AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ== osd-devices: /dev/vdb osd-reformat: "yes" ephemeral-unmount: /mnt keystone: charm: cs:precise/keystone constraints: mem=1G options: admin-password: openstack admin-token: ubuntutesting openstack-dashboard: charm: cs:precise/openstack-dashboard constraints: mem=1G nova-compute: charm: cs:precise/nova-compute num_units: 3 constraints: mem=4G options: config-flags: "auto_assign_floating_ip=False" enable-live-migration: False virt-type: kvm nova-cloud-controller: charm: cs:precise/nova-cloud-controller constraints: mem=1G options: network-manager: Neutron quantum-security-groups: "yes" neutron-gateway: charm: cs:precise/quantum-gateway constraints: mem=1G cinder: charm: cs:precise/cinder options: block-device: "None" constraints": mem=1G glance: charm: cs:precise/glance constraints: mem=1G swift-proxy: charm: cs:precise/swift-proxy constraints: mem=1G options: zone-assignment: manual replicas: 3 use-https: 'no' swift-hash: fdfef9d4-8b06-11e2-8ac0-531c923c8fae swift-storage-z1: charm: cs:precise/swift-storage constraints: mem=1G options: zone: 1 block-device: vdb overwrite: "true" swift-storage-z2: charm: cs:precise/swift-storage constraints: mem=1G options: zone: 2 block-device: vdb overwrite: "true" swift-storage-z3: charm: cs:precise/swift-storage constraints: mem=1G options: zone: 3 block-device: vdb overwrite: "true" ceilometer: 
charm: cs:precise/ceilometer constraints: mem=1G ceilometer-agent: charm: cs:precise/ceilometer-agent mongodb: charm: cs:precise/mongodb constraints: mem=1G heat: charm: cs:precise/heat constraints: mem=1G ntp: charm: cs:precise/ntp relations: - - keystone:shared-db - mysql:shared-db - - nova-cloud-controller:shared-db - mysql:shared-db - - nova-cloud-controller:amqp - rabbitmq-server:amqp - - nova-cloud-controller:image-service - glance:image-service - - nova-cloud-controller:identity-service - keystone:identity-service - - nova-compute:cloud-compute - nova-cloud-controller:cloud-compute - - nova-compute:shared-db - mysql:shared-db - - nova-compute:amqp - rabbitmq-server:amqp - - nova-compute:image-service - glance:image-service - - nova-compute:ceph - ceph:client - - glance:shared-db - mysql:shared-db - - glance:identity-service - keystone:identity-service - - glance:ceph - ceph:client - - glance:image-service - cinder:image-service - - cinder:shared-db - mysql:shared-db - - cinder:amqp - rabbitmq-server:amqp - - cinder:cinder-volume-service - nova-cloud-controller:cinder-volume-service - - cinder:identity-service - keystone:identity-service - - cinder:ceph - ceph:client - - neutron-gateway:shared-db - mysql:shared-db - - neutron-gateway:amqp - rabbitmq-server:amqp - - neutron-gateway:quantum-network-service - nova-cloud-controller:quantum-network-service - - openstack-dashboard:identity-service - keystone:identity-service - - swift-proxy:identity-service - keystone:identity-service - - swift-proxy:swift-storage - swift-storage-z1:swift-storage - - swift-proxy:swift-storage - swift-storage-z2:swift-storage - - swift-proxy:swift-storage - swift-storage-z3:swift-storage - - ceilometer:identity-service - keystone:identity-service - - ceilometer:amqp - rabbitmq-server:amqp - - ceilometer:shared-db - mongodb:database - - ceilometer-agent:nova-ceilometer - nova-compute:nova-ceilometer - - ceilometer-agent:ceilometer-service - ceilometer:ceilometer-service - - 
heat:identity-service - keystone:identity-service - - heat:shared-db - mysql:shared-db - - heat:amqp - rabbitmq-server:amqp - - ntp:juju-info - nova-compute:juju-info - - ntp:juju-info - nova-cloud-controller:juju-info - - ntp:juju-info - neutron-gateway:juju-info - - ntp:juju-info - ceph:juju-info - - ntp:juju-info - cinder:juju-info - - ntp:juju-info - keystone:juju-info - - ntp:juju-info - glance:juju-info - - ntp:juju-info - swift-proxy:juju-info - - ntp:juju-info - swift-storage-z1:juju-info - - ntp:juju-info - swift-storage-z2:juju-info - - ntp:juju-info - swift-storage-z3:juju-info - - ntp:juju-info - ceilometer:juju-info - - ntp:juju-info - mongodb:juju-info - - ntp:juju-info - rabbitmq-server:juju-info - - ntp:juju-info - mysql:juju-info - - ntp:juju-info - openstack-dashboard:juju-info - - ntp:juju-info - heat:juju-info ././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/openstack/README.mdcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/openstack/README0000664000175000017500000000425112672604507032155 0ustar marcomarcoOpenStack Bundle for Juju ========================= Overview -------- This bundle deploys a reference OpenStack architecture including all core projects: - OpenStack Compute - OpenStack Networking (using Open vSwitch plugin) - OpenStack Block Storage (backed with Ceph storage) - OpenStack Image - OpenStack Object Storage - OpenStack Identity - OpenStack Dashboard - OpenStack Telemetry - OpenStack Orchestration The charm configuration is an opinioned set for deploying OpenStack for testing on Cloud environments which support nested KVM. Instance types also need to have ephemeral storage (these block devices are used for Ceph and Swift storage). The Ubuntu Server Team use this bundle for testing OpenStack-on-OpenStack. 
Usage ----- Once deployed, the cloud can be accessed either using the OpenStack command line tools or using the OpenStack Dashboard: http:///horizon The charms configure the 'admin' user with a password of 'openstack' by default. The OpenStack cloud deployed is completely clean; the charms don't attempt to configure networking or upload images. Read the OpenStack User Guide on how to configure your cloud for use: http://docs.openstack.org/user-guide/content/ Niggles ------- The neutron-gateway service requires a service unit with two network interfaces to provide full functionality; this part of OpenStack provides L3 routing between tenant networks and the rest of the world. Its possible todo this when testing on OpenStack by adding a second network interface to the neutron-gateway service: nova interface-attach --net-id juju set neutron-gateway ext-port=eth1 Note that you will need to be running this bundle on an OpenStack cloud that supports MAC address learning of some description; this includes using OpenStack Havana with the Neutron Open vSwitch plugin. For actual OpenStack deployments, this service would reside of a physical server with network ports attached to both the internal network (for communication with nova-compute service units) and the external network (for inbound/outbound network access to/from instances within the cloud). charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/bad/0000775000175000017500000000000012672604507030032 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/bad/bundle.yaml0000664000175000017500000000037212672604507032171 0ustar marcomarco# This bundle has a bad relation, which will cause it to fail # its verification. 
services: wordpress: charm: wordpress num_units: 1 mysql: charm: mysql num_units: 1 relations: - ["foo:db", "mysql:server"] charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/bad/README.md0000664000175000017500000000001712672604507031307 0ustar marcomarcoA dummy bundle charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/0000775000175000017500000000000012672604507027500 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms1/0000775000175000017500000000000012672604507030713 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms1/hooks/0000775000175000017500000000000012672604507032036 5ustar marcomarco././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms1/hooks/installcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms1/hooks/in0000775000175000017500000000003112672604507032364 0ustar marcomarco#!/bin/bash echo "Done!" 
charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms1/.ignored0000664000175000017500000000000112672604507032332 0ustar marcomarco#././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms1/config.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms1/config.y0000664000175000017500000000054312672604507032354 0ustar marcomarcooptions: title: {default: My Title, description: A descriptive title used for the service., type: string} outlook: {description: No default outlook., type: string} username: {default: admin001, description: The name of the initial account (given admin permissions)., type: string} skill-level: {description: A number indicating skill., type: int} charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms1/.dir/0000775000175000017500000000000012672604507031547 5ustar marcomarco././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms1/.dir/ignoredcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms1/.dir/ign0000664000175000017500000000000012672604507032235 0ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms1/revision0000664000175000017500000000000112672604507032463 0ustar marcomarco1././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms1/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms1/metadata0000664000175000017500000000026512672604507032421 0ustar marcomarconame: terms1 summary: "That's a dummy charm with terms." 
description: | This is a longer description which potentially contains multiple lines. terms: ["term1/1", "term3/1"]charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish/0000775000175000017500000000000012672604507031152 5ustar marcomarco././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish/revisioncharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish/revisio0000664000175000017500000000000112672604507032544 0ustar marcomarco1././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish/metadat0000664000175000017500000000015712672604507032517 0ustar marcomarconame: varnish summary: "Database engine" description: "Another popular database" provides: webcache: varnish charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category/0000775000175000017500000000000012672604507031315 5ustar marcomarco././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category/.ignoredcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category/.ignor0000664000175000017500000000000112672604507032423 0ustar marcomarco#charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category/.dir/0000775000175000017500000000000012672604507032151 5ustar marcomarco././@LongLink0000644000000000000000000000015300000000000011602 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category/.dir/ignoredcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category/.dir/i0000664000175000017500000000000012672604507032312 0ustar marcomarco././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category/metada0000664000175000017500000000026212672604507032473 0ustar marcomarconame: categories summary: "Sample charm with a category" description: | That's a boring charm that has a category. categories: ["database"] tags: ["openstack", "storage"]charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms2/0000775000175000017500000000000012672604507030714 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms2/hooks/0000775000175000017500000000000012672604507032037 5ustar marcomarco././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms2/hooks/installcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms2/hooks/in0000775000175000017500000000003112672604507032365 0ustar marcomarco#!/bin/bash echo "Done!" 
charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms2/.ignored0000664000175000017500000000000112672604507032333 0ustar marcomarco#././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms2/config.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms2/config.y0000664000175000017500000000054312672604507032355 0ustar marcomarcooptions: title: {default: My Title, description: A descriptive title used for the service., type: string} outlook: {description: No default outlook., type: string} username: {default: admin001, description: The name of the initial account (given admin permissions)., type: string} skill-level: {description: A number indicating skill., type: int} charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms2/.dir/0000775000175000017500000000000012672604507031550 5ustar marcomarco././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms2/.dir/ignoredcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms2/.dir/ign0000664000175000017500000000000012672604507032236 0ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms2/revision0000664000175000017500000000000112672604507032464 0ustar marcomarco1././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms2/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terms2/metadata0000664000175000017500000000026712672604507032424 0ustar marcomarconame: terms2 summary: "This is a dummy charm with terms." 
description: | This is a longer description which potentially contains multiple lines. terms: ["term1/1", "term2/1"] ././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series-bad/charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series-ba0000775000175000017500000000000012672604507032423 5ustar marcomarco././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series-bad/revisioncharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series-ba0000664000175000017500000000000112672604507032414 0ustar marcomarco7././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series-bad/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series-ba0000664000175000017500000000051412672604507032425 0ustar marcomarconame: multi-series-bad summary: "K/V storage engine" description: "An example of an charm which exists in a repo under the quantal series but which declares it only supports precise and trusty." 
series: - precise - trusty provides: endpoint: interface: http admin: interface: http peers: ring: interface: riak charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging/0000775000175000017500000000000012672604507031126 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging/hooks/0000775000175000017500000000000012672604507032251 5ustar marcomarco././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging/hooks/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging/hooks/.0000664000175000017500000000000012672604507032317 0ustar marcomarco././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging/revisioncharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging/revisio0000664000175000017500000000000212672604507032521 0ustar marcomarco1 ././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging/metadat0000664000175000017500000000056212672604507032473 0ustar marcomarconame: logging summary: "Subordinate logging test charm" description: | This is a longer description which potentially contains multiple lines. 
subordinate: true provides: logging-client: interface: logging requires: logging-directory: interface: logging scope: container info: interface: juju-info scope: container charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/0000775000175000017500000000000012672604507031530 5ustar marcomarco././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/hooks/charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/hooks0000775000175000017500000000000012672604507032574 5ustar marcomarco././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/hooks/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/hooks0000664000175000017500000000000012672604507032564 0ustar marcomarco././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/config.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/confi0000664000175000017500000000015712672604507032554 0ustar marcomarcooptions: blog-title: {default: My Title, description: A descriptive title used for the blog., type: string} ././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/revisioncharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/revis0000664000175000017500000000000112672604507032572 0ustar marcomarco3././@LongLink0000644000000000000000000000015500000000000011604 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/metad0000664000175000017500000000063212672604507032546 0ustar marcomarconame: wordpress summary: "Blog engine" description: "A pretty popular blog engine" provides: url: interface: http limit: optional: false logging-dir: interface: logging scope: container monitoring-port: interface: monitoring scope: container requires: db: interface: mysql limit: 1 optional: false cache: interface: varnish limit: 2 optional: true ././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/actions/charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/actio0000775000175000017500000000000012672604507032550 5ustar marcomarco././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/actions/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/actio0000664000175000017500000000000012672604507032540 0ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/riak/0000775000175000017500000000000012672604507030426 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/riak/revision0000664000175000017500000000000112672604507032176 0ustar marcomarco7././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/riak/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/riak/metadata.y0000664000175000017500000000031712672604507032401 0ustar 
marcomarconame: riak summary: "K/V storage engine" description: "Scalable K/V Store in Erlang with Clocks :-)" provides: endpoint: interface: http admin: interface: http peers: ring: interface: riak ././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql-alternative/charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql-alternati0000775000175000017500000000000012672604507032547 5ustar marcomarco././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql-alternative/revisioncharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql-alternati0000664000175000017500000000000112672604507032540 0ustar marcomarco1././@LongLink0000644000000000000000000000016500000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql-alternative/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql-alternati0000664000175000017500000000025412672604507032552 0ustar marcomarconame: mysql-alternative summary: "Database engine" description: "A pretty popular database" provides: prod: interface: mysql dev: interface: mysql limit: 2 charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2/0000775000175000017500000000000012672604507031052 5ustar marcomarco././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2/.ignoredcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2/.ignore0000664000175000017500000000000112672604507032325 0ustar 
marcomarco#charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2/.dir/0000775000175000017500000000000012672604507031706 5ustar marcomarco././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2/.dir/ignoredcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2/.dir/ig0000664000175000017500000000000012672604507032216 0ustar marcomarco././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2/metadat0000664000175000017500000000024312672604507032413 0ustar marcomarconame: format2 format: 2 summary: "Sample charm described in format 2" description: | That's a boring charm that is described in terms of format 2. 
charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql/0000775000175000017500000000000012672604507030645 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql/revision0000664000175000017500000000000112672604507032415 0ustar marcomarco1././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql/metadata.0000664000175000017500000000015212672604507032424 0ustar marcomarconame: mysql summary: "Database engine" description: "A pretty popular database" provides: server: mysql charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/0000775000175000017500000000000012672604507030633 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/hooks/0000775000175000017500000000000012672604507031756 5ustar marcomarco././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/hooks/installcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/hooks/ins0000775000175000017500000000003112672604507032467 0ustar marcomarco#!/bin/bash echo "Done!" 
charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/.ignored0000664000175000017500000000000112672604507032252 0ustar marcomarco#charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/empty/0000775000175000017500000000000012672604507031771 5ustar marcomarco././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/empty/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/empty/.gi0000664000175000017500000000000012672604507032357 0ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/src/0000775000175000017500000000000012672604507031422 5ustar marcomarco././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/src/hello.ccharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/src/hello0000664000175000017500000000011412672604507032444 0ustar marcomarco#include main() { printf ("Hello World!\n"); return 0; } ././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/config.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/config.ya0000664000175000017500000000054312672604507032435 0ustar marcomarcooptions: title: {default: My Title, description: A descriptive title used for the service., type: string} outlook: {description: No default outlook., type: string} username: {default: admin001, description: The name of the initial account (given admin permissions)., type: string} skill-level: {description: A number indicating skill., type: int} 
charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/.dir/0000775000175000017500000000000012672604507031467 5ustar marcomarco././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/.dir/ignoredcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/.dir/igno0000664000175000017500000000000012672604507032334 0ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/revision0000664000175000017500000000000112672604507032403 0ustar marcomarco1././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/metadata.0000664000175000017500000000021412672604507032411 0ustar marcomarconame: dummy summary: "That's a dummy charm." description: | This is a longer description which potentially contains multiple lines. 
charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/build/0000775000175000017500000000000012672604507031732 5ustar marcomarco././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/build/ignoredcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/build/ign0000664000175000017500000000000012672604507032420 0ustar marcomarco././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/actions.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/actions.y0000664000175000017500000000026512672604507032470 0ustar marcomarcosnapshot: description: Take a snapshot of the database. params: outfile: description: The file to write out to. type: string default: foo.bz2 charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered/0000775000175000017500000000000012672604507031125 5ustar marcomarco././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered/metrics.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered/metrics0000664000175000017500000000022012672604507032510 0ustar marcomarcometrics: pings: type: gauge description: Description of the metric. 
juju-unit-time: type: gauge description: Builtin metric ././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered/revisioncharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered/revisio0000664000175000017500000000000112672604507032517 0ustar marcomarco1././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered/metadat0000664000175000017500000000011512672604507032464 0ustar marcomarconame: metered summary: "A metered charm with custom metrics" description: "" charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade1/0000775000175000017500000000000012672604507031210 5ustar marcomarco././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade1/revisioncharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade1/revisi0000664000175000017500000000000212672604507032424 0ustar marcomarco1 ././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade1/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade1/metada0000664000175000017500000000022212672604507032362 0ustar marcomarconame: upgrade summary: "Sample charm to test version changes" description: | Sample charm to test version changes. This is the old charm. 
charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series/0000775000175000017500000000000012672604507032122 5ustar marcomarco././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series/revisioncharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series/re0000664000175000017500000000000112672604507032442 0ustar marcomarco7././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series/me0000664000175000017500000000042112672604507032443 0ustar marcomarconame: new-charm-with-multi-series summary: "K/V storage engine" description: "Scalable K/V Store in Erlang with Clocks :-)" series: - precise - trusty - quantal provides: endpoint: interface: http admin: interface: http peers: ring: interface: riak charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered-empty/0000775000175000017500000000000012672604507032261 5ustar marcomarco././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered-empty/metrics.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered-empty/m0000664000175000017500000000000012672604507032426 0ustar marcomarco././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered-empty/revisioncharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered-empty/r0000664000175000017500000000000112672604507032434 0ustar 
marcomarco1././@LongLink0000644000000000000000000000016100000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered-empty/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered-empty/m0000664000175000017500000000016212672604507032437 0ustar marcomarconame: metered-empty summary: "Metered charm with empty metrics" description: "A charm that will not send metrics" charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terracotta/0000775000175000017500000000000012672604507031650 5ustar marcomarco././@LongLink0000644000000000000000000000015100000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terracotta/revisioncharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terracotta/revi0000664000175000017500000000000212672604507032530 0ustar marcomarco3 ././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terracotta/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terracotta/meta0000664000175000017500000000075112672604507032524 0ustar marcomarconame: terracotta summary: Distributed HA caching/storage platform for Java maintainer: Robert Ayres description: | Distributed HA caching/storage platform for Java. . Terracotta provides out of the box clustering for a number of well known Java frameworks, including EHCache, Hibernate and Quartz as well as clustering for J2EE containers. 
provides: dso: interface: terracotta optional: true peers: server-array: terracotta-server charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade2/0000775000175000017500000000000012672604507031211 5ustar marcomarco././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade2/revisioncharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade2/revisi0000664000175000017500000000000212672604507032425 0ustar marcomarco2 ././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade2/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade2/metada0000664000175000017500000000022212672604507032363 0ustar marcomarconame: upgrade summary: "Sample charm to test version changes" description: | Sample charm to test version changes. This is the new charm. 
././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alternative/charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alterna0000775000175000017500000000000012672604507032517 5ustar marcomarco././@LongLink0000644000000000000000000000016000000000000011600 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alternative/hooks/charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alterna0000775000175000017500000000000012672604507032517 5ustar marcomarco././@LongLink0000644000000000000000000000016700000000000011607 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alternative/hooks/installcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alterna0000775000175000017500000000003512672604507032522 0ustar marcomarco#!/bin/bash echo hello world././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alternative/revisioncharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alterna0000664000175000017500000000000112672604507032510 0ustar marcomarco1././@LongLink0000644000000000000000000000016700000000000011607 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alternative/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alterna0000664000175000017500000000017312672604507032522 0ustar marcomarconame: varnish-alternative summary: "Database engine" description: "Another popular database" provides: webcache: varnish 
charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/monitoring/0000775000175000017500000000000012672604507031665 5ustar marcomarco././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/monitoring/hooks/charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/monitoring/hook0000775000175000017500000000000012672604507032546 5ustar marcomarco././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/monitoring/hooks/.gitkeepcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/monitoring/hook0000664000175000017500000000000012672604507032536 0ustar marcomarco././@LongLink0000644000000000000000000000015600000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/monitoring/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/monitoring/meta0000664000175000017500000000057712672604507032547 0ustar marcomarconame: monitoring summary: "Subordinate monitoring test charm" description: | This is a longer description which potentially contains multiple lines. 
subordinate: true provides: monitoring-client: interface: monitoring requires: monitoring-port: interface: monitoring scope: container info: interface: juju-info scope: container charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/0000775000175000017500000000000012672604507031371 5ustar marcomarco././@LongLink0000644000000000000000000000014600000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000775000175000017500000000000012672604507032435 5ustar marcomarco././@LongLink0000644000000000000000000000015700000000000011606 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/otherdatacharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000001212672604507032430 0ustar marcomarcosome text ././@LongLink0000644000000000000000000000016500000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/collect-metricscharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000015300000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/startcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016300000000000011603 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/upgrade-charmcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000016400000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/config-changedcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000017200000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/meter-status-changedcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000015500000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/installcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000017100000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-brokencharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000017300000000000011604 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-departedcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000017200000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-joinedcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000017200000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-changedcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000017300000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-changedcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000017300000000000011604 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-departedcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000017200000000000011603 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-changedcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000017100000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-brokencharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000015200000000000011601 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/stopcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000017100000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-joinedcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000017200000000000011603 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-brokencharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000015500000000000011604 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/subdir/charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000775000175000017500000000000012672604507032435 5ustar marcomarco././@LongLink0000644000000000000000000000016200000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/subdir/stuffcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002712672604507032436 0ustar marcomarconon hook related stuff ././@LongLink0000644000000000000000000000017400000000000011605 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-departedcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000017100000000000011602 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-joinedcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks0000664000175000017500000000002212672604507032431 0ustar marcomarco#!/bin/sh echo $0 ././@LongLink0000644000000000000000000000015000000000000011577 Lustar rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/revisioncharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/revis0000664000175000017500000000000212672604507032434 0ustar marcomarco1 ././@LongLink0000644000000000000000000000015500000000000011604 Lustar 
rootrootcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/metadata.yamlcharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/metad0000664000175000017500000000036512672604507032412 0ustar marcomarconame: all-hooks summary: "That's a dummy charm with hook scrips for all types of hooks." description: "This is a longer description." provides: foo: interface: phony requires: bar: interface: fake peers: self: interface: dummy charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/params.go0000664000175000017500000000511612677511231023016 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package charmrepo // import "gopkg.in/juju/charmrepo.v2-unstable" import ( "fmt" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charm.v6-unstable/resource" ) // InfoResponse is sent by the charm store in response to charm-info requests. type InfoResponse struct { CanonicalURL string `json:"canonical-url,omitempty"` Revision int `json:"revision"` // Zero is valid. Can't omitempty. Sha256 string `json:"sha256,omitempty"` Digest string `json:"digest,omitempty"` Errors []string `json:"errors,omitempty"` Warnings []string `json:"warnings,omitempty"` } // EventResponse is sent by the charm store in response to charm-event requests. type EventResponse struct { Kind string `json:"kind"` Revision int `json:"revision"` // Zero is valid. Can't omitempty. Digest string `json:"digest,omitempty"` Errors []string `json:"errors,omitempty"` Warnings []string `json:"warnings,omitempty"` Time string `json:"time,omitempty"` } // ResourceResult holds the resources for a given charm and any error // encountered in retrieving them. type ResourceResult struct { Resources []resource.Resource Err error } // NotFoundError represents an error indicating that the requested data wasn't found. 
type NotFoundError struct { msg string } func (e *NotFoundError) Error() string { return e.msg } func repoNotFound(path string) error { return &NotFoundError{fmt.Sprintf("no repository found at %q", path)} } func entityNotFound(curl *charm.URL, repoPath string) error { return &NotFoundError{fmt.Sprintf("entity not found in %q: %s", repoPath, curl)} } // CharmNotFound returns an error indicating that the // charm at the specified URL does not exist. func CharmNotFound(url string) error { return &NotFoundError{ msg: "charm not found: " + url, } } // BundleNotFound returns an error indicating that the // bundle at the specified URL does not exist. func BundleNotFound(url string) error { return &NotFoundError{ msg: "bundle not found: " + url, } } // InvalidPath returns an invalidPathError. func InvalidPath(path string) error { return &invalidPathError{path} } // invalidPathError represents an error indicating that the requested // charm or bundle path is not valid as a charm or bundle path. type invalidPathError struct { path string } func (e *invalidPathError) Error() string { return fmt.Sprintf("path %q can not be a relative path", e.path) } func IsInvalidPathError(err error) bool { _, ok := err.(*invalidPathError) return ok } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/bundlepath_test.go0000664000175000017500000000654312672604507024731 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package charmrepo_test import ( "io/ioutil" "os" "path/filepath" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/yaml.v1" "gopkg.in/juju/charmrepo.v2-unstable" ) type bundlePathSuite struct { repoPath string } var _ = gc.Suite(&bundlePathSuite{}) func (s *bundlePathSuite) SetUpTest(c *gc.C) { s.repoPath = c.MkDir() } func (s *bundlePathSuite) cloneCharmDir(path, name string) string { return TestCharms.ClonedDirPath(path, name) } func (s *bundlePathSuite) TestNoPath(c *gc.C) { _, _, err := charmrepo.NewBundleAtPath("") c.Assert(err, gc.ErrorMatches, "path to bundle not specified") } func (s *bundlePathSuite) TestInvalidPath(c *gc.C) { _, _, err := charmrepo.NewBundleAtPath("/foo") c.Assert(err, gc.Equals, os.ErrNotExist) } func (s *bundlePathSuite) TestRepoURL(c *gc.C) { _, _, err := charmrepo.NewCharmAtPath("cs:foo", "trusty") c.Assert(err, gc.Equals, os.ErrNotExist) } func (s *bundlePathSuite) TestInvalidRelativePath(c *gc.C) { _, _, err := charmrepo.NewBundleAtPath("./foo") c.Assert(err, gc.Equals, os.ErrNotExist) } func (s *bundlePathSuite) TestRelativePath(c *gc.C) { relDir := filepath.Join(TestCharms.Path(), "bundle") cwd, err := os.Getwd() c.Assert(err, jc.ErrorIsNil) defer os.Chdir(cwd) c.Assert(os.Chdir(relDir), jc.ErrorIsNil) _, _, err = charmrepo.NewBundleAtPath("openstack") c.Assert(charmrepo.IsInvalidPathError(err), jc.IsTrue) } func (s *bundlePathSuite) TestNoBundleAtPath(c *gc.C) { _, _, err := charmrepo.NewBundleAtPath(c.MkDir()) c.Assert(err, gc.ErrorMatches, `bundle not found:.*`) } func (s *bundlePathSuite) TestGetBundle(c *gc.C) { bundleDir := filepath.Join(TestCharms.Path(), "bundle", "openstack") b, url, err := charmrepo.NewBundleAtPath(bundleDir) c.Assert(err, jc.ErrorIsNil) c.Assert(b.Data(), jc.DeepEquals, TestCharms.BundleDir("openstack").Data()) c.Assert(url, gc.DeepEquals, charm.MustParseURL("local:bundle/openstack-0")) } func (s *bundlePathSuite) TestGetBundleSymlink(c *gc.C) { 
realPath := TestCharms.ClonedBundleDirPath(c.MkDir(), "wordpress-simple") bundlesPath := c.MkDir() linkPath := filepath.Join(bundlesPath, "wordpress-simple") err := os.Symlink(realPath, linkPath) c.Assert(err, jc.ErrorIsNil) url := charm.MustParseURL("local:bundle/wordpress-simple") b, url, err := charmrepo.NewBundleAtPath(filepath.Join(bundlesPath, "wordpress-simple")) c.Assert(err, jc.ErrorIsNil) c.Assert(b.Data(), jc.DeepEquals, TestCharms.BundleDir("wordpress-simple").Data()) c.Assert(url, gc.DeepEquals, charm.MustParseURL("local:bundle/wordpress-simple-0")) } func (s *bundlePathSuite) TestGetBundleLocalFile(c *gc.C) { bundlePath := filepath.Join(c.MkDir(), "mybundle") data := ` services: wordpress: charm: wordpress num_units: 1 `[1:] err := ioutil.WriteFile(bundlePath, []byte(data), 0644) c.Assert(err, jc.ErrorIsNil) bundleData, err := charmrepo.ReadBundleFile(bundlePath) c.Assert(err, jc.ErrorIsNil) out, err := yaml.Marshal(bundleData) c.Assert(err, jc.ErrorIsNil) c.Assert(string(out), jc.DeepEquals, data) } func (s *bundlePathSuite) TestGetBundleLocalFileNotExists(c *gc.C) { bundlePath := filepath.Join(c.MkDir(), "mybundle") _, err := charmrepo.ReadBundleFile(bundlePath) c.Assert(err, gc.ErrorMatches, `bundle not found:.*`) } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/charmpath_test.go0000664000175000017500000001227412672604507024550 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package charmrepo_test import ( "os" "path/filepath" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable" ) type charmPathSuite struct { repoPath string } var _ = gc.Suite(&charmPathSuite{}) func (s *charmPathSuite) SetUpTest(c *gc.C) { s.repoPath = c.MkDir() } func (s *charmPathSuite) cloneCharmDir(path, name string) string { return TestCharms.ClonedDirPath(path, name) } func (s *charmPathSuite) TestNoPath(c *gc.C) { _, _, err := charmrepo.NewCharmAtPath("", "trusty") c.Assert(err, gc.ErrorMatches, "empty charm path") } func (s *charmPathSuite) TestInvalidPath(c *gc.C) { _, _, err := charmrepo.NewCharmAtPath("/foo", "trusty") c.Assert(err, gc.Equals, os.ErrNotExist) } func (s *charmPathSuite) TestRepoURL(c *gc.C) { _, _, err := charmrepo.NewCharmAtPath("cs:foo", "trusty") c.Assert(err, gc.Equals, os.ErrNotExist) } func (s *charmPathSuite) TestInvalidRelativePath(c *gc.C) { _, _, err := charmrepo.NewCharmAtPath("./foo", "trusty") c.Assert(err, gc.Equals, os.ErrNotExist) } func (s *charmPathSuite) TestRelativePath(c *gc.C) { s.cloneCharmDir(s.repoPath, "mysql") cwd, err := os.Getwd() c.Assert(err, jc.ErrorIsNil) defer os.Chdir(cwd) c.Assert(os.Chdir(s.repoPath), jc.ErrorIsNil) _, _, err = charmrepo.NewCharmAtPath("mysql", "trusty") c.Assert(charmrepo.IsInvalidPathError(err), jc.IsTrue) } func (s *charmPathSuite) TestNoCharmAtPath(c *gc.C) { _, _, err := charmrepo.NewCharmAtPath(c.MkDir(), "trusty") c.Assert(err, gc.ErrorMatches, "charm not found.*") } func (s *charmPathSuite) TestCharm(c *gc.C) { charmDir := filepath.Join(s.repoPath, "mysql") s.cloneCharmDir(s.repoPath, "mysql") ch, url, err := charmrepo.NewCharmAtPath(charmDir, "quantal") c.Assert(err, jc.ErrorIsNil) c.Assert(ch.Meta().Name, gc.Equals, "mysql") c.Assert(ch.Revision(), gc.Equals, 1) c.Assert(url, gc.DeepEquals, charm.MustParseURL("local:quantal/mysql-1")) } func (s *charmPathSuite) TestNoSeriesSpecified(c *gc.C) { 
charmDir := filepath.Join(s.repoPath, "mysql") s.cloneCharmDir(s.repoPath, "mysql") _, _, err := charmrepo.NewCharmAtPath(charmDir, "") c.Assert(err, gc.ErrorMatches, "series not specified and charm does not define any") } func (s *charmPathSuite) TestNoSeriesSpecifiedForceStillFails(c *gc.C) { charmDir := filepath.Join(s.repoPath, "mysql") s.cloneCharmDir(s.repoPath, "mysql") _, _, err := charmrepo.NewCharmAtPathForceSeries(charmDir, "", true) c.Assert(err, gc.ErrorMatches, "series not specified and charm does not define any") } func (s *charmPathSuite) TestMuliSeriesDefault(c *gc.C) { charmDir := filepath.Join(s.repoPath, "multi-series") s.cloneCharmDir(s.repoPath, "multi-series") ch, url, err := charmrepo.NewCharmAtPath(charmDir, "") c.Assert(err, gc.IsNil) c.Assert(ch.Meta().Name, gc.Equals, "new-charm-with-multi-series") c.Assert(ch.Revision(), gc.Equals, 7) c.Assert(url, gc.DeepEquals, charm.MustParseURL("local:precise/multi-series-7")) } func (s *charmPathSuite) TestMuliSeries(c *gc.C) { charmDir := filepath.Join(s.repoPath, "multi-series") s.cloneCharmDir(s.repoPath, "multi-series") ch, url, err := charmrepo.NewCharmAtPath(charmDir, "trusty") c.Assert(err, gc.IsNil) c.Assert(ch.Meta().Name, gc.Equals, "new-charm-with-multi-series") c.Assert(ch.Revision(), gc.Equals, 7) c.Assert(url, gc.DeepEquals, charm.MustParseURL("local:trusty/multi-series-7")) } func (s *charmPathSuite) TestUnsupportedSeries(c *gc.C) { charmDir := filepath.Join(s.repoPath, "multi-series") s.cloneCharmDir(s.repoPath, "multi-series") _, _, err := charmrepo.NewCharmAtPath(charmDir, "wily") c.Assert(err, gc.ErrorMatches, `series "wily" not supported by charm, supported series are.*`) } func (s *charmPathSuite) TestUnsupportedSeriesNoForce(c *gc.C) { charmDir := filepath.Join(s.repoPath, "multi-series") s.cloneCharmDir(s.repoPath, "multi-series") _, _, err := charmrepo.NewCharmAtPathForceSeries(charmDir, "wily", false) c.Assert(err, gc.ErrorMatches, `series "wily" not supported by charm, 
supported series are.*`) } func (s *charmPathSuite) TestUnsupportedSeriesForce(c *gc.C) { charmDir := filepath.Join(s.repoPath, "multi-series") s.cloneCharmDir(s.repoPath, "multi-series") ch, url, err := charmrepo.NewCharmAtPathForceSeries(charmDir, "wily", true) c.Assert(err, jc.ErrorIsNil) c.Assert(ch.Meta().Name, gc.Equals, "new-charm-with-multi-series") c.Assert(ch.Revision(), gc.Equals, 7) c.Assert(url, gc.DeepEquals, charm.MustParseURL("local:wily/multi-series-7")) } func (s *charmPathSuite) TestFindsSymlinks(c *gc.C) { realPath := TestCharms.ClonedDirPath(c.MkDir(), "dummy") charmsPath := c.MkDir() linkPath := filepath.Join(charmsPath, "dummy") err := os.Symlink(realPath, linkPath) c.Assert(err, gc.IsNil) ch, url, err := charmrepo.NewCharmAtPath(filepath.Join(charmsPath, "dummy"), "quantal") c.Assert(err, gc.IsNil) c.Assert(ch.Revision(), gc.Equals, 1) c.Assert(ch.Meta().Name, gc.Equals, "dummy") c.Assert(ch.Config().Options["title"].Default, gc.Equals, "My Title") c.Assert(ch.(*charm.CharmDir).Path, gc.Equals, linkPath) c.Assert(url, gc.DeepEquals, charm.MustParseURL("local:quantal/dummy-1")) } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/testing/0000775000175000017500000000000012703461656022663 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/testing/suite.go0000664000175000017500000000141112672604507024337 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package testing // import "gopkg.in/juju/charmrepo.v2-unstable/testing" import ( jujutesting "github.com/juju/testing" gc "gopkg.in/check.v1" ) type IsolatedMgoSuite struct { jujutesting.IsolationSuite jujutesting.MgoSuite } func (s *IsolatedMgoSuite) SetUpSuite(c *gc.C) { s.IsolationSuite.SetUpSuite(c) s.MgoSuite.SetUpSuite(c) } func (s *IsolatedMgoSuite) TearDownSuite(c *gc.C) { s.MgoSuite.TearDownSuite(c) s.IsolationSuite.TearDownSuite(c) } func (s *IsolatedMgoSuite) SetUpTest(c *gc.C) { s.IsolationSuite.SetUpTest(c) s.MgoSuite.SetUpTest(c) } func (s *IsolatedMgoSuite) TearDownTest(c *gc.C) { s.MgoSuite.TearDownTest(c) s.IsolationSuite.TearDownTest(c) } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/testing/charm.go0000664000175000017500000001177212672604507024313 0ustar marcomarco// Copyright 2012, 2013 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package testing // import "gopkg.in/juju/charmrepo.v2-unstable/testing" import ( "fmt" "os" "path/filepath" "runtime" "github.com/juju/utils/fs" "gopkg.in/juju/charm.v6-unstable" ) func check(err error) { if err != nil { panic(err) } } // NewRepo returns a new testing charm repository rooted at the given // path, relative to the package directory of the calling package, using // defaultSeries as the default series. func NewRepo(path, defaultSeries string) *Repo { // Find the repo directory. This is only OK to do // because this is running in a test context // so we know the source is available. _, file, _, ok := runtime.Caller(1) if !ok { panic("cannot get caller") } r := &Repo{ path: filepath.Join(filepath.Dir(file), path), defaultSeries: defaultSeries, } _, err := os.Stat(r.path) if err != nil { panic(fmt.Errorf("cannot read repository found at %q: %v", r.path, err)) } return r } // Repo represents a charm repository used for testing. 
type Repo struct { path string defaultSeries string } func (r *Repo) Path() string { return r.path } func clone(dst, src string) string { dst = filepath.Join(dst, filepath.Base(src)) check(fs.Copy(src, dst)) return dst } // BundleDirPath returns the path to a bundle directory with the given name in the // default series func (r *Repo) BundleDirPath(name string) string { return filepath.Join(r.Path(), "bundle", name) } // BundleDir returns the actual charm.BundleDir named name. func (r *Repo) BundleDir(name string) *charm.BundleDir { b, err := charm.ReadBundleDir(r.BundleDirPath(name)) check(err) return b } // CharmDirPath returns the path to a charm directory with the given name in the // default series func (r *Repo) CharmDirPath(name string) string { return filepath.Join(r.Path(), r.defaultSeries, name) } // CharmDir returns the actual charm.CharmDir named name. func (r *Repo) CharmDir(name string) *charm.CharmDir { ch, err := charm.ReadCharmDir(r.CharmDirPath(name)) check(err) return ch } // ClonedDirPath returns the path to a new copy of the default charm directory // named name. func (r *Repo) ClonedDirPath(dst, name string) string { return clone(dst, r.CharmDirPath(name)) } // ClonedDirPath returns the path to a new copy of the default bundle directory // named name. func (r *Repo) ClonedBundleDirPath(dst, name string) string { return clone(dst, r.BundleDirPath(name)) } // RenamedClonedDirPath returns the path to a new copy of the default // charm directory named name, renamed to newName. func (r *Repo) RenamedClonedDirPath(dst, name, newName string) string { dstPath := filepath.Join(dst, newName) err := fs.Copy(r.CharmDirPath(name), dstPath) check(err) return dstPath } // ClonedDir returns an actual charm.CharmDir based on a new copy of the charm directory // named name, in the directory dst. 
func (r *Repo) ClonedDir(dst, name string) *charm.CharmDir { ch, err := charm.ReadCharmDir(r.ClonedDirPath(dst, name)) check(err) return ch } // ClonedURL makes a copy of the charm directory. It will create a directory // with the series name if it does not exist, and then clone the charm named // name into that directory. The return value is a URL pointing at the local // charm. func (r *Repo) ClonedURL(dst, series, name string) *charm.URL { dst = filepath.Join(dst, series) if err := os.MkdirAll(dst, os.FileMode(0777)); err != nil { panic(fmt.Errorf("cannot make destination directory: %v", err)) } clone(dst, r.CharmDirPath(name)) return &charm.URL{ Schema: "local", Name: name, Revision: -1, Series: series, } } // CharmArchivePath returns the path to a new charm archive file // in the directory dst, created from the charm directory named name. func (r *Repo) CharmArchivePath(dst, name string) string { dir := r.CharmDir(name) path := filepath.Join(dst, "archive.charm") file, err := os.Create(path) check(err) defer file.Close() check(dir.ArchiveTo(file)) return path } // BundleArchivePath returns the path to a new bundle archive file // in the directory dst, created from the bundle directory named name. func (r *Repo) BundleArchivePath(dst, name string) string { dir := r.BundleDir(name) path := filepath.Join(dst, "archive.bundle") file, err := os.Create(path) check(err) defer file.Close() check(dir.ArchiveTo(file)) return path } // CharmArchive returns an actual charm.CharmArchive created from a new // charm archive file created from the charm directory named name, in // the directory dst. func (r *Repo) CharmArchive(dst, name string) *charm.CharmArchive { ch, err := charm.ReadCharmArchive(r.CharmArchivePath(dst, name)) check(err) return ch } // BundleArchive returns an actual charm.BundleArchive created from a new // bundle archive file created from the bundle directory named name, in // the directory dst. 
func (r *Repo) BundleArchive(dst, name string) *charm.BundleArchive { b, err := charm.ReadBundleArchive(r.BundleArchivePath(dst, name)) check(err) return b } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/testing/testcharm_test.go0000664000175000017500000001040312672604507026240 0ustar marcomarcopackage testing_test import ( jc "github.com/juju/testing/checkers" "github.com/juju/testing/filetesting" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/testing" ) var _ = gc.Suite(&testCharmSuite{}) type testCharmSuite struct{} var newCharmTests = []struct { about string spec testing.CharmSpec expectMeta *charm.Meta expectConfig *charm.Config expectActions *charm.Actions expectMetrics *charm.Metrics expectFiles filetesting.Entries expectRevision int }{{ about: "all charm populated without files", spec: testing.CharmSpec{ Meta: ` name: mysql summary: "Database engine" description: "A pretty popular database" provides: server: mysql `, Config: ` options: blog-title: {default: My Title, description: Config description, type: string} `, Actions: ` snapshot: description: Take a snapshot of the database. params: outfile: description: outfile description type: string default: foo.bz2 `, Metrics: ` metrics: pings: type: gauge description: Description of the metric. 
`, Revision: 99, }, expectMeta: &charm.Meta{ Name: "mysql", Format: 1, Summary: "Database engine", Description: "A pretty popular database", Provides: map[string]charm.Relation{ "server": { Name: "server", Role: charm.RoleProvider, Interface: "mysql", Scope: charm.ScopeGlobal, }, }, }, expectConfig: &charm.Config{ Options: map[string]charm.Option{ "blog-title": { Type: "string", Description: "Config description", Default: "My Title", }, }, }, expectActions: &charm.Actions{ ActionSpecs: map[string]charm.ActionSpec{ "snapshot": { Description: "Take a snapshot of the database.", Params: map[string]interface{}{ "title": "snapshot", "description": "Take a snapshot of the database.", "type": "object", "properties": map[string]interface{}{ "outfile": map[string]interface{}{ "description": "outfile description", "type": "string", "default": "foo.bz2", }, }, }, }, }, }, expectMetrics: &charm.Metrics{ Metrics: map[string]charm.Metric{ "pings": { Type: charm.MetricTypeGauge, Description: "Description of the metric.", }, }, }, expectFiles: filetesting.Entries{ filetesting.File{ Path: "hooks/install", Data: "#!/bin/sh\n", Perm: 0755, }, filetesting.File{ Path: "hooks/start", Data: "#!/bin/sh\n", Perm: 0755, }, }, expectRevision: 99, }, { about: "charm with some extra files specified", spec: testing.CharmSpec{ Meta: ` name: mycharm summary: summary description: description `, Files: filetesting.Entries{ filetesting.File{ Path: "hooks/customhook", Data: "custom stuff", Perm: 0755, }, }, }, expectMeta: &charm.Meta{ Name: "mycharm", Summary: "summary", Description: "description", Format: 1, }, expectConfig: &charm.Config{ Options: map[string]charm.Option{}, }, expectActions: &charm.Actions{}, expectFiles: filetesting.Entries{ filetesting.File{ Path: "hooks/customhook", Data: "custom stuff", Perm: 0755, }, }, }, } func (*testCharmSuite) TestNewCharm(c *gc.C) { for i, test := range newCharmTests { c.Logf("test %d: %s", i, test.about) ch := testing.NewCharm(c, test.spec) 
c.Assert(ch.Meta(), jc.DeepEquals, test.expectMeta) c.Assert(ch.Config(), jc.DeepEquals, test.expectConfig) c.Assert(ch.Metrics(), jc.DeepEquals, test.expectMetrics) c.Assert(ch.Actions(), jc.DeepEquals, test.expectActions) c.Assert(ch.Revision(), gc.Equals, test.expectRevision) archive := ch.Archive() c.Assert(archive.Meta(), jc.DeepEquals, test.expectMeta) c.Assert(archive.Config(), jc.DeepEquals, test.expectConfig) c.Assert(archive.Metrics(), jc.DeepEquals, test.expectMetrics) c.Assert(archive.Actions(), jc.DeepEquals, test.expectActions) c.Assert(archive.Revision(), gc.Equals, test.expectRevision) // Check that we get the same archive again. c.Assert(ch.Archive(), gc.Equals, archive) c.Assert(ch.ArchiveBytes(), gc.Not(gc.HasLen), 0) dir := c.MkDir() err := archive.ExpandTo(dir) c.Assert(err, gc.IsNil) test.expectFiles.Check(c, dir) } } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/testing/mockstore.go0000664000175000017500000001300112703461656025213 0ustar marcomarco// Copyright 2012, 2013 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package testing // import "gopkg.in/juju/charmrepo.v2-unstable/testing" import ( "bytes" "encoding/json" "io" "net" "net/http" "os" "strconv" "strings" "github.com/juju/loggo" "github.com/juju/utils" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable" ) var logger = loggo.GetLogger("juju.charm.testing.mockstore") // MockStore provides a mock charm store implementation useful when testing. type MockStore struct { mux *http.ServeMux listener net.Listener archiveBytes []byte // ArchiveSHA256 holds the hex-encoded SHA256 checksum // of the charm archive served by the mock store. 
ArchiveSHA256 string Downloads []*charm.URL DownloadsNoStats []*charm.URL Authorizations []string Metadata []string InfoRequestCount int InfoRequestCountNoStats int DefaultSeries string charms map[string]int } // NewMockStore creates a mock charm store containing the specified charms. func NewMockStore(c *gc.C, repo *Repo, charms map[string]int) *MockStore { s := &MockStore{charms: charms, DefaultSeries: "precise"} f, err := os.Open(repo.CharmArchivePath(c.MkDir(), "dummy")) c.Assert(err, gc.IsNil) defer f.Close() buf := &bytes.Buffer{} s.ArchiveSHA256, _, err = utils.ReadSHA256(io.TeeReader(f, buf)) c.Logf("ArchiveSHA256: %v", s.ArchiveSHA256) c.Assert(err, gc.IsNil) s.archiveBytes = buf.Bytes() c.Assert(err, gc.IsNil) s.mux = http.NewServeMux() s.mux.HandleFunc("/charm-info", s.serveInfo) s.mux.HandleFunc("/charm-event", s.serveEvent) s.mux.HandleFunc("/charm/", s.serveCharm) lis, err := net.Listen("tcp", "127.0.0.1:0") c.Assert(err, gc.IsNil) s.listener = lis go http.Serve(s.listener, s) return s } // Close closes the mock store's socket. func (s *MockStore) Close() { s.listener.Close() } // Address returns the URL used to make requests to the mock store. func (s *MockStore) Address() string { return "http://" + s.listener.Addr().String() } // UpdateStoreRevision sets the revision of the specified charm to rev. 
func (s *MockStore) UpdateStoreRevision(ch string, rev int) { s.charms[ch] = rev } // ServeHTTP implements http.ServeHTTP func (s *MockStore) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.mux.ServeHTTP(w, r) } func (s *MockStore) serveInfo(w http.ResponseWriter, r *http.Request) { if metadata := r.Header.Get("Juju-Metadata"); metadata != "" { s.Metadata = append(s.Metadata, metadata) logger.Infof("Juju metadata: " + metadata) } r.ParseForm() if r.Form.Get("stats") == "0" { s.InfoRequestCountNoStats += 1 } else { s.InfoRequestCount += 1 } response := map[string]*charmrepo.InfoResponse{} for _, url := range r.Form["charms"] { cr := &charmrepo.InfoResponse{} response[url] = cr charmURL, err := charm.ParseURL(url) if err != nil { panic(err) } if charmURL.Series == "" { charmURL.Series = s.DefaultSeries } switch charmURL.Name { case "borken": cr.Errors = append(cr.Errors, "badness") case "terracotta": cr.Errors = append(cr.Errors, "cannot get revision") case "unwise": cr.Warnings = append(cr.Warnings, "foolishness") fallthrough default: if rev, ok := s.charms[charmURL.WithRevision(-1).String()]; ok { if charmURL.Revision == -1 { cr.Revision = rev } else { cr.Revision = charmURL.Revision } cr.Sha256 = s.ArchiveSHA256 cr.CanonicalURL = charmURL.String() } else { cr.Errors = append(cr.Errors, "entry not found") } } } data, err := json.Marshal(response) if err != nil { panic(err) } w.Header().Set("Content-Type", "application/json") _, err = w.Write(data) if err != nil { panic(err) } } func (s *MockStore) serveEvent(w http.ResponseWriter, r *http.Request) { r.ParseForm() response := map[string]*charmrepo.EventResponse{} for _, url := range r.Form["charms"] { digest := "" if i := strings.Index(url, "@"); i >= 0 { digest = url[i+1:] url = url[:i] } er := &charmrepo.EventResponse{} response[url] = er if digest != "" && digest != "the-digest" { er.Kind = "not-found" er.Errors = []string{"entry not found"} continue } charmURL := charm.MustParseURL(url) switch 
charmURL.Name { case "borken": er.Kind = "publish-error" er.Errors = append(er.Errors, "badness") case "unwise": er.Warnings = append(er.Warnings, "foolishness") fallthrough default: if rev, ok := s.charms[charmURL.WithRevision(-1).String()]; ok { er.Kind = "published" er.Revision = rev er.Digest = "the-digest" } else { er.Kind = "not-found" er.Errors = []string{"entry not found"} } } } data, err := json.Marshal(response) if err != nil { panic(err) } w.Header().Set("Content-Type", "application/json") _, err = w.Write(data) if err != nil { panic(err) } } func (s *MockStore) serveCharm(w http.ResponseWriter, r *http.Request) { charmURL := charm.MustParseURL("cs:" + r.URL.Path[len("/charm/"):]) r.ParseForm() if r.Form.Get("stats") == "0" { s.DownloadsNoStats = append(s.DownloadsNoStats, charmURL) } else { s.Downloads = append(s.Downloads, charmURL) } if auth := r.Header.Get("Authorization"); auth != "" { s.Authorizations = append(s.Authorizations, auth) } w.Header().Set("Connection", "close") w.Header().Set("Content-Type", "application/octet-stream") w.Header().Set("Content-Length", strconv.Itoa(len(s.archiveBytes))) _, err := w.Write(s.archiveBytes) if err != nil { panic(err) } } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/testing/package_test.go0000664000175000017500000000017112672604507025642 0ustar marcomarcopackage testing_test import ( "testing" gc "gopkg.in/check.v1" ) func TestPackage(t *testing.T) { gc.TestingT(t) } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/testing/testcharm.go0000664000175000017500000001433012677511231025200 0ustar marcomarco// Copyright 2012, 2013 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package testing // import "gopkg.in/juju/charmrepo.v2-unstable/testing" import ( "archive/zip" "bytes" "fmt" "io" "os" "path" "strings" "sync" "github.com/juju/testing/filetesting" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/yaml.v2" ) // Charm holds a charm for testing. 
It does not // have a representation on disk by default, but // can be written to disk using Archive and its ExpandTo // method. It implements the charm.Charm interface. // // All methods on Charm may be called concurrently. type Charm struct { meta *charm.Meta config *charm.Config actions *charm.Actions metrics *charm.Metrics revision int files filetesting.Entries makeArchiveOnce sync.Once archiveBytes []byte archive *charm.CharmArchive } // CharmSpec holds the specification for a charm. The fields // hold data in YAML format. type CharmSpec struct { // Meta holds the contents of metadata.yaml. Meta string // Config holds the contents of config.yaml. Config string // Actions holds the contents of actions.yaml. Actions string // Metrics holds the contents of metrics.yaml. Metrics string // Files holds any additional files that should be // added to the charm. If this is nil, a minimal set // of files will be added to ensure the charm is readable. Files []filetesting.Entry // Revision specifies the revision of the charm. Revision int } type file struct { path string data []byte perm os.FileMode } // NewCharm returns a charm following the given specification. func NewCharm(c *gc.C, spec CharmSpec) *Charm { return newCharm(spec) } // newCharm is the internal version of NewCharm that // doesn't take a *gc.C so it can be used in NewCharmWithMeta. 
func newCharm(spec CharmSpec) *Charm { ch := &Charm{ revision: spec.Revision, } var err error ch.meta, err = charm.ReadMeta(strings.NewReader(spec.Meta)) if err != nil { panic(err) } ch.files = append(ch.files, filetesting.File{ Path: "metadata.yaml", Data: spec.Meta, Perm: 0644, }) if spec.Config != "" { ch.config, err = charm.ReadConfig(strings.NewReader(spec.Config)) if err != nil { panic(err) } ch.files = append(ch.files, filetesting.File{ Path: "config.yaml", Data: spec.Config, Perm: 0644, }) } if spec.Actions != "" { ch.actions, err = charm.ReadActionsYaml(strings.NewReader(spec.Actions)) if err != nil { panic(err) } ch.files = append(ch.files, filetesting.File{ Path: "actions.yaml", Data: spec.Actions, Perm: 0644, }) } if spec.Metrics != "" { ch.metrics, err = charm.ReadMetrics(strings.NewReader(spec.Metrics)) if err != nil { panic(err) } ch.files = append(ch.files, filetesting.File{ Path: "metrics.yaml", Data: spec.Metrics, Perm: 0644, }) } if spec.Files == nil { ch.files = append(ch.files, filetesting.File{ Path: "hooks/install", Data: "#!/bin/sh\n", Perm: 0755, }, filetesting.File{ Path: "hooks/start", Data: "#!/bin/sh\n", Perm: 0755, }) } else { ch.files = append(ch.files, spec.Files...) // Check for duplicates. names := make(map[string]bool) for _, f := range ch.files { name := path.Clean(f.GetPath()) if names[name] { panic(fmt.Errorf("duplicate file entry %q", f.GetPath())) } names[name] = true } } return ch } // NewCharmMeta returns a charm with the given metadata. // It doesn't take a *gc.C, so it can be used at init time, // for example in table-driven tests. func NewCharmMeta(meta *charm.Meta) *Charm { if meta == nil { meta = new(charm.Meta) } metaYAML, err := yaml.Marshal(meta) if err != nil { panic(err) } return newCharm(CharmSpec{ Meta: string(metaYAML), }) } // Meta implements charm.Charm.Meta. func (ch *Charm) Meta() *charm.Meta { return ch.meta } // Config implements charm.Charm.Config. 
func (ch *Charm) Config() *charm.Config { if ch.config == nil { return &charm.Config{ Options: map[string]charm.Option{}, } } return ch.config } // Metrics implements charm.Charm.Metrics. func (ch *Charm) Metrics() *charm.Metrics { return ch.metrics } // Actions implements charm.Charm.Actions. func (ch *Charm) Actions() *charm.Actions { if ch.actions == nil { return &charm.Actions{} } return ch.actions } // Revision implements charm.Charm.Revision. func (ch *Charm) Revision() int { return ch.revision } // Archive returns a charm archive holding the charm. func (ch *Charm) Archive() *charm.CharmArchive { ch.makeArchiveOnce.Do(ch.makeArchive) return ch.archive } // ArchiveBytes returns the contents of the charm archive // holding the charm. func (ch *Charm) ArchiveBytes() []byte { ch.makeArchiveOnce.Do(ch.makeArchive) return ch.archiveBytes } // ArchiveTo implements ArchiveTo as implemented // by *charm.Dir, enabling the charm to be used in some APIs // that check for that method. func (c *Charm) ArchiveTo(w io.Writer) error { _, err := w.Write(c.ArchiveBytes()) return err } // Size returns the size of the charm's archive blob. func (c *Charm) Size() int64 { return int64(len(c.ArchiveBytes())) } func (ch *Charm) makeArchive() { var buf bytes.Buffer zw := zip.NewWriter(&buf) for _, f := range ch.files { addZipEntry(zw, f) } if err := zw.Close(); err != nil { panic(err) } // ReadCharmArchiveFromReader requires a ReaderAt, so make one. r := bytes.NewReader(buf.Bytes()) // Actually make the charm archive. archive, err := charm.ReadCharmArchiveFromReader(r, int64(buf.Len())) if err != nil { panic(err) } ch.archiveBytes = buf.Bytes() ch.archive = archive ch.archive.SetRevision(ch.revision) } func addZipEntry(zw *zip.Writer, f filetesting.Entry) { h := &zip.FileHeader{ Name: f.GetPath(), // Don't bother compressing - the contents are so small that // it will just slow things down for no particular benefit. 
Method: zip.Store, } contents := "" switch f := f.(type) { case filetesting.Dir: h.SetMode(os.ModeDir | 0755) case filetesting.File: h.SetMode(f.Perm) contents = f.Data case filetesting.Symlink: h.SetMode(os.ModeSymlink | 0777) contents = f.Link } w, err := zw.CreateHeader(h) if err != nil { panic(err) } if contents != "" { if _, err := w.Write([]byte(contents)); err != nil { panic(err) } } } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/charmstore_going_away_test.go0000664000175000017500000001102712677511231027143 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package charmrepo_test import ( "net/http" "net/http/httptest" "sort" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable" ) func (s *charmStoreSuite) TestURL(c *gc.C) { repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{ URL: "https://1.2.3.4/charmstore", }) c.Assert(repo.URL(), gc.Equals, "https://1.2.3.4/charmstore") } func (s *charmStoreRepoSuite) TestLatest(c *gc.C) { // Add some charms to the charm store. s.addCharm(c, "~who/trusty/mysql-0", "mysql") s.addCharm(c, "~who/precise/wordpress-1", "wordpress") s.addCharm(c, "~dalek/trusty/riak-0", "riak") s.addCharm(c, "~dalek/trusty/riak-1", "riak") s.addCharm(c, "~dalek/trusty/riak-3", "riak") _, url := s.addCharm(c, "~who/utopic/varnish-0", "varnish") // Change permissions on one of the charms so that it is not readable by // anyone. err := s.client.Put("/"+url.Path()+"/meta/perm/read", []string{"dalek"}) c.Assert(err, jc.ErrorIsNil) // Calculate and store the expected hashes for the uploaded charms. mysqlHash := hashOfCharm(c, "mysql") wordpressHash := hashOfCharm(c, "wordpress") riakHash := hashOfCharm(c, "riak") // Define the tests to be run. 
tests := []struct { about string urls []*charm.URL revs []charmrepo.CharmRevision }{{ about: "no urls", }, { about: "charm not found", urls: []*charm.URL{charm.MustParseURL("cs:trusty/no-such-42")}, revs: []charmrepo.CharmRevision{{ Err: charmrepo.CharmNotFound("cs:trusty/no-such"), }}, }, { about: "resolve", urls: []*charm.URL{ charm.MustParseURL("cs:~who/trusty/mysql-42"), charm.MustParseURL("cs:~who/trusty/mysql-0"), charm.MustParseURL("cs:~who/trusty/mysql"), }, revs: []charmrepo.CharmRevision{{ Revision: 0, Sha256: mysqlHash, }, { Revision: 0, Sha256: mysqlHash, }, { Revision: 0, Sha256: mysqlHash, }}, }, { about: "multiple charms", urls: []*charm.URL{ charm.MustParseURL("cs:~who/precise/wordpress"), charm.MustParseURL("cs:~who/trusty/mysql-47"), charm.MustParseURL("cs:~dalek/trusty/no-such"), charm.MustParseURL("cs:~dalek/trusty/riak-0"), }, revs: []charmrepo.CharmRevision{{ Revision: 1, Sha256: wordpressHash, }, { Revision: 0, Sha256: mysqlHash, }, { Err: charmrepo.CharmNotFound("cs:~dalek/trusty/no-such"), }, { Revision: 3, Sha256: riakHash, }}, }, { about: "unauthorized", urls: []*charm.URL{ charm.MustParseURL("cs:~who/precise/wordpress"), url, }, revs: []charmrepo.CharmRevision{{ Revision: 1, Sha256: wordpressHash, }, { Err: charmrepo.CharmNotFound("cs:~who/utopic/varnish"), }}, }} // Run the tests. for i, test := range tests { c.Logf("test %d: %s", i, test.about) revs, err := s.repo.Latest(test.urls...) c.Assert(err, jc.ErrorIsNil) c.Assert(revs, jc.DeepEquals, test.revs) } } func (s *charmStoreRepoSuite) TestGetWithTestMode(c *gc.C) { _, url := s.addCharm(c, "~who/precise/wordpress-42", "wordpress") // Use a repo with test mode enabled to download a charm a couple of // times, and check the downloads count is not increased. 
repo := s.repo.WithTestMode() _, err := repo.Get(url) c.Assert(err, jc.ErrorIsNil) _, err = repo.Get(url) c.Assert(err, jc.ErrorIsNil) s.checkCharmDownloads(c, url, 0) } func (s *charmStoreRepoSuite) TestGetWithJujuAttrs(c *gc.C) { _, url := s.addCharm(c, "trusty/riak-0", "riak") // Set up a proxy server that stores the request header. var header http.Header srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { header = r.Header s.handler.ServeHTTP(w, r) })) defer srv.Close() repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{ URL: srv.URL, }) // Make a first request without Juju attrs. _, err := repo.Get(url) c.Assert(err, jc.ErrorIsNil) c.Assert(header.Get(charmrepo.JujuMetadataHTTPHeader), gc.Equals, "") // Make a second request after setting Juju attrs. repo = repo.WithJujuAttrs(map[string]string{ "k1": "v1", "k2": "v2", }) _, err = repo.Get(url) c.Assert(err, jc.ErrorIsNil) values := header[http.CanonicalHeaderKey(charmrepo.JujuMetadataHTTPHeader)] sort.Strings(values) c.Assert(values, jc.DeepEquals, []string{"k1=v1", "k2=v2"}) // Make a third request after restoring empty attrs. repo = repo.WithJujuAttrs(nil) _, err = repo.Get(url) c.Assert(err, jc.ErrorIsNil) c.Assert(header.Get(charmrepo.JujuMetadataHTTPHeader), gc.Equals, "") } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/LICENCE0000664000175000017500000002150112672604507022171 0ustar marcomarcoAll files in this repository are licensed as follows. If you contribute to this repository, it is assumed that you license your contribution under the same license unless you state otherwise. All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. This software is licensed under the LGPLv3, included below. 
As a special exception to the GNU Lesser General Public License version 3 ("LGPL3"), the copyright holders of this Library give you permission to convey to a third party a Combined Work that links statically or dynamically to this Library without providing any Minimal Corresponding Source or Minimal Application Code as set out in 4d or providing the installation information set out in section 4e, provided that you comply with the other provisions of LGPL3 and provided that you meet, for the Application the terms and conditions of the license(s) which apply to the Application. Except as stated in this special exception, the provisions of LGPL3 will continue to comply in full to this Library. If you modify this Library, you may apply this exception to your version of this Library, but you are not obliged to do so. If you do not wish to do so, delete this exception statement from your version. This exception does not (and cannot) modify any license terms which apply to the Application, with which you must still comply. GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. 
Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. 
You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. 
A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. 
If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/package_test.go0000664000175000017500000000037412672604507024172 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package charmrepo_test import ( "testing" jujutesting "github.com/juju/testing" ) func TestPackage(t *testing.T) { jujutesting.MgoTestPackage(t, nil) } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/0000775000175000017500000000000012672604507024027 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/allcharms.json.gz0000664000175000017500000027052512672604507027322 0ustar marcomarco‹ nˆÿ콉rÛH¶6ø*9Š{CR]‚¢dK¶u£b®¼”ín/jKUÕ5Í5H‚$l`a‘D÷ôÄÿóó`ÿ“ÌùÎÉL$@R„l¹ÊîÂÿï²ÀÜóäÙ—n ³ãy Ã,Øóçþpxû[ÇÿÜzä>þûÆŸ[Ç[úÇ­ÎÖY1›ù邾ð7õâüüTéeªõ¢¯üI@MŸÙ0 çy˜ÄÔüœÚê.gÉ8¿òÓ@ýñÈGƒíLM?Ra¦òD Š0)_eÁ°HƒŽ ÆãpqÞýx¤‚ë<ˆ³péÙ3™ÝÏT–Óï~:ʼa2›G¡ç*™±Ê’"ý8ÓSwÖ“YåjJ=£$ž¨A@Msú!.f1‰u Ì ÿØ_ÆyÆAÞUêe®ÆŸÓ*iòb>ORú¤¼®³Žº Ó¼ mM“,ãIG=yþ²£ÎÎ^vh”ÓË#ÚšŸ-”>'Z¶G'âü,P!Í3Iù|:´Ö_‹ Ë÷h¦yg´ŒqÑ:xØ™/Ô8 ®ùPü‚–çá»ÒèÓ`d|–Ðæû1ßä IGaìçtÃc?Ê‚ÎÖiš\†£ ÃÕGÉмY‡y’f0Ô~èl½K"|ŸK甾ð ýáÊæo$üÈÎú*œ…ùÖqÖ4¤Û¢>Ã$Îý0¦±þÕÙŠÓyàáÎÓùýËYÎÊŸ7-jM§Û.€# q~v5æË¦Ló|ÞlÂI” ¨Á¿hºwa*×CßüxX9 ûÉNžJ‡[N¾¿u †¹ å§ÁØ'’Ä€…sÂ%ŽÂ c_Ìhµ‘<þ›oêU˜ñBu̥ǨÌ@ É‹ Ô" ãQp¹‚ø³§’!*3ñÈ×yê_hîà#s?eú1wÊH‹"® ;é2\é0$RûܹîV&#æ" #u¦$„ÉüxiÊ âG~- ¿EÏ„¡û—_~yýúéS¾„ÀaAYHÜÖBù£?-½Ôê®ó´*K‘ÿl†^D‡ö|À‰“ȧø!Eà§Ñâÿ¬Ü27ÇUÓ ¼˜ù×Üc¶á€OF#`‹¡;ÔAïþÃrÍ€es´)ñ§Ÿ0`¯-›>̧)à§,íÁ¡Šnáó†:0C Œ„BÝz˜ýsVŒà Eú„õèadGŸºœ£û•a>çÖÊ¡¸ß&p½JÒx¦©"ÑŠ^㇠tʯÌÖú“0Ɉ Ãà9¦þ¬j2à¶Q@F‰s?ãÿ²ìB¬­šGñfª¼»„ Íüôœªaͬ„Fj°°sÔÙg’¬@jœÎˆP2uòsÁŽ„½é\H&ÌG0*'ä©bÚ>Ñä9 3ÉGÚˆ,¶K¢#‹W¶“iÈí„rÓ Éùóa‘Vôï‹÷…7§¶¢Á¿F^D°%®`FçΉâ1ÉnI<ê …çSž*Ã4íp0  3úbLˆ%WÆzKD=…†Óù•-gu:eá:4źªÜ 4–5y>ó؉¥˜æÔù\Y¬0X²~5š˜X¡³«èžÇE„&¡Ë»³¦©.1kà“ú[Ó$"F®¾!Ý {$p‘…“˜eäÍ»:ƒÄæ Ã:Ñ˲!ŠÓ>Õ[ºË·cúdzׄç+¿9ó„ètƒcÜ8áÄÓ”gJ½ãpFþA0¨^ûïñ’™GÕuœ•ëˆYxؼ´… ÁÐ} ˆîâíÃ.H‚ÎÔ¼DáÐ#RK`œ-½ê,‹.ˆiðžqÛG÷ Ї ž¢æ€Ñ7Cu@½1æ™? 
Â[#µ}öìÕg/Ÿ¿yöt»˜k4qú1tP yà0'$)OMùÄ eA4ö$(ÔÀÇÝT²ÚM›á³Ã¼¦ ?Š,ª¬‹"á¥=;=Ñ”žJ’.ºŒâš›"¦ýðºiqP•(õ˜Î‰ÑK2–Ç3£Àî '@²H¦¹C·©ŠJ“„·«½ îÑ¢öð™zâÇýX«Kâ÷E,I‰sôa”(Ÿ±ä<†ãE?¶óÐŒ0T¤Zó£azÝO‰k¼5 p/÷\åDZìbCÜ2¯1ä;W%ètäÍ ÊA¯‡L÷™€ Q/]¯vå½öc÷bÕº{µã”·Ûk×»f?ô0îàmâ}}©§¡ïàáÑ(|@¿ù£æYñææix)Hmõ«[~thý›Ã¯·{r$ǃ è[š“B–ÏNVPCÚãEªp›E¾'¢CÑŠ=:é©&ož>ªABÇã‰÷ÉøÎƒ4%rÅbçeè«Ôjúì2XíEë`’&\n¬ØHðÓæî«L7µS}»Ëœm9YöÅgûw+Ì(‡­¥5£´f”ÖŒòÇ4£0õ†Öþ ç­|Þ4¯ K¯Ö§µæ´ÖœÖšÓZsZkNkÍi­9­5§µæ´Öœ¯Îši9d‚"RËÆûx^Œ3×ZòHÌ·4ÐË§Ê Ä¡&™.CkKjmI­-©µ%µ¶¤Ö–ÔÚ’Z[RkKú&mICÿC0ŸÎW’Ì/®!é };}qzÌ0—úó²Ïe%s°½Šš«1ðÊ1uPP¶2\' 3Mí… *à+¸J>  ÎÐU?ÖZ6¨ÖèÄ€ŒÓd&œ! mpÛa–ÃÁpóÏWþÂð(c>¢~œYì @õÙ4'f”Ä‚š,4rC¹êq"Ì;¾ÒK‡˜³íǯzÒQa^ãï?ƒfdH;LfáGABòÔ W(»à1xã„0œ­÷ œW–@š¸§GÇIˆœøqøÑ-2„©b˜³eŽOh È'£á!tûUŽGβSÖ!TÌq¼ ÈÐC–kÉHtÀ3cMänB¿E°Ô꣟¡Ç…<„ò ‚ä*H‰Qïªøì„0%ñeÀ¨,ÓCAº¢kò£+h¬>ÄÔ0¸ö‡9ÝÕ”ä#ÅRšpøè1•‘ÉôÞ ¥Ød5û-BæÜ¹í§M†‚Ù‚dÑßÎR`n½¡©@KSÜ¥¡úã¥të/t!Ejê(DO"Ÿk´¶ziîÄüÿëФFìÑt½bL欀<›ôHVïUe¸ƒîa÷`VÍ2‚ÎÔ÷®Ä¬ú× nµ»E,mR"ÌÎÈоd±:/ÓB3(Þ¯ÜÑvætù^žxøï®;(›ü˜¦šé×L<™ž|QuÎfϪl¯yXîÉÜâuu¶ÞÏ®ùðכꫵqWpŒcf‰Õý9îß_âÍz`‹&OœZšÑPnž1‘6@%‚@X°Á5L«j QZ„1±Ø–ð)¨0ªŸÙðJ±zð(Ûh”MÃ1’÷pÆ{´Ï†^l½fÊØ7Ž.¡džÈÊS£$ÞγÃ"ÈÿÚq/õÙr–Ð`Of&XÔ õéC­oíA¯Ç[KfôŒÑÂ˧iRL¦óæt:´$Ï!^•½ü6 /r ³¨r@¾±zýx/ †»$ÿ§I&’š–e„¿Â±X¢ ¤‚‚^ ÌŠèˆß€91§^h1A¢f¸¤•0"F±yžÎý*î°èCEuŒrDÈuÞ?ÂÿÞ;`€LËÕ$ÇcRÈ8²(ô`}u*‰NVgË®‚²¥2pÏxpd|0t€¬“ŠØÖÄ5”Å2I€xL×À"hyÔðJ p`;=YŒhÔÅþlÞ¦ïp3c7Çkš¯ÒÄÒë^F8ˆÕK8Ç;{ýª±£(E«kÄ,j¨­¡°¡÷l|¥¶Lé[7úÏÁN³æ,òÓ hAC‹V3'ã´”Èr¬Úè4ðçÄÞl@öÎç£-fþݰ!÷¤Ã-¸öYÿ´ÿ …Ãýƒ×]õ6&XUúX9l†ú^3áµeí?×òeZø§×uåé%"¸ÿèÑ'øQøwáÑïÐ)¢ ïÌ'VŠ;pŠÐg N?\Ð)æóag9jHBO`ï4œLØgTÆU¸LZ&!¾êõ–§¿òÓø.§ÇxtÀÕ‰òÄÁ•G®áã9sŸ ÄZmçP{@½=Û¿êÆ—R̼%‡UñÆ:,Js55†lŽÕ”œ*Éÿꪗb”•ú)1Õ©Ñ׌— PmY‡Gì¿Bl¦mÊ¢¸r«íªéèf²7šæÝwôÏdvêLŒu¤ø‡^4ó±Ô¬C!øþ²Ì_<¿¿0E“ çìMx*ãŽ1 ¯dpF hÆ­x£ÄH‰¸5;e´l¾£v«™@†oF„d°ð ‹i&Fýròúáî9Ž€=„û[ÿ,·¸×;¦GðàÑaï~ïèÑÁþ½ôƒ{G‡î?::¼ïþÃú;N¯}êE]>èIýQà±³Û‘Èv¬“(íùGç牱j«x˜^ÎyZlèeŽ /DôZôZG÷yÙ¢É3_dY?®[é5ÇF{ÕÓZ\àÀzÊ;‹6Kºï7êðT¿žKºË¥ž+´›gAàêd!3Ó-\wIÚØ%Ãlo¿ÛÛ«èÑ÷ª[©¿u·à'á‚Kåàz¾¶ÅüïhãY}Q}_™‚]Ô„ä§×Ë;÷·2¼QEÜ—ZÌoHÈ£oÖEj8²–3€qžöh€½ËÑßÞÇ¿o•; æìLýÈÓï·ˆýÀ&Çi‡FbxÒ–þE©Êûg½\ôßþãO?þéNj߼<¿xsòúÙß±` H;t8µ-r÷A@ø®òÔòïò›¸B ôsˆøz^±ª1}?D­ûãš# "_½t¿€«¢ðlÐטÃÔγÇg»æV;Ü?Ÿ™Å„Q8 
=“ã~E/.›ÙƒïrÇ_Þþ¸ýÓ3õøÙ³7êç“wož=]­Ò-—}œƒ%2û½wPþrdˆJå@±#¾]‚þV èD+ÄÌ$œ8wo|¦3¾l–örºÔ0ßFH\Màp¦¦É«F`¤òØ£… eIt ø²¬¾Þâý¨µx·ïÖâÝZ¼[‹wkñn-Þ­Å»µx·ïÖâÝZ¼[‹÷ofñn Þ­Áûó Þ!±½Ð¦Q„Uø$Z£3M”íÈkZÖ]»kbEÛ:%ŽšU`]f‹3ÍËLÂ'ý§É°Àňºq½à^v…WFK•]tóëšµvk8þµ5ì·†ýÖ°ßö[Ãþ7aاÁö)Røü¤ÈˆÜV2 ‡{!F›8—8à f•IöÍ’­ÁÜa&⺠?…åvèÃtQÂ3Ž–Ã;Án" 5f…ÙË·r?åZ$Ú¼ l’äô á äë jà‡©‰GCøk„Z HRâç&L.&,ŠÀP"?q 2#øb½` E«t>ÄÃCEIÔA+ü ¸r‘ÿt ìê ×ã}b ‡,[âé•™á\!«öYðmý0Z?ŒÖ£õÃhý0Z?ŒÖ£õÃhý0~?Œ ŒÎ´Ê £üÑõÃ@þ?dzƳÀÏ8Aþ0‰"ÍCŠí`™¦Ø±°Ö÷ÔXùáL§Û"űH$ÛÛxd3kÅ úÎDxÆ+ÉQŠÐYW«X™™i †1MT-yz`ÉvÄšöàP/qÒó4ÄfUu].Z9ç>Ô¹üy/!RôÂÁÕÊd+¥ùZçq¡«CýæƒËMáz%!5ÖA¶Ñ%esG9ÇVŽi†É¦X—L» ˆHæó òE?fSEœx¥Y3ÿ”òÚ='V„ýq£#‰ FŸžÅŸý:wÂnJ~’úƒA˜Ï~½[Fg‹vA¤/_¬8€¥Ÿ6­áC@/‚¤üÛ®owäÎäå·i_’x’p»ß"ñË,̆ADiHÚ˜Ñ, Ì(Mx=ãäbŠ›«²HÁ_b§÷h…“&Iß•žœÕE¤g×2¡^KÆwžŒÍŸœ™ˆ†#–ŽÝɵ£Ebb^çsÿšSIá´G‚#€ÏF‘F:mL‹kº ênƱ0=‰’b¤NRø:0ªÄ;9'w:[ÓÔŒ+&ÉàX ñ³Á÷-™ÖDó­ú¼' }ÖYÙi¾®3HP’q6¥~\ZQyL+2Yë. Š:y7 %{P3çÍ"a{I^*óLòYAù’ ÝjFMý—~¬¥ˆ%o¾!.@ÁXÁƒ ¦QVâÔ(Ómf0ÉG+2FH¨š÷ÄWÎÊ Ø$·~ý—J¡™OXCù±Vãmž¼ÍítWÌÈ€¦ /ŒÊ²ÃJ^k{â+ÀÙq½ :Öî:©TdbðÂPƒ‡ª{’@¦’!Ø]Zç–Ôš%Yjw8¤ƒ$k"cjB¨;¬'…ëÙ}Yä™~ÝÄeýÏêØØKP‡]…ûKÓƒ|9q@Ý¥< îN Pw(¨» TM"è"Sd˜éû,‡¼1ks %¼÷8¹ôõ®hµ,)®@ ºWŒæU%Sã;^ûe£ÛøRûÏðÿ Éh “Þhe+…ƒrÎì·Mó²ížWÒlÚêü.BËÑ·ýZŽ~™Êͧ+)}v©Ù‹p2·8ô#‘+—ŒV®ùt9€Èµî»*›‹u(!6aIs›ƒ—ú5^û•FV”ƒŒBFa¾îMÖÇ*pØ{ºÊ Þ¤¨™O=ÛöV1?I6ræÃ_&“†·š‰!É&WÎlæK£ËÆŸª‰ªÍD Òk™±°tCÀÀà‰úÛ@îv¥ú8Á"©ÍXõgÆ¢53×8ò/uõ‚èÿýÄ&X V°ùk[^%&¼ Õ9¬Ùæ/Ö{ÖŸÐtg‚²\MŒ¿%×SeÆïOsI¡$üàuI"zÁ7uÌÁ+a×Ukà)âY­LÚ:ë(cgã2—Ù:8l³Hy0‹„è°õ†®±< ÒûÓ¹íÍâ\NëÔŠ±ŒSåØ*",]bhѤʮ\Û¾P(°v¶Èƒ¯Þž=µ‹’ötð5÷Vš‰˜ß “ú:E ÜŸ©& ú¨C+ŒQ¾‰›b nHÔlP8Κx UYX„¬v³eØ”WaŠJJ0÷?Š"ýÃád+% ˆÒše£´¤Ï,Aéyk¢ÕpÔ~)Ø:Do%`ì×è%Vo²‘ÔK²µzù”™ÖR¡?‡?ìÂøk›rÃV2öÓmš–mŠWwÈüMRä(,á+Ÿøõéܧ»‚½rNOO¶ë.pzñZ’ñš DµB.JlŸ®üP—ª‘ÁEj¡7"oU‚he s«ÚËSWdYG‹dÉhTÖ3–0TXoš‹z+PÎy.Ë)ýÙ­ÚŒ]ßä=g‡Y@kjðÈÏË2RÚZk…ÒB¹>É&ÀAΊ™€¬£Da¨=X+Ù˜‹¢Xcê§i4BÉ›·ç¥µM»ÛÙ¨<]={S?Ê>õÖqå\{Ç%ñ»Y9Žî÷ž­é¸$…UÎ]«ªô±oÝÖ¶ƒZq’*B»¸ l¾—Í<€õ–˜%}Ží*IQÜW¤÷G”6_é=%?Têañ'×LA|§±± ã’—ò¸MÊÍéôV*Ùµ%¥™ŽˆGö´ãÙ möÊß7ªXÌž¿ 7£(¬*7éì›(ÉVN7uÁcêoœjêß*«Œ£¦ÿ:<˜ÂÁëªT¾oš}Â¥ïo;÷|‚Ìç)ûiÓŒÜð¶~¦»ªtyÒþÖ3 bð„ˆÇvç¯ý²¶åÑ—Í?Õ8ôÉù”dÓ›rN}¦Ž´jˆj¦$‡^Äqˆnþ["øŸ$hî vv ©ø2<|𡈋¬…ïf‚ÍÀñ¦g_ 
kåÒª—W?½6+“4)ælÊÝˈ¾šf¾¬´å‹]½”ÀÁÓ±ÙœÃËh;Á¤Ã|øÞ»ÇOn¬°¿«‹BáÈÚþÚÿêf`OÊQ§2çÒƒàd4k!@²´5âÒŸ6ïCÿÉY ) ß“¨ƒj¡x»ôC±ý̧‹Œ•™& AOV;¼ ÿ’‰Ün,ÈþdÊß5R¥9J¦Q8̹Ö0‡ÙèW=b&üÞ å.gþ‡@óáÄ&Øu"”ø¢@b«M€Ÿ>§t0"ˆKH8{ Ñ"4AX~v«â¢}KµÄr•/5"Èànj”Ä‹ËýØL¢¦>¢ÐHzÓzúŽ$Ö©ú?•@5õ/ÙŠ¸7üåSÄN:jl þk#VuæÇE%—GCsJWy\×HˆéQGãÈŸ4Ðp­‹…"šø½ìEÆS<žÖa²S~¬§‹«\—Ü<€7/â©­»Y÷ÿ«9 ûåŒýN޶Øç†™îØwWÛm {ÐàŠÀù‹Z½eùM®Ö rÑy¢UMšFŒ`\/>ëð¶&ìLXta¹täÊxöà ‹2h9RhqF\\s žóÈL/ %»0{ƱŒ)ù„t¼ ºÚÕÙ‘²òÆ?͈ÉOÞþÌ–~ÉÕ5V̇™Èd׿dâz©ï徨ö \ùéB$ò塤Uá¸å@ìÖµ'ÌûzºšPýF€× 0¶ôÐð%{qb±jc“€ªÌ²%ža|RâU5 $zQë»h«~ÓA>í ÏîÍç ¨¾Y4ëËЇ ½Áæu¶IìP.ÖfµèÇå¢tšêÒï÷ö[÷¯À#¦–þ’òTZWƒãõ©¹ï‹®_Ü oZ¡›R[:º¸ôMu¯|mеlŒæZª ~ÁzmŒßJÚo;¢À¹Ó³ràë=éáX ÊOš°4{ú¹jPÍàáèìH56P‹@~Pd†Ä´;&6l¾BÔ ›ep ¥+Œµ;BeñLg~Ó(K*Û€6à“#„3h½ÿ[ïÿÆÞÿ4fØ ú'_^žš )?%Äk¸iÀ2ÜÔ¯š)V\Mx1 GéF~8ŸùÙ‡L‘Ñj—‹ªÈo÷õ ùÈ7š´:&dH‚yäkvõT–Ô*pÖ4O6[ÑL”úßHÓš†[ψ$C+ß³®t7)Qno8žÌVÙ$ìï≟¤77âH››Ð?’è¦&¥4»¾MIŠnhtEb¸²A1œ²L{°ÊÚb~­\ÌWÐguc]¼¯6PÝÞ’{/‰:g xmd@©7ÉÙ_^õc+$ v&YS«r8/¡Û\³ˆdâºû×NýŒ:ú€µ–½£þ÷ÿúÿvyõ?ù)ë`–¡æ$Íd2ú‹óóS ôY¾¾¡ è`9'.}Ô+9‰¸‰=<Î{Ë55$1­=^:>búsÉsIð¢YGeXG=I"UöÒš‹´…ÜÓ3_l¡öÿâ3“ïÚ&&ŒŸ†lø±R°ƒÏƒ ÎiÂ&¬ç³ ÈñzçÍ)ÍŒ³ù™uoƒ)Ôè1J ã´Q›Åp¸àvÞ@íº"uŽ^`üÛcóJódΡüJ— —ߥ<ƒ±v‡£Á¤±,»2_Ëq[#¯z—åy~ßßÚïué˜‰Ü «º] Ïévt(ä$ ”›87/¹ü±ŸÿÞíªÕæá›EÝ{­¨ÛŠº­¨ÛŠº­¨ÛŠºß´¨û·-vÿoɦáw)öò4­Ðû[ ½[[­ÈÛŠ¼­Èû;‹¼ü [·x¿r÷°x[·x[·x[·x[·xWJ“G‡û$>ŽõFãƒý wïþà <òöèWÿh8ê´­ÀÛ ¼­Àûí¼ûÝýn¯•wÿ=åÝ ábž½û+$]ý[EÆ}.ßÜ"0Ùê#ÓR’¸­ª5àŽ!aDx='H2/yÙ̦7´™ùˆž§áhcapd6΂Ž-á"aíJ¯]2þ9Ë$ ]Íø°‰S’Ðê¦@ß›XF=ëm¹Ó«`é¸t=›ù²iB ëÏ`Mÿ¹åžè2Éçå7ìze*0yFΤúÚ®¶Ã/·³O,;ÓÔ‘sD€xðú $&Søj©žPÊ—Þݘ‘HT‰Ô—ÐiädO¡Hƒa¨‘¤Þ`UTE’ÅÃ5äk”&ÄÂQbõ5ÔÔhѽ£¢k&ôÒ›ñhoœŠö»£·„<ëu;Ë4w€‡®o+`Ä[ªE8õGÐ.ñ—_ʰu›´ø·È‰¯“ºÔ’â«õ9ñ¹n´>)þo¥V¦§­%¨-tŠÚ2I­¤±9jÍ_*=­d)YÊO«lzÚ¥ƒ[GW´tµ¥«-]méjKW[ºÚÒÕ;£«À ^o=q5h±y ×e üÞuéõõI”DX/Nç µ¹z$ûés1l,Tzž¾Kº[£„eýÎrr·¦çWVý³)¹h‘Z‹ÔJ¤¶ß"µ©µH­Ejß:R“ú+™)lQõœ“´›/‘›™“ofÆôñ4ˆB¶qœ­©ƒÝéÒ¦¨¾~¯@8’î™­‚œçÆHG¥íD#~¼sê#•”½+†YÓwdV`ŒÍ;ç¨`œîrݱ¬LÍ®4_OíLIôzƒbñ¬ áÑtj ¯’Ð^²Ùw tH%y>˜íL]…ãÜLÕ‘ê—þ6ÄBNdçfMÞ¤9¹e‰’5èoc‰’¶ìͨìÀ°Ç0ìÌ_ù¼iî 0îqØÛNÿ‡©|óùåftÿ¯¬ÜÌ×WOD]s«ŸÏž¿”x9Gôã›—íª—9½^Ÿkï Zùö¯ÇE,L¤4g¾ã]1XP[3¾V@IAz» Áõx¼j&þˆ9n)'é7u©ú®ˆÝ§´iÌU⿊£Àfl×Q—DC("Ò\„`‘±¦@¬(c¿èJ9Jƒ}~ñ*›„àTD'ÆR‘¡ãÚøb¦øŽ›:i€f ÖÃoÌCòÝ&B 
3õÀ0hÑw)ÒqhÒE|ìP Tõ¢v•H؈‡:%®{Žý¶IGËŠ ,~ å¹ÈÅÍÄá ¾¼®ó´½~¥ë©KHØ…£üO@f™}ø|Ý&lRI-½Þ¾ý¾¿5ðÓþ–z|òÁ]¨{ý­á RÈõ¢ â#„ç³›} «©Ú— H›G"8 A~ù;ˆÍª,Üä{Ð@Š9e»¬NÁ¢E™ U«Ui{á 3g$qAtÚ$$ç2ZÓ«Àé/ç/Þ¾9=9a 7,,næBXÄ ¢9’7gÝ×Qw¦LGþpÐkžlžEƒìmçán]õ4Aý5:M}ü¹TúµHò@l}iRÐÿRh@òlÍš ©²«µ<¨ÊΊ™£Ù!Þ—Jz›q U"+³ôî?4ó4|Mg\§·,}ÊÝŒD/ç_\û@ÈÇÊóD·°rc k眦£_(KE ®ŠYQ©Ä«NÀ“†#]`‡þ±ã‹‘#ÎwMŠeËÕÕª±+ûó vˆ §þgj¨¸¡‚/˜í£Ö­k„x`ºÇ‹æð»p1\2ƒ7ƒ€%:m®šPQsâ~‹9ññ5NsËÛr×ÁÎÍBò'!rB™\bF=KSŸÀ8/êËöV3Áä…ó²OaÓ™ Œ   ‚±äAàýÖ‘UÏ,£™§á¹´bŧN×Üé½ÊØ4û¬,c‹†ôˆy~ŽYz0‚¿%Äy *põÓ)ÀÅ6;V$L+æx ™cxšqú‚XŠ—1h®ؘمAÓ—ÐS3’2ÃN C¹Òü¬¢²å·è³’5CÁ£k±¤`LmÏ–ÐV¯ÜS#õÅJl,á ÖUÐu|:9.Æb—4œ»eœ©¡«ž°vùX!óBGÑ“‹ó(È;jÂÿ„2ýQRY Ú.­¢ÄàŸôŒˆQ¦ÁÅà ÄbW½‰ý^¯W³ÉDå5üÌ>0@G ?æ w –8wÌ;Ãy±«þKí¯|°ü?ÍQeìxܲàâp4µk?»^¼zØJWw+]µò oªZñ Zñ Zñ Zñ ¾IñàQ+´Æ—?¼0Ó_Z骕®Z骕®Z骕®Z骕®Zéê3¤«)ÝA2÷öW%¬ß*¢Õ™öŒ.Ó^b[zïØñ%üñýöÉV}á/x`×½4"?¨9F“2ù¸:×~Ìà–æüW- Yä¡I~µܾõ…27½O9³@n””óqã´þüæ­•³bêåæÓ¦GãìvÓÕb*qµ¹í§MòØÍs¯I…òVì¸þ˘\h'=›Ó/ÿöù[†/÷ëÆHÀMvw ¬>% <ÉÅ ":{‘…7q¬çN>(î¤Ð‰ÑW\iŽÀ„Ü€ªÝýx—â—1"Ý2“æ:îÇÄ‚c‘\ªëH Ïü+£vwòPÔœÀì÷><‚{3vbsŽ×Ã=øoE‰dŒ ‰øšñ„mdâ_R„˜7Iuq+_<ýá¬ì7æ¡àäå–Ø’¸¾èLæˆ[;•âXH}¾Ì¶©›€*luâѶˆ=ö¥qÊp©Pª…¢UsŽ61ÔÎõˆHcøó[ðâh.È–¦zý˜ãõ"ÂÀˆÑáœ@~0ã:IV¦)cœE•r5—0GøØŸHQ‰º6“p2£= ì«r°à °qAŒÅRºÖD±¹E â-ý+¤ßø`áÕ·•¦tе~:&VgIùNº¾I$¹ÿü1à‹wíqš@ERÆ9ç˜8ÈûáÞg†(‡y ½;ðÜçX5‹L éø§dp.ÄüX<,{l×"ŸÎ‰êÛ†òU™?H.ƒs ïêuÒ²c–ŸêzI—°c@w?XôãW>É}Ïhýdä·ƒK˜-ˆqÇ“ãÚK.öóEû*x5sc¨ß©œ;ïÇBžcœ‰ŒíK(ËêQ«Y:¤Œ`í‚X\ˆl·brškÆÔw€É3‰µF—¶§’èì¤JÙP+J)áKêËe:n;4X¨@³EfævÊçÂ4cy©¦š³`â9V^_:jÂY©øUÊžåÔþëÇÆ”§m’„‚Œ²ƒËÛéYìbH‹]¼'Üz‘Ìó:&®‡¦–÷Ê9”P0šâñJú!j#êéIÏIÔéÇDÝiþŒõO%ƒ@4—rbÇê0n8úRs ï+ˆ1N¾|ÊLóB³EÉP~Dà'úöÿlËØ:ðXg¶¨X:‚ ATú± èœ ML§Ä³3d€EU¶ÌylÕ^>› õJ€ó$#Csé~„“PnµC//ƒºe›‡·i¥&Á—÷×Ù5ÈLyz1Ç´ï¯XÎðSí™SéN†–nTù«Û•îòt‹b¢ÉòQ&cˆs²j³`ž®¡Õš‘«[ãÁ°ôƒ^oVÊúï“Á…‹»gøKj«Å™’ÆpI<Áï[êRçÈÜ+SMì;o#eRr1ç|_AD‹Ÿ‡A9Ј®ŽLwgcPZèΜeƒA™Ï¢y;Ù”pFìªùÔÏê,ï!¬Ÿa“éó2˜]—ß; 'Æ|[eè|T>DB<#ƒš9XQ-yðÃZh-Çe5bEILb´øˆ­U…q-2êJ‚çàó%]¤”Œèµ Q̸]YdV¶àQ©#àqBl:r[$=è‘NÆãÔ' V QÊT3ôÜLΊ@ò¹¯¦¦>+¤ñ•ÈV¾àaO¦6gØ¥0UåàØôLûs^¿wœ‡Èägî]¨´U¾b2{˜ 1¦ÐÖ3æSËÏ׉M3¾J†e^;º²œ±ÈIv ì´e£GÁå1ÒW ¿KÆ"‡ñ¶ƒ€1•\ƒæÿ%Eë³%Íà 'ã]Ã%ßѸz8›Àæ{GcËp%ƒn;Û„IDwSÈÎ(©#™AeI©d ÓG\S ¦Q¦ÝzU:L&¢tp¾&)ªKôòjåªë¨WVYS3^ 
_Ä…ÆšŸbD1—AÞ`] ª1²F»¦^K„7µ9V’åå^ªœß¿¸LÂJÔÍØËX'$ë ‹S´–}–Ä;Œ òx=3Ç8ò'¬ ¶rÞ‡Ÿæ6% W>²u–šû­¥æ7·Ôü¦6.äÊN´®ö¾þË—0ý¾6›j©ÅãµÅÍož¸iYÛ¯Ïl„q—ïÝýú%¬6¿»¹ª555íß»°ÿàÁÁÃßÕnôon4"a:ÍŸ_°~àÒ6œèÓRÏSž…D™A\ŸªZ›?°µªºo{òÄ÷ ¶òçÜ€bÍMÌðB}ýøeîÖAúѰˆD³ (\w‰Çc/9IqÊ(Àžén/6ÍßÐhôýØÏ´.êXí÷º÷zДð„ý^¨kBã¬k²+íêµ{fÅß~wÔûÞJ»U¢öÿE³T‚†ô½Ôvo·½žøAá2œ4·ÝEN?‘oý!+u–ÒÝ 7¨õBež¶«)òrZo=¼^I2+£,k®)¬Œ§M[Îp¡IRËȤx­•ô7±’Yº[_¤å—=ãiØK[cik,m¥­±ô+6–¶öºÖ^×Úën²×ÁVgãr/š•(}v ¾ÛÓäÖ2õ/'ïÞt¿#âôV½y{®ž¼8yóü™úî»Úy.ü4Þª,EÿGÛñ¬ñë2"–ËÅA_ÜÊ#ã50»qÄ`„ÑnwㇱfÅÝ2^ o]0êPá–PÖ¹`²R ÖÛ\¸Šªu£çÀ ES0“wIÈ=+upc:`h55Äh85Ù²R¯Òqˆ nöÔ©˜ü˜Ï3QÛìB=R*eF¡.Nê‚1ßÕ †¹l*ËÊþÌ® TÑ´dØÛÒvfﺦÀj…ù¢Ãe9ô’gØ«†¡~,wæK¶–/u1G„˧ŸJ°—+wÍågú±¹æ¯Ó¿¤þÂ^%m$p üõºt´‘Àm$p üÍÚ¸ÛHàָݷ[ãvkÜþ¦ŒÛm$p Üz´ž_©gA ÜÚ£[{tkníÑt{ô²e”Ëú¯4ŒÊ/®etÌvG$]×¶+…üʨó.l3D/FHܵv'§ø.¸õüÉéï¸Ò§T(`[!âf¸ÎDæšib54}ÜšêjÐù9k6Ê„¿Ð˜fœŒŸíÐLXåÀ‡Iò!¿ZÝJ¬V&á~GMker‡JðÈâØÔ•¿¹¡H3Y¯Y#Ì©ãý¹,6ÔIëõ«Ë„QäÊòо…Qœ…aè5ä'Þddâ¨OŒ2çk?l²Ä,5o`”qJV»Aô7v-"ü÷F“ZyÚbq;³S:¼àš†Š±pÚ×µêçM«YÓé¶GA¨! s×>c¾lZPÓgÄô‹²/Û|;Ÿ7Yi6,âFÍ?·æAåøÏrÛÁòl‚l<ýÓífml ‚»ódÿÍlA‚öL|¤³¶™kÛŒD/iÔ0ôð=ãRSy•N.ðûq¿èõî µ‡þ]gfµÚU/‹X7¸Ì¿p»õ·˜Í!(\p¬ôFL—Ì.Z"^Aª4}Ù§éÂAÄHFÄ"ʰ¢] …Ÿ÷ãàÚ–¡ä„—Àã²+:`Â6ì.EW ¯²o3.Ófóé"sZV3FžiÍ­–ÝÚ;l¦„¥&‚™'bÖämŒhô]¥„o“YÌNq±Ö¡!ÞFÀ‚ü ˆ±ÇÜm?þ¯=ÞòêbB„LƒnpH‹ DçA?IÈŒøµ›î9×Cüpv!ÅPÜ6asV¥Z½¨N0—àÓeUÖè"ôœÍ ª Fʺg=f[ãðzóX§ÜN¬3Òµ«ÎÄwSŸåÐêÏëúO-XjÒçTò€FÌÜš¬¡.uCŸÁcvW,;círÓÄ §ÄR%#wñjH¢™zŸ LuN–ÀI êj@3emQÞ„ò@_"š ÌÈW;ò¤|˜™¿£Ë‰©þÖ4Éòcðý-T€‚O`nT÷ –\÷ªcWNâÀfÔU†¹¦A– Ù•ÞÌ\…¨jSph\Qä®ìz‹õLᨔ=yùôÒ]Œ¯ð=sýu!Rî*qS°,Q9u)xÖ—h©ÀÞ½ƒÚr¡í§]±niêÒ­š˜¶ŽéDÅš1 ú[`ià6eæÑ®?t´“ ds¸'-'‹Òê}ñ¾ð@kY(ì¸õMl.éWrÞvÕÜ«jyÛS9° 'za(Ó¡g+ÎCw-±êN™âe·Î¨9G5‹t¦µÖ ­ØŸ„I†’I9ÉÂP v  ¾w… e²ÑaX‹%£ /o.›žä\¼-ŒJ¤qš˜¦„šÑkæý˜ŽI–K˜2©PÓ’{Ûo jp÷ô!8å)‘¿|B·ðkäõŒm~µÑ´¯}t®Ò©ô£` l'4Ö8ƒÇËs=ëzØà åz^’G4¯?H¬‹kQŠÊöBÔBͼ¯¤­1>Ñ¢<),Æ mTÀæÜ þ*E*M6Fób­ˆýæG Ðe¹¥ð#ý-­€ïo @(7¶­íH·ÖÆ0æTGÚW$‰w²]â0"¸ýOäü˺p4–޽àá:Ú)"´À6K‚P©ô@’ôþ,êªs±jÛ»¶Zq!ÂUëcá­£SÆ,œê¢NÔ=»v¬Ò ´c¦a Uþ¥…u#"™¿wè|:d$HKx9'¼! 
ÙÎZŽÄ“b—ìÓUúD:€ ²eÑÁÊg¦©ñZ.ZüP¦€(,/ʲŠ>}öˆM9VœÞG„°˜7—r•b·¤*A·ïæpÈö^öØ3ÎW@hÙ6óÉ1NV å¤@DXO\Ï¡ö‰ÙVˆ?†L2ƒ©ߢ$‰ý´V€ *ãŽC#êۇ˾}\Dh¥†Ù>“y2~™cÿ$ãkY.g›ÛÐ’5\θBà‘ëú±Ü®^^iº˜ùÄüYæˆØÃ?ïÆŒmªDkØÝw¹jŸ¢iU Œ uü i x›ŸíùB%1ƒ³¶°-ª³w?½9yý Ï"HómFö·†ÿÚÖnlWÿ¨û?¸Ç?ªÿ%C}²Íc¼Ð_ÕÝvLOE—öŃQ•÷v¬öÜozÎcõ7±°ƒ ÚQ+wùwÓ½\ñ±Ò3õ”³ŸúpŸ-²aÞ †è/'¯_iÞÖJôÀëÜ]cá—\>VÛÿ$>*ï†óËûÝ|8gçïl›2¸êXÞ;RÿÚ^âO׈¼ý‡­è›¶µ¦™Ö4sëð™ÖbÒZLZ‹Éoo1Ù¯˜Hö]óɾk/ÙoÍ%­¹ä[0—¬Qéßz½Õ¦¶ÚÔV›ÚjSo©Mm­âÑU<~ š¥M*$(ŒÖêgV'7jõ3ߊ~¦õámE_‘¢¨õám5R­FªÕHµ>¼­RªUJµJ©V)Õ*¥Z¥Tëâ׺øµš¶?†¦í›qÍ:xЪþZÕ_«úkU­ê¯Uýµª¿VõתþZÕ_«úkÃ÷¿ªðýV­ÚªU[µj«VmÕª­ZµU«¶jÕ¯^­ÚF¼¶jÕV­ÚªU[µj«VmÕª­ZµU«¶jÕV­ÚªU[µj«VmÕª­ZµU«¶jÕV­ÚªU[µêmÔª÷[µj«VmÕª­ZµU«¶jÕV­ÚªU[µj«VmÕª­Zõ«R«¶Å¦ÚbSm±©V·ßêö[Ý~«Ûouû­nÿßO·ß›j‹MµÅ¦¾&˜w´Ê@„_*æ!Pœþ(IæL-Ÿœ=[¶=¦^Ø|¹q?≔%_ÐÊ8 ÝŠÚ!°'Q:„DJ‚úÞUí,y\ÆÉzJÒPü6&ÞÓ#ßÎHÊ$L¤g@àuÐ Ò…‘x ™C傦=O W0¦ƒ “Èv?þ+‰-å·a³˜›ú9m„/p9ú·LEù‚ 2]Ñ£ÈÝdÅñ/ÓÄ9_þóîmÄ:ÖÍ$ #æÂo5–D–‘MåÓ4»3é_ÜØB¢mž½º'ê|Üdh™Ö F•%@‚¼ÉæîœOmE0¶.óiÓ„£ñM–½•Ó¥Á€ÏÈ­r™Îç¥ гrâ„3’ÜYËo›¦t[~¢!+M’ÜËDž3»6Ÿn2hÉï_ÈŽeXñ¬™køó òè¹£­Es•…™ýý˜¹žHT̺|"ø1#V Þ:[¬™J¨À•`fCB½Fðcä,&ÙL8È!/½H}á0‰}]”Ì]J}ÌÝÔwê5£«cuÿùcùðŽáðŒáðXí<ÜÊ^TÐïéØ^ÖN0ÐØ£×Á q¹½pC¹O|s)Š)×±Bû§Z¡NÏ B*´¿~üÊ'ôÿ»rÃÛyŸå´3ÂÚô†ia"ALl°,BPÃÇÏ|Ú¿fñÍQ”ý­ §ŸøÀª!Ê;?¦G¶b^eä1=« "TK26¤Ža”#WA‘YjE86˜ýX ¿B¸Á5e(o ˆGŒ8¸ ê>`ŽQ#Aꆺ¬¨?ûuî øs”úƒA˜Ï~½-gFk ä’/<½y—~Ú´†Ñ›$¾5w˜7Œ¼ÑÀ™¼ü¶‘ñ^@Ë+í?•A¼eíVHaø¡¡û‘æó˜ÂtKµ¬ÒÇcƒc‡ná5·h²Î\8>=ºˆÚ+ÇâaºàÞ]çæ žÙöŠÚ‹“)uAï…ÀGS ¢  v KÖ4{¤ò$Œ7Ïü.à' v‡ù:¡…%ê*â0áèCOÙZQÇ ´÷ @£4r¢v´~l·Ãx6…aAp<Û£?¡wU¤Äß0Ò•ÍÓEȺRa÷h×O“œ /-"VscœîÇgkššqµé÷Xp’Á¾-™ÖDó­ú¼WÌÁ2‚cYõ+”‡ë:ƒ;IèÆxyo’\³³<¦;‡O&^«4£ß>ÄÉUÌŠbMǨ f žƒU‘tî%gÍ-ÝædÂÎbY2ΡVéÇÅœ0úh‰ É ±äÊèí¶@SµöáÕÚ%È SzZ×»òèù.¡_jŠÜúõ_Ôe˜æÐ“³Qëö³—xÅ ë›'/Wä§ÊˆòémÌÖS:=D‚ªgÒ:Vÿ±Uw\î^Û  } cÑxû:LR¢]óDeð#GºÏ3=%jN_† ,AT”:™Eº,u%ŸºÄ¡Ð;òî­âQè‡  «t$QACYåì…Gb¡„]lDtY‰¦o;³~/ðÞY¬}ÂüììSÃqÑ3vübá,´HÄûµb‚?š&CE ˜.pIJ÷c+ôÐð¥lÀ²” A˜UØR=¹Ü(˜.»%Y’”f,ỏé,7åãÖµŒ5‚ K·—8ýUÆ \?浕RÚbÎHñqýåUé8i gôCÊ/Zî!<ã‘û³0 ýTLkhJk“삺ê$/M½ÐEw´tjÁkÕÖ^b)%|2¶?ߣ£)†Vè›aVqÑrÝRTL4ÛˆÂ]çìõűB:Ž´‡i–Ìi L5÷Žd›3"gàçaãµ´DÛêb<š¯£ŒÅ„göÔL½¤ ¬«×é`å±}º¶Â6eýnËm~EoƒYg„—øI¹úzûm£Òõæ“ýýuËŸÈJKãoRõ¹B‡vwººîFªàǻלÜzÔÆŠ“[üèMnýŽÕ&`Jî·LIË”´LIË”´LIË”´LIË”üÎLÉû 
þ@7ê­ÊÀª«°&’oŠ^fA|›œ¤Ž7gºL¦sÒuuOÇ\ÞÇQpÍnCÛ#tzpÈ@0’ÅLLçù4%p•ů WæZÌ«( ;nÄ,L;³[ÿrº0âÊÑ‘$¸ÆÇ™VÒé3,¿m"Læ&Ük|‡¾ÂØôÎgk¸Ùuù5ê±É[ê cóæA1'£Y(~Ê6ÂEy6€KP³âý)еL»kÀ(‰·s%-€9 Ž-0 ‡ bÕÂ&Ñàg#T‹%þYà ð%£‹nⱫG|_.‰1‹3 2ˆCÇ®hsW1’o±²EƇÝG¢ otšU÷‚.ÅYˆæ sÃpË ªL¨·ªºÓy¨­²˜¿¦ÙÇ Ž÷ŒÁ¤kqv“t²'w¾¥ +²¾ÀNé|¯8ýØSb¾8&̱eŒ1ÐB ø·0;0›§¹|eÌáZóÎ({#íFû½z»˜'ʳc;þ,a·ò!P¤Áv¯ÎÏ 5áôÔâ7w)›¯³È°»¸WÄìú?òšÃ«öbO_a1´ë9âc Ô‚ÖÉÈx'ú+‘À•%0H3e¨/2æ² Ú ¼†šió– Çs©$ˆN'Ã!½óDÃ?¦Ú°Ò0rËÁ¦Ó]âŽ-㯧úî†êŸm¦ú±bºÆ’'ÌZÆr8´#JO`âI\CCÃ"×¾ÚˆP˜‹ FÚÏjœ %.Ê™àÑÁ">ìµÁÑz𔫉e˜\W™_™H¸há!–Ñ_CD†yÔˆÙ”œ±¡šŠa+kCÆD³ûY;ÑÏýº£V¡¤zHGƒ€êåÊ_8°m¦¡u "êÇ&ç&ʆ>î«(ñž5Ñ[~èà‡þ¶å:;m±ç…¼A´‘;KË)µœRË)µœÒ¿5§Ä#¯t)©cì\ÓKu¦üÕ"`UŠÈ/ù=k!ßWè~3Í<ïÕQÌë½7#÷Ë„ïv¤Wˆß—Ô Dþ hò>ÌíJ{ÆöY– 91sm•³Uq-œ¦fŸü‘žåÆ V<È%èGî„I®{ó“ ñð´s2_€þDíàSyDɹ»´öŸƒzþãKÛ¼Œ}Ý "« ç—f6?âk¼_#nUÍaì•ö·féÛ*ö?#cH*À-g[5i€¤$îÇà(Æp¿OR Ïp¼jP“Ã@lŠÌèŒÏ~‘sÂÒ¿5 &þ°lª@ÉQom˜,7±1yú„Q#~ Ë]uFÞÆX«,/‘ï‘×@ÂíØIIÁ‰ÅðJÅP ÜËŽžâpJ€T£Ë`ýé>ªGä8“j½ù†€ß˜ÉØ€ sx“]uj’ 8?8^‡ÅØb&<¸– M“TdmãÖÉY}#ef8ö.ýt~Úƒñ‚Žjp„ömïÍsçŒp³“XløcUÝŸ_c{ŽËÙFÆÌ­pöO¤‡Š+i ×ãlØ#É%-'Ç#h‘ñ×QDT§¢‡èyž$äfzú†ÎCH¥db\¸?sgurú²«ø¨¨9¶36V┘uDæV‚a˜‹™6=×Ã`žËrÔΫ¿>Ù­$¥Ñ‚y¢óÓ9<ïCÉ‘ã }5@p»œ¡Iqc¶Ëð6Œàçlà;Úü%['#‘ì7pìu`‡%†YAp®7"HÝ©ۢŚ02{š*yEÚ +íÇw¯Œ›¾¼ÂS>€ŽuvúΠý­]×韠cÕíÅE§6¥þÕßkDa#» mÐH ã0—Oq:@è”É#j2‚°uƒ¨aù$O¸X72Ó‰â)B6 B.ssª•;Éœe#OX O‹Ña §‰ÎiÄÉ v‘ÁR;8û³fJ:îu•‹ÓZ<¼%í¦òŠ[çþHÎmúvÅ¡tÕ¶!u$üM‹Ó·*½ phmUcIâQƒJ #. 
× kÉB8æŒR_öPGÅ–Š ½qu´Wò ”{íêB}CVŽJVTßlN2£¦.P€†ÒÊÎ^ð"$!9¿û42EU¨rdgFⵑ0Ê2‡b]^êñ:ê<ùöø‰Ö¥sXæi…ÃEɽ-à©[zÌ1) 3v¶œÁ‹£Ô$òVæì² þ÷ÿúËE°!¥‘ëÁ'e¯Zc•m½j³à´Zzs£ûßÔׇo˜uáy>Aâ«Jx¢ý´i^nØlί)O×?·Ìq•+(p}Ws·_ÉJç: ³a3›:ûy·õ£4íÁï—˜ï;Z3±ðßÑïï8϶‹ŽÜDð„ý&‘ä@zM›«ªœ‚‡d6ÆèMdyôh­wâNqïðÞá;Cš41¦ÉhkB˜+ö€æÁ›ÈH„YW¶v".^f昭éÔܲJnä㇇,²ñ—¹ÎÜF†•!¢5p'å¸öŸî Y’+SÜÆ/ýäè¤D×Êç"¶ÍU>o°n*6md(mÉ<¤jš_&ÇËl€ilÿÑE/7‹ÝçdijøúöYñì +«çT®¡ÖfÁ3Ö„A®â©áŽÊ¨¦,‰èÍ?„Ç=ýóK {Y8aùoçy~ü‰.x,’x´»zº©ïᡆ‚ ›B•vp|»`$aàʼnɒ¯_£„5;$ö±Ý*þ–©Hè¼úøHƒ<‘Aê $‰`=½îÉùmê\æÉ”(¬(Pÿh¹X«aìÇå¢Ä¥´¶´Ãû=TlbñÌpÉ@l$hÀÓ¾ŸÕ%fº†B”²Š$~07j‰Ùmê;F¥¬Ëe«à ëüŸ©RIÃEoì/»«‘ª]†-IôYË‘bFp±ÑÇ ÈÊÚð´ºà] |4ع© .Ó~Pjçç“wo^¾yÞQ/ßü𶣞>{ü#ýñìÝ»·ïª;Ö Û,•m–ÊMY*?1eãN¶«<˜RZRtaáx´2˜KVæ¯äx4ñÓ¹u,Ü¡TÉ3ƒ#°/n`ùzcêWrûªG´y殹­²-[RLC¿ç#áÖ''ÆœcI«ÖÁ„Áæjö¾a¸_ØCÑØ¸¤1#(£†€B•ˆqÀ „ Ì`´ 4üZÕ ·l¶wÕŽG f1Æ*\+÷ÅM«|ì^"œzEºlV¬=¸„b&†b?‚f7åÇ/xí¨qàœ8ôÚY!’ˆn‡Y”÷†¡ú¬`vM»\’; §Â åƒ7Z.°Ê£%NÄ%ü†©4g¿¤ įҀʮþÓØq‚ ¶ìÛNíâ¶1fS9_bž—ݬ[#…ãíƒ=ì6>]©xcí­5Ú¶ ò‘²- |âß>UÝöŃKäp.šzWÂÉfå÷¯½8 ÌíQ/;z›.Ýʹp<«ôªþÇ»Çs?íN>²°ã.TW–/f›WÌA££Cjkxûê–ݵƒñÑhø°w´?|ø°×†‡ÁààÁÁáàÑÁAoØ[^GÃàšlš­Ç­*^Z{ØËš÷+øCÆ®ñ[îñ¬{ËÞAûšÿˆ¯y 4¬Š¡m¡áß¾rÜ>ç >ìþoŒåïÁáýý£áCÿhì?:øçßÛ÷Ž=z|mX¾<¨µø~UŽãö…·/¼åÞ¾Eîí°}Ííkn_ó·þš#6_)‰á‡ÊKfGÛ9¼Ä|©½j+äS5ŸÎE¯m¼È}ÇÉŽöuºóšm¡Ì¦}t{pó—xô®ˆ‘ßþ˜þPž:}qНgyuÌ_8¸™ºˆ~lgQÚýI" ¹98£(m¬ýê¯UCý3œÿ«:Òˆ(ŒjÉòÍ6­_ä`Õ`0Ž]œÛ—ŽÇéÊëCØÐØD#é b®›uT²µÜqaYæ¶Pèm >¦&N5Å”¿W^¼ßßgì‹ä¢+ðĨ¹ùø¯A`“SêÓbËûDuiˆóõ&*Oþ|]ŒqˆЙh±2†GñMTêdô2§¢‡|Aw¹Õ¥}™‰šûרqë‘ÇßQb\B,»¬×HøæõÈÔÊRf »€85eCxoáí¯J£^oUA¶Xó+Ó@¬A¡-X«ëuÛn5ßÛDè¼<‰ùçð]vù®Õ©ódx0 -Þõ~6þ½)ïF61Iä$¾a“NÖ»0þ¿yûã;šøÏÏ~éoõãÔ5+ݙÙƒ(Ö˜‹êGóãÿ§Ä/õsÙãt½+o 9£îÏ`¤5È…'iœïŒÓXi¬¥µ)s"ËDMi¯“)͆ÿµÀ!DJ„ÓÕäÓ.œ!‰ò"öJçåÿ:Š÷æ’IhÅò\¾ yr0C¯àÛU©ñ.ãLäÖœ‹ éI„Æg`#©¤gÓhK' +/b ‹á_ú±óÓÀæRƒc†èHäú±:yõjõz%á¢K©øBi"}£nçõÀxóÛxrR÷ ¤ñ‘‹&/7n†áÑ%¡µ&ÞS(yG0d‰£ût4ÃdÄÁá¹IUu §²œiƒqxmrú[2ÀqËÌÇÜq¬Ôqš2žÉÊ" îÇI5ÃY‘$é¦ö{e ³úsÒžžšV¤}§Þ,>(Étºò$WÏZ¤È~w˽Ƞ ™˜Æ(¦zÎË”š-$Ò &g$¢;z"A6‰eÂqú o•D,¿UáWòMÁ ÙàBêÖ é¨?ù—¾–oIÒuÓ…ÁÓ,š\Ú².î–©¥Áá:•z$N‹97îÚÎNEÐyrE˜¹ˆR?y}Öᤀ«©)fÊ TìPn~î1B¡¡m•ൠ‚'INÝ´|l³}»;FL}Ä>¾ÁõúL/åE‰ÙýºÕ¿A1ÍßÐd"ÂÄfOûó'§Zz€Ë½¤2€¦bëýaï!œîßr‰ k’œšªŸ^Û € =I=hÁ{ ÔßelOC}ÿãù•÷4I']±tÙÓ›€¨Ë)çÒ.>ГÈϲWòåÙ›“ǯž]˜ÐþzüÚ¿> 
ÒÙYø1øþpÿ`¶BÛ¦­‚ž?Ѫ:Z©6¨`cQô´º_èì_Nzl Xœ~oi+l‹Xßk&ôy^äÆ'YV£Õ¹`Ò€¸€OÒk…1 ílWþÞôbx¾Û½˜Ž£„:®&|׊©/¥Âúä·Ú<Öú <?4 vœ`WÈ£V7 îÜGÌD·Û²NACÿ ý”“÷BŸ 3Ë*0¨¥ôô: yŸcâ¹Ð§$! øDöXóô·RÈå$/øŸïËw»£¶÷èAËgÙž|ÿ;Ħ%-ºÝ®lsóv5H£™ ¢Þ0ÂøÒ /†çô&ž5ΫCré§–ñÆt‚BþSC¿!~–˜õôcwÎuÌnG¹_Om¾1÷럃>f«£Vµl`PÑ[ø`Çn~íºOíæñi÷Œ¬¹ØU›m¾X›kà¦al\œÅ »2~z‹i>¨Ææ´h+!|:á!’„±KA¿K¶K R~}„$9©œ˜LØå€Ãü84„æ”ä.Ä8A8’´õ±AÕ„c†Ó:E\eC¯,Àë}Û4h•±¸¥BŸN…š4Z ÔR –µè³(Fè7Q Ód% 2áVTÈЮóå3tÍ:#'§Öñ󥤉¤f Ä L•N ‹"|NÏÄñïŒÿPwõÏìʦãˆÙ*Ë‹ñ¸aÝÑ $j.c‰Z¡ÌèÜìþ¹f¢¦îŸ›µ'þì×¹3;ÿ¹‘ûƒA˜Ï~½-1nhë[vwÝôÐVë1Û§Ö>µö©ÝõS[é¥Ü>µö©µOí®ŸÚª`Üö©µO­}jŸûÔ¸DÈUøaUíÜò7÷iý¬}ãƒxkÀµcU¾ 8rf顽ÆX?ÓXÚ ÿÒ#ìpMçIê‹4ÊÞí’³ª2.Œý4H.–u,U¨r)RªsZÑã#¹x¡¹0 38Ã:‘( :«ÇÏu” ¶Uëry8¦N5tHŠyÏ!Y#$2A£Å^†ŠCÔ˜óžAƒä1:wG¿}ÂÎ4:‡c‹—gßp‚ø¬C{ɇ͞ùïh÷æJ›îSà¿7Z¿ƒ™ix«§÷XÙ;›Ê~“}ŽÇhâ¢f“îfeÖÝ8†¢kIÕ´˜a^¤1¸=‡rŒièñðÆuðÈ’.µ6¯\»”/Fö9]Zosç{Q„º9óÈ:£þ\[äÖ©$šD¬¡ ÙáXo» ]ä¢Ù¬Ú¡I×vèý­ÿ¸šœ‰?ÙVÕ5á¯#nŠe²;Ü}¯Ù‡&.¼he“i-mï’}™uú´‹ŒX°Iɘ—cTùÔµvˆçPÛ%ÙV2Œ©y§/gJ®p²–yû«ŒU-jéPK‡þ tÈ„};”ÈÖ[ü iÑÒ‚ó*år‹œ[äÜ"ç?(rn…„ß1Ë ­ÄÊæ7+Ÿ¨i8™z:\‹ËéêjÉ€«k 9ˆ~¢–ÖiGeWíYø‘®Î–Ÿæ¡ÙýÀ”¯wa´,Hêâá܇ßÇèÒ'Ä;რ—Bî¡^*ö¡B*çaÑ Õét€‡z"ñ[<°金dˆ.+£Úª¼†|Ì8‰³D€n 9‰GœfRÍ9 ö_>ÇÈÏNºÁ¿³‘‹{‹Ë4³ê£Û$›w›öøUÓlèZÌ9ÆÇ¶‡çQ¶ Š9EX«ø¼Lܳ…Š‚ÐK<³rè@¸¨z å_¹Þ´àß$:ÉÖ·œ•%çmÏ(ñ6,˜–/ tåÁhˉ\;[¾#¨YÐ[ìî>Èüo LjJ”„æ%Zòz$Z*7g–ÉÒˆ<ªh3’/HzãÔ(¥Eè$ÈÜ¡ä *cð AÕ(Z! 
¬{éhÒÐðG)žpQ=IÈ–Ä$¢)§±=—ÌRªmH"7,daP¼öPs&žØTL€.µ×ÝVƒâh>Ñn|âÃ5Äæ` aÔµA>¡©d¸“¥Å纰|›¬xÄ£º‰‰Aò’tdüדhT&Æ<ýBŠá1™ÎwPg´¤¼ù”Ó ´s[nD¼ ˆäÅá¥v‚ˆÐ(‹_£HHœhGDãmxd¹U\g™ @瘄#aùÁhžÝÑÛzvÒ­ðZ/Àv'À¦ñ,õf°¢Ú‘ÑóæËÕå¢@I ÑœZ—˜úà°k”9æÙ—b@D¡±•°î¹|»6HËIaêžN³¼ åKsT‰Aj$:V[¼Hq^l«ÄÒm¤µiÖ†{,h’,¿bm÷mÑB~Ï¥Ì ‹b |—5+ÞEÄý0èW·„‰Èåíç|uM 1êècŒùòCZl7¸Èø¨ÙÌ‹üÞ©.œÌžØéÜô†òÛµ w¹/-÷æ<¶Ðm‘$§Šy^–üÉÑ‹S”QHÓg­8†£À¤[åÚÄ©Ço×ṕ¸ð¦Ó]´øýÈ©ê¯9±p?¦,½ ÐF¼dãÉ8ßÂ=ôó’ÛKºµ¿ÛtrZD`*=I`†tb´b'Éó&6­p°Ï?ûBÔü>sá6Ôêµ)d9z©“Í0›B÷L­¸8 ²C’ ˆBíÞÎ=þ´ÀjÊ•5–Ð4i“q¹PŠÙÖ ¢eÿVÚÄÅY0v¶ÐÚ~Ç$ì„Ï7k.÷RGš[%Þt“¤Iç–¿Üßåÿ@µÅ?¿ãœ¸IÕo¡e²ûÁÉs®*Ú"Ãc”ÎBj£Ã‚ |ïz(Ÿ3’\… 9P’%aÍ ‰›a-œô!|Ý@ÒH CìÒrá~`<ôm£€r\3™÷ÒYë6œ³bÐ:šc «„Ü!°c4cV•…÷ö÷¸›1M«"ÑÔK™eAßW ¯ërÞ§ÝÛKS[ [MíÍaÃÔšºxSébwµ5"ÜkFzƒ„7Hxƒ„7Hxƒ„7Hxƒ„7Hxƒ„7Hxƒ„7Hxƒ„7Hxƒ„7Hüš‰Ï3{Ä!©ñ‘T&n œ›¦CyHgÀ¥¬Ç–˜ÓíÀ´®ƒWôÁ?‰ƒA~ÄÓ8$C9¢åû@BU†Aìï’˜2™P׆÷Ä’ÜúÛ$&ÂŒ‹1$ î±gÞT`Ê}d;f¼Š©r#‚‡ó‹­N¡¿w݃<Ïf˜Z¥Š…”UB"fßtÇœ›ú>‰',^ ïä‘ÊËÉTå)\'[,%Ûåй_œœ•Ú„©’UF 5;,ž&9®¯·y[‘·y[‘·y[‘·y[‘·y[‘·y[‘·y[‘·ýÅmEY:Ê$²¬n'Ò_*V"©ùªuÂF„ª¿È(D+Âyš]|ÿÊþ¾4trø\¢ë¨NµÀHN›Fáõ<.{{)´,>E#—ªw¸£œn¯&ÉRÉ;tL/õHK*”©F Ôòèp3¬*‘†ÿ»¤)éΫ¥»l ñD~aÃ$;.Œ½†2ó'¹a^ÜOĈƒ&¾A/ÎÃdb,Qì …âT ÂI}•YÁ†¹J´#¦–8e¤Õ[‘“„é]«(R©}”ßÖj‚ê³uæž÷cíXb7¢®ë¨»Z¨ÛUéPƒ?hÏ(K׺ի¹ÆÀcéÁ»ÌryÈäkãK×öÝÌ*­‡ƒëÑR—üp]·ë¶õÁ°ÖÿK­ò% IpÝp3ˆ•OË5WÍ9¬¾ÿ¸¡´¶PY rˈÚy?)Ú³Ökú>—Eêg™PYÕÜøúnªQc¸É÷7ÍÌ7`SoJÕ/’U4a¶4I/oBÐÿ TmSˆý`†åT´1Ö`q°u!ôðªÒ¢^©–2…XÓ—`V!!(„&l/‰CTmfÄX¥hQ)Ѻ*øRwˆ•  Ø‚˜ÌýFCÔ/lgteîÓA‹F*†6ù 2!‡n+!Gªì¨š™ÝôÕ'q1#uoF›~uCDvìqjù”ž›8žuƒïè¿sV,H¯¹!L«F4»o¥¹Š4_ön‹‹®?R?² Â4ˆ¼BL™$÷êíŒé´ì,¸¸ö޾ºa{ͯâÖÉèõ ‰Þ»ó¦CP=vY‰…_%-Œ§ÇgfR-Bï½I2¤Ò¥¬A·:MÚx­kMWõJÓ3 ûíës}Á;^X¶ÑJ!@ÍÙäT6Š:NIm¥§ia ëvJº4ºI6z·áÁ@rAuˆÈàŽa®S6Z*",«…ìu¦0 sÈ!m…NÊMËþç»{_¢³Ù¢õac?f’ “æÇi`Ë>{ãzYH:ÞyMÚ_R4Ã÷8í6¾¶éb/FXÝ5¦"´ÙÌLMäDé<ÁÚýTÒ©ˆ»÷õ`ÿëóNðäë>‘öNðù×?þ&@ô´Š¢£vî+ÝÞÖæî#|emÛ[RÚÔ)F¹­Å'µvó^‰Ä.…c¹]åx5ºè^JwÔ,ñilmñWÃ(¦÷ß}ŒñÛhP]ŠÄÎ$æ.T4;”‰g’‚¢}6Ó ÒCs¸M¢åWÔ¹lè‘ ²q?‰ë¾=ÕØ;®åïîîºfѲùh‡ëlÝï¾~q±óOû¾Û?gTël´*Åú]|²]qjgÌ.š0K›2ªñ¥ë0$(DªòÊqZ˜R‘ðje‹"‡ 8SD9@&Ôä^ƒypvv°¹¢0(~ñx´áóü"Ó:ÔzM¤jü–_ †ê+ºBïKlW×:i5Û§„•þgž¨/}ÍöÈtÐbŸ¾Íîh!Áñ”ÿ™ôÄ\˜›r²@#<òØ®j¸uu(ÓèmÍh¸b 'jEÃÜÅE(eg¸ú ÚÓ³ÝÝ“jGò?­E©J)u¦ÜöÙEcãëä£ 
ä“¯®H¶‡ÏÙ­c_oµ€{N^ˆwNÌ;Ö3×Û°òto£2K|ËzÕ4o*¢»b@g8æuf•lõaI[¡l|‹-úK%w·WŠÝH»¤6’hRÜ áÁžŠì&n‘Óæõ·A`nTn{t_¬¡¾·ˆdnmé{ç†é/ ­•dæ:ë¨ÞO@rGP“öÙ÷”‘R”g•ªã«èá!Ii†ÄÓ;eb%aŠM‚Pši°m>¦fif³š|ÔØÞŒÄøB[5WýAEªÀ|­¯6¶I1Í.êk=\!³$Ûj­CŸ5uÔ=BŠ+ØÎ"ñ®e­yP‹¦¤~=jP…TZ§ÏPÖî ½×Õ÷ú!Ê›—ï±É²yáóvüBü)A…u±äLÆÙºß•˜¹ê5ÔQ7Yÿ'.ÕæÀ¤ðÝðõG›ltÆnåd³'‰ ¤ãÆôQ°µ(“Ý*Lgw8O€¡Ûnšt6;žÄÓ³Æ$Åo#ÑP½ß®Ýy$ÃÕ‡L~Ñj‰¥t\¹°sÇìœÍ4±Ѫ¾ZPý.0®þ¤¦ZÁ’ðó"{jKM¾ðê˜Æi£úTډ֯Ʃ¦@¡Óï±Ú•sŸÞãI¥á«¶œ]ú'eí[™Ú·W˜Õ`I“šÅc¡ò¸ú(¤\;û¸ûƒLjezùeßlÊÏb¥%‚KÍÚ¯3ôÎiáXŒ^?Wcç¬ Ü5›vªˆžÓÜsvï‡Ál}5½/âhºCÿ”gìš0>°p6Ïf-ÄN2ˆIèÌcyI“é}W…y@“Õ˜D럤âìDíc'T™µ[P) ÿ'%ª%\€ŽÚ¼äû:àTõ+¢ ý,[îäÍÒ`À¥F›öm6YLãÈOH»¿ma©:Œ±bÀ;¥#[©ŠƒñèÏá¥e£•³Wâ–”^sœ“È…üpÒ±&Q‰ü§ñ°·!ߦÁ"ååÔIÊIé-À4šÑ‹ÄÚ˜ÐZAcAÂV}9IL¸Ý¹þo?üå?åÄ3R’ɵ~•ü¼íRÜ-†zÎ×*[N]Îç*žbæÍnw:3׌J)g÷¿û盾¹zsz|yuzprôL²< Õ[´ŠµµàÏûÄSî¶é/ÿ.¿uåÂN#\á5ƒÉ6Ý,\¤bís)¹ymƒNøôC¢>õè„wB'x¨€‡ x¨€‡ x¨€‡ x¨€‡ x¨€‡ ü¡¡fwûûù‡½ƒØ;ˆ½ƒØ;ˆ½ƒØ;ˆ½ƒøOä Œ‡8ø£¸ˆ?oçŸß‰é}“ ßäÈåÈ™ZÀêâáïïu Ö»ƒGø¿ð~Gïwô~Gïwô~Gïwô~Gïwô~Gïwô~Gïwô~Gïwô~Gïwô~Gïwô~Gïwô~Gïwô~Gïwô~Gïwô~Gïw|?¿ãþ^K¿£áC[,VÓàïÀ„ Ë&nþóz }bfï‚ô.Hï‚ô.Hï‚ô.Hï‚ô.Hï‚ô.Hï‚ô.È? 
òWÌ4ìS {—®wéz—®wéz—®wéz—®wéz—®wéz—®wéþu²×~`nÛ8RïÏõþ\ïÏõþ\ïÏõþ\ïÏõþ\ïÏõþ\ïÏõþ\ïÏõþ\ïÏ}G®wç>$cøÊ±Þ›ë½¹Þ›ë½¹Þ›ë½¹noî­r¬÷åz_®÷åz_îGP‰ô>ÿyÒäÄåçîýÅ÷¯ÄçzMd·ƒûÞ:kh:˜R/IHÇnÞÑc˜uèŒâËÕîÜÔp/¡¯ËO·.èÛܬCõu¾ŠÒÑ)Š­b·Â,›-&ѼÌfÌ"Y/èÛÆör—Í'¤s_²B ¡‘݆Äçdø`3$þ äî<ë“@ÂÌw5ޤº7œÇ­ÜŠÃ¾ëÝë¯÷ëé^=ΑØùVºÒG­:$Ý>{ÈŸÕØ+DýI¨J ëF¬ý°nK¯·Ýõ‚v.žW3}™/ÚM=#Ù9ºAï± Ñ°­'ÿ¸i;}Qt÷[þ^ÛÞ Ól?¶¿t>#J«,(\ZñÆŸ×fÅGÝ~ø…ãaX¹iå³v‡@ÞÜ¢Ô¼éb­.v®siã¥Pô«únô¦] O¬ëj &‹5+¼·j…á»tW—ÿ^ xüõZãÂ73pVÙÎé×=ƒ—èãð×÷“”T€ðZ›_ë’dLŽQˆß©Ÿ´#NñÄ ’КšT†ß Äø3¾)(ç믙KWõš“ã²[EX³L<¤Ïòeuz¨Ðд`‘Q ‚d+¶ì/fÊj+ŸGe“ŽHkj-ï3/$× $ú4xù\¡G$±ìNÿûòùvM0ƒy™/W–ݳ S¯™3ˆa2€f!K\Úqª Á*L£– ¢”IAŒd4„WÚ+õEá8óþÄ•lÒ ^_“亘C¯C:¡¢õ@&5°d/Ð+£ðwÃÊMwƒÓ¬¸}9ˆË.z©é#Gp6ÐúË^ÐÉŒI$B³E9W>5XÑ1 <øúóØŠ&íÆhŒÁ’E›Â-È–ð"VIô‚QF6g,Ú=MN^ÑËZß«}…BÑ᨟¶\*ÓÅ`,6©a&~¶(-Œ«£R#‘®:<¥½Pï;ÕilÎenÞ¶¤šñi7H ݬˆ'÷]¶öM³¹=™ƒ²n8»9¬•ÔVa‡‹¨,æs¢û´\ô1z&¶Ü¿ê/2¼Â¶]ñ "eå&¾¿@——'Ã^3Åb´æVÑàI ÏÁúôN…1ö9fw KÃ!·²?R"Ø@^Ÿn²•äðèäàôp3ØßýD ?BaFÉmÌÚ‘ÛFC:8y[=HÅ8Øün³³yBÿÿr³Ã-_²ÝejM"„»I&ÙÎ4E;q1èÒ1¢Ûƣ͹]Ð-4÷IG€,X^iƒ<)hZBµžœÀ\‘²±z¤XõØ¢k¤Ts[í´<…*åûb÷нkânY ãÕ*àT1Ïd-1<â6›u·½ <,%6µ®7Cd V51§ànœÐmøöÀ\1=åI:[ׂŸPÀ^Êë‰=@–ضrcæ£F^H#u]7.Æ»:î)Ѿâ–NÖôð Û:­'¥Î«ªƒ…ƒ²¸#²ÕKËA^ó:µºW Ý“õëø]|¢§dʃQÇz4+pÑØ›+€ dÙò Æ`5cÜK ‘‹0{¤²EÁN ZÊWÑ"ŒgÑ>Œéöä8gg›õÁ§‹É„•˜·¡)Z³°Æ—ë|QÃÿr× Â=ÚÕˆaôB!@Æ6s¬î,™Ùn…첊FI–«Qt¾~Iß`ëÔªØ$ÓÓbZ%|c>¥%8¤µê) úâ&GM¸àiìãTÓ :a !öR¸Wy¸Dˆ2õèWæMþúÎ Ù4/“ä&þŠ­`ÁO‹Ÿ! 
Ùlp wÙ¢GLeîñE ‘IÎðldÉm2ÏRØ×…±Ë£mÀð‘i/e²ÈÛ"h©ÄÓ׉sÆùõúðا°‚ª·p¤‚»•'€h‘+Áä±qÍð‰×LfÚpr|qpÒ ^Ô8¼sƒh‘;\[EâKÁ†3Ü’Y‰{Ÿr鱞;&6xø|ƒ½UN³æF¼vx.óT‰²Ùëé t\ Œ´”¥¬&æØÛxýÍ7½ eqžrèpÑ e•îð•@| /Öœ´x—Vf¸[›ZÁSZ·ß—< [ä"hEÁ(Ëè2ã¨ô"œD·èòŠ€1 yå6Ó;lJ'ÖÁ?Nñ¿t,zÂûayUqÓ.Ë’]ZPT Û¡ƒHRÞγîÞNœîÇì=Ýòih×Ýq1ü=¥'W•¦O¥éSiz“©7™z“©7™z“©7™z“©7™z“©Ïâ'ÓgÞ`ê ¦Þ`ê ¦Þ`ê ¦Þ`ê ¦Þ`ê ¦Þ`ê ¦Þ`ê ¦Þ`ê ¦Þ`ê ¦Þ`ê ¦Þ`*ÓϼÁÔL½ÁÔL½ÁÔL½ÁÔL½ÁÔL½ÁÔL½ÁÔL½ÁÔL½ÁÔL½ÁÔLÿòÓ4%Y6!Lå§ŠÅô”j®)Ã%O*ÝT†*¯²Ôy”Nݯ)‰S^¥1:5Ú`ÞS£Ê y;;ãô¾É uÿ«Û¡îâ~ž®Ý´À}êYl8œ÷÷Óâ§E˜¤×Ù»Z‚šãægU[Ùgâ·Å<Ø_¾ýУá0‘i²U&ê)[3&É é÷áѤÁ ˆ~e"Ý‘^„KH|¢$ZLF pöRhIJžì —îp‡ÇÛ\zuZ¹DX®óðiU¹®‘”o.Ø ´˜DLÄ<*üBÅÏe3UšÄ*Øa瞘ÔüSú˜ò”™z/e· 7£9ñꤿ(½wh‘¨ ÓhFô-&Cü›(N/½TÎAËÕ Äzj?cL2É2öÒq–‘¸ˆIØ¡Cù”aÖˆLOOHrÔ£Í)lE›¥¦:A ‡x@Ë2ÍYÒ²/å;üY7ÀHÞb´r¼€9k®jƒ-'ïìÀi–Ä3~ÊHÀvþš8©;w–ü{N ±%‰†YHƒ\ÔK¹›ò·ØtM ‹Põv$ç¡~:ÖéŃ6ìu4S®ô»ÝoÌ.Á¶¥Væ*›ÕÅdXÕõtQó­{áæwü>tÒì ^««üç Vø“.†þ…1%´I4¹’+qÕÎ&éÚiGô>±ê6_¤ãèB‘1s‡ôtæ{9ö<¦vsÅ›݈±Š2ö i·¯¼¥Nz ++±“Ïow@.h)ða4›…M‰¿ÍjtÏ&4'üÚý)Ø8:(…êÉA_†#7¤MÃ…Zñåºkð;Jк³ŠK˘®ŸüyyØ|³E¹Óÿe¾Óív·+3¡ÌóšüŠÓöæüU¥YåÿÔúxÑgÞ?Nw° LdÜ^ó¸…™v-ŒØÎ8¯ô³èWX»`ɪ%ýV9li#H³6Ç–‰!Û_Iô1ý¾J¿‰½¡ëY6™P'´5ãx¸˜´8ðbÛ ~–¤ßò¶ÞÅÞÆ§;Ï‚Oåÿ袗 ŽãàY0MÒ-æ6_Ríž·C,¡³!»ã  €%r”P¶tÐ)g÷šø*ÉR:¢x:+îÙ1ä üÑÆ»ëdæì6 “l1 ÕŒ2‘m]b]Mï¹|ìõ,ÆÞ n¼”o {ZZÊh¬þ/Cµ%_ˆ»>ç¿C³êRÕñw4K:ò‹ñ`ýøx*Å•ØJ4lZ ëŒ_šüÃ\#x/.Mž9à?×ñÇyÔï'ÅôçÇj»ƒ—&¼Í&‹©ˆ›ÉÀ š_‹á¯=]i¶™7ì€<_×·œXûòG ?¡““Iqß°èK?­ÇMLjP–>z¾l}n@åùºÞGØimgÁ>ÝNïH·«Ó}Új§í»]ùzÕ¹oúµÝxø›Ç.Æl4O/ˆÈ¤UÐ×ÒOë†Á¼ã0ƒ¦Þõù¯Ò5)$L oØŒUo¬Ž~÷ص„Þ=[ ½ûU@a¼O¿&&,3,º%&lž ‰$å× ZÏù P°îc-oU…¼?ßÛeï½é¨…³Fœ4Чò<©Kª_ë¶ÒËÞ^w—þoÏé(/L+7Џk×åþ³g]óÿ·¹Ïê€"¦Ÿé°Äï5e'4|“{‹Ó[ ”‚a•ʰG´ÌwÑ}ÀVuöžŠÖ¼z̳Ea<•˜!)ub6¯ŒÚ°øb‚w»¼fb.¬z‘ä…¦þÞåΚþjw÷QýÇo ´81Ûâ ¢{½{æk{É1¬ëIñ>kˆ†CÒÂrRÆuŸ†À€êÀ«‘Úº¢fxp·³P CKüˆ;¨ÓÎ,Y©ºØJ(ÉÖhžüò 4i·kãˆà²ã°šj Fq1}ÆHpߨéÓØh\;§ÿ Ü޵”úœ< ‹dØÆÈ'ÈK¼m¦o\¦;º/õÒè}¨]§ãqTÚÇ=Ÿé›B‡6Ì‘]Q•–Àí²øåÎõr¥9Sì_²´ LDç³—ò`j´ 3JsvCÊ,x!€‹5ã¡aëêÈè¥ÄÛqÒúñ5ƒ|áVA›¦Ë´h’Ál²%u4‘³î ½Ö ï³îVì IINkƒ*cî„SÇjv¤u:¡Ö€«»c9ÐW½T­%wé³YôUžMãÝÔlºCv‚ˆv´,æ“À i>÷^*®Yáo´Obõ:˜ÆÉ­à„AÿfÙà&.Øý{±âUÓn’‰Æ_ ë4v¼†Lc¢þšï¨a±ÓøÑ-Ðò«>8¬~·5gO¹@ÀŽñ8£ùˆ(# ü Sáà ¶*¿-$ˆ…©ÀjRž¡ÿ5\°¶´ î&ätAæ0^H¼™µàAo8nTv„aÚ@À 
8[Ñ彯ðË‹…/z‹ÝÝ'ñ×òÈúS>$b!}¬Øb„f•TŒ™0ÝE³R£ýÜdÖ‚N™åýʺZ]­Æ#þ`]{åu:—/ÜåÑkQäÔˆŽQ…ö³ŒˆÓ² ú(ÍIÿÔê1´pwNÙÇÈ#¬Z0Ÿ‰ŒP_BBTÝ_¢íz~o8cÇÁ¼}¦#€v±3&5gÇÆç&´]I:ÑÜ”îÏ0—srÒÒ)(ºD›sï0²ýØ¡rPÁ"-’‰ t‰1PusÍKM¾)0V@âÉ«*%݉´q³ HV@5<²o²¹ûVU÷)—•1?}YÚ¯”ø|XCü¾[󱨚­p[¾AþU·,Wo ›Šp4òÉÕ j!=^¼ ^úiˆAL’ŵ(˜X‚›ø¾”¦C}#!aö<‘Í ‡I^f͈þÜr€R ¦õÀMãj¾YP1o¶›ˆ;êRNà^ wÀÉcV@ÒÃ…a|6BH]:"9U¢ÔÀoO çõp0 ÀH™Niß6UdÐ ­^úÚòI4ÉFfyᡨj úÒ{7 ;qV›d½<_ˆêb›ƒ’•(.ÌÓØq˜«­G¯9+Híµ[@ÌÚ= î‚–ÁŽÄw̾tz¤¯ã§y· I%èür¾M¸‚Kmdt¥ánó¡_¤7‡íÕWas¼Ì¥C¯Ñ¹l]uUŸ²p±ò×U|î…¼A2n6T1÷ª#xðz›ñÎÅuÔŸ'GíèÒt¸šhÄÞFJ›v÷6x_·0ó²Kz´Íñ Í©±¦Ñ/0½ØÇkó>·t|>àwnáølD•§óY\šV–ò;4þ¼v<Í=.ÙÄïéÿmSüî¾ØAœLH½­í|í—vÞw÷ýÇúeE†ipBþZþÇÂÝ'×6dŸžsæÜÇëÞëç½~Øë§rÊÉç'"w8!æN“‘Ê^ëeÉæ¬Ñ²õoIÈáÑL`›é6ÉÚ!1¨jpðj=šuÖ/Î˲R 6J¥3ª¥÷Ö‡‹‹o!BvŽtc£v*©Ø¶ÆÒ®¹”§«ý 8:‹yLø8×%Á=‹ï¤ æ! ÷-žXFè«t*ÀQ¢ÖõœÎôÎm4ß¡bsÇöÕpxíî…Ñ¢· s¾|qàuhËŠ4üPÏ¡9!µÓ·Q2áíµ.LµÛy”O}I—,™øMrL!?[;ÃÑã˜ËŠ6ˆÈiL™šNÜñîVº¿ó2ðíJ­‚­®&k˜ì3hõH0¶b¯˜:hÞ3[QfH_@ö!‘öf·A®Ù5°0„bñp»Á…`híWæMþúÎfÛ<2FÇpØ~%öSÙ#®–°Ý^zÌFˆMz)¦Å±>›T`é+¢•L¢•)<ÀÐ>`ÓÉÙªÁ]ä@Árïüzý8`XI¯(0í"ÙuoH…å^•¦%ªô¡üÝ;“¼3éagÒÛOð(û™kv—t u£™Fð"È ›“T2Ë$I~äøKZÈŠ)TqÒQ„A\mš~Wk“χ-ró¢%¿®¹É$Ù²8é=ø*¸¹v‚·qÚ ÓI'˜¼tƒŸã颲”ôRƒ ¼m¯É|5ŸUÍVšˆà<žâ€‹[08zK—¼@”-‹YNöìíŽ.ÕŽõE®Ii@Ôç†ãÙbŽ+f3êDÃ!dÊü?†Å<¢8ОŠ{|Áσg^›päUÁ…¬ ¢4Å•Êmnî?aÀ ]­)8'â~MR7xñhØô+ß GÜn¢ò瘣–§’ú•‡)qKa%Ô—¥™q<¸¹Â'®¶«sB» ¶7dÄ­¥qøR‰ò¹Y:Fëië§f9Å´m$›q†‚²óÊãœâ bÖytòÚch¼¶y «BB÷¶ÈËëMîüZD”)Á¼¦*ŒD Ï$º'¦ÂFén5Ëd¦‡.>jpÚ,Q¯bÖDºŠj0²Ñõ.âet?ŠlM–IÕåYG‘ ¯w$!”ðAÎW¨:ÇÃà&s„3„h© ˆ)“äÇÊ/ËÁ::UˆµúU… G§T#Ïnmí÷ɧfHüОOÊC·%Ÿl7úÑ‚£sº×3D—g‹|r¯²6Má2IïùGñ¥h=b¦ýâü¤+²gu¿Ž£B’£ÌYïC¥qLî‹dÀ]ÓᦻM¶‘¬žè‚{;èQ4ÑHX’gè2åŒ)/¢”„$ª)Ëz|>'Å= ¢é,¢ËKJÊ8žÌè’åD0Î^Óç<œ—ñ` äû¤«±ÏDª¥%}NØ'ëÕ!I2~Ë A{®¡ÏÃh*\£å²îvOŠ@2è¥/ß“ŽÄzÉ£âÿg¡8ïO.Ãó³üï¼â—yVT˜"iŽD'du˜Xã‚äÕ7Ã3(s24oŒ~ "iðö‰¨Zx#µ­ås·VHy;ÌgƒÇŠC¿c>Š•Å>œé7ÌA´ìŒNV[ÝÌ£Á.Õre84%cãüqœÀ«lG3iŸU]Ëf_Ê/sd.¸bõ¤… ¤ ƽ°½FW\oc2CòÑxÖ f¢AŒI u¬jQ[®úd0ÝXD€d®+É—9©¨|IW ÍGzÁ” nÎjFTV³QW° œ?Z²J&CC-ù5„)š LǘÑ]Â’Q|Khê‡ü±–HÍï¿~4ÅÈìðÇX@NÏv¥Œ¦Å5>Âû†1磲/ÕrÄT,ÄÙjÛÒV{éT€â`P=fÌ¢s*:èñîM|ú÷U –@¢m“ë“Y˜ úDø°×$S`ÕðŸk|”€»úú3îsÕï¼þN¯£]·\NÇŽ‰:æRñ +xG©h $}FFŠY„#öÆ© ‡ ¾]W4F Kzczè‡YºÉ~ÔQ\Èáß1‰^û1­ ;XôœÿËÙKW¬çÎçÝðZé'Fù¸Ÿ)LºA&¯¿å 
æQpø9ãK^8Xcuƹh³CÓ®Ñr±'¸?Tˆ­5-Þ0ú+@(ÖpTë"íHbú_IU ¶‰25耗#(SSµ“ G‰î¯•Áãý34ß_5@{¥|÷-ü€ãìÎ\1þ$ #Ìò5'·¨»#œgm²>™àj¼Í×ê[šÇ/ ¬òp^õc›,˜ˆ²ÀëÅŒóT6™TYÂâ[è{Jg"æ0xtŒ†<1J‡‡ß%×EÈA46†äëÞÆß¶¨=n:ø/ûH>{ºÝÛz½j¤ûk±#tÅù–{^ð!Óøl·‰ð¡V0æ¼ä<"ÆQEA>Aû""ȧaCóÓ8É  Ԃ¯þ¾á/ØmÛ0{|éîŸ ñ~ÁJ–9Wþ¯°ÄÖçvHMJQíd)l߀Ýà/è¨4w+‰ñ5½êß 0qgI oVCogȦ>Y¡ÊÏõªÎNƒÁ#­Àž4¾¥Õ„Sî:^4|Ì&Ì)Ý«@ÁÒ!6Mj¡§A4‹úÉ$)pî¸ôR9Ñê;9ÒÜ Ý0m½Ð¼p¬€úÙçš%ß ª´.± ø§„†àG’kÙwsrðBÚx•¤‹·Áë‹ ®³4¯ˆ ñBÕbýsT ï\¨Ö,ØHâª9º¢•I¹L²‚^#Æ °–ކ^ßÑI£^5|ÍüZ–±âˆèòª%YÁC꓃k/éö‘+îï“(ßÍ~ÞJ³gXûBfù<êgÓt9ôÑ?´Dƒñ& “ZíŽE7îÿêòÿíüë_•›ÿ¬û &;ûLŽ®¶P¾ùþðÔÈùÆXÖcÌXzÛÌí)Òa6ô Š,´Agº±~Áœ?hì6k%J‰º©æJfÈ'%g­˜•7¢w™®^þ[y"'Úè|¾ƒô±o»³ñlGN±:Âv´ó!ߨíÅÍsÍÊ!QÅdÒ4Åö)‚¬Ìp)ö¨që`S‡·¹Q‡^*ñEOëµ[Öe²Ï…ÑIázs,À—-=<¹öû渂”~úô ç@Ò°8V\g«¤}›¡èÁáé°|רìYŒç\ù”sbqÉø$ÓF‚ôI’!·kàPœ¾5¥Ùcû×;óCXbw¢ì5±Eó“ˇ rÁø$¦àËÁ ¥âö;úð$†gßž=ëÔKÁ¼¾KEËoPšÿL¬,–ª¶R f’¥¥ÌÐÛ2ÓÃ8™RÐ…[à25p²RSR㢣u… ¼_U„oÎXª( @B¥–™à?Þ÷¾\¨_àQ-B;+u_ÊgëkNÔ«¿¬ëñý/ö"†R«Z\CM\¨¡4¤#ªã¾v56îŒ25¨ÌÞ¤~ 3~ÖSTVÊøÝ2"…+'››ä¥`êzûŒW©U‰X¬»˜mÛH妱Z©©-©¿vÍwÝl>ÚQ›bqo‰Lø9¢A»E4ïöÙßxg1kLØaéÍÀ6PŸ~ N1UbZÌ~4þÆiè‹]¹{ù¢E –$äeËþ˜™ÚÑÆÍ7IÒ›Êx¯û_ƃÝþðú ¢ë_~ùd¯¿ÿYôäógŸï>¹ìö£ëxw?’œCóÁú¡ܜ쨩¾!Ô™·4qjA¨È_üábØhg©2÷À9õÎ-áT5K[Dß´2š mµ’´mº]èÉŠ3µTqÃ2½%ö9#e”d3Õeêüè²Ð3y n$¸Ð°1’ ñÔb†¶J{±ZH—чNk¬tβ;¢¾‹I‡…Ic‚]dnðǧ4cYå̽gÁ}‰“ŽÐ[Ȉ¶^Œ(PìP…³ ‚}¥@sµø¢1úŠò|ÀåIÉŽE¡1&ºI"ª+¬ä\HNTa‡ýPÒ{º]é£_¥Ã?JŠ›ˆ%˜zawСÊË?®“bm)$‰Ãlrk07â³ lªæ-Ž_IGDå϶ÕzP.KeÖ^:àá­’¯æTHX›ŽIÄ„$Ù†pKøùM,|/e_µ.!9S`ÇîÆç®&øµk×âS5ôÚ)@N•„Ûô/j…>#¹+—)t6º÷%’Û¨È6¿d23Ý'Z@2¢Ý"‹0‹®$»H©õ„Ÿ•Pè8ÌïgMˆ ®…6¤sA×ê Äöj3$´æHkÜÒÀ|Še¼aéPäBµ“t i$kBðïl4}ÀVc¥9ƒ&#á(ºH99H8ƒ_Gwâš.@!&…¨¡Ï^Ú1å…¦q`½Ìµ)×&ýùgÏ0kñä_i”fˤ4âß•i…üxðJ¼dâˆ6íò‘W¼T律&W\´^³¼œ3š4n‰ÓHûL,ò–‚.V9¬2ÿ¿ÎÕqh"þ¬¶\¿> Íx Òfõ}ì&É1>Šrø s¢“x>·yH럖ë#)#µÄÅA0Šu”Qß=s ÅpÕ*Ø—IAätÑ„iE4¹‹î9Ò^³ÌÅö gÉ®¯;n~Ï>-̽[òaÁt5yª65E–3¸âà¢_⫝̸(R|E €x3-Nî¿l@gp¤¢`ù {ÍY+Mºý’êßÜm1¦«?Î&Ã5×4°,u;ÏîlÕ¿Õ>Û­ö©ÿ3 Õ¿’6ÔBý4p*ÝA|ð§R<Ú¶ 9›®Ö„LL´c˜ÿ£—†{šw^¥¾k÷owÊFðåá~ÀÑC[Ù—} ù£ÁÍbvE¢Îcü¸ÖM,Ÿ–TõÉšC¥¿£¯n”ÎcˆcIˊǧåˆ\ëUû†S>Œj’©ÏË~Ú«|$.×,Äp(›©VYI¡¥ýV‘{O‚§R¿’³!Èžq,Wyc˜:«DrOÂÏÁK41¢ë‚ÑuÚ·ŠD[bÖôá¤FÃEÙÑ8ƒM`ðŒ<Ë ÊUÁs“¬Q$q£G­òKÿF ³)èá¶žàl<:/8Nåâ>£$ÙMY²Z‚1“圃ÃÙ›ÉÌRØM 
ÏÈ©YÍu=†Yœ#È‚É1§I0wãt^?òœÕñÅt†{{ebfÖŸ†3M¾æcA0ECÁF>7HD7CÍàñ ¦ºX|}³±îjóöU-§ÞªØ"óUÀ_ Ë€A1…g‘¤Å‚o âÓŽF<ýÇ6aZ¡å½á6$¼…#!X&T³l=ÛýÓüüÙ'hRìÚàvWcw5>FÅ¥¾ÉM6ÿçþçäù&‰K2ØKßJ8@yX›ÖùÂJælA ¸"D’@¬Ôß ^DZ7¢³Ð*;+q!FL¾NæÐ i‹Ùæj?®öÍå7á6‚,|Ÿ2ý¬ÚayÍ™[†±:Ák*V·èúðèìüèÅÁåÑ!k!ûŸÇ/O_Ÿj´ž–²vÑ9æ,·êŒm  ] ´¶8ÙbYG%FKD•zÈ>mÃ~îæ É©ÁÖõèÇ„èr]&-‡A޳Fø‚´ ´sØwkAH„•FB è Y<&ÉûÃ%íIqè\ȃQ‰lÕ>ƒþ«„“(žÔÖy¬ØïÔÖvÃâ^#»H åæ’4”\°˜C'#‰)”²Cj k5܇.Ø'Àꌡ ½ôš¨ˆÍ| áø|Þf“Û’|°7`1³Ô^mgHºT–xË©mT{à ¸šÑñ¿Â†Æm ç$Î[ƒÇ 5Ÿ"äÍÉ™XŠ‹ÐŽÆÉ|À.ôÛÀ<¾¥#¡oƒ§ßÁS§#`œÎäçÿyÁ62vÉ•ýGHn Ñv˜º$HüăçVM¬[o%D”Œ/–þZÓCú’ÞbC7#VÙ|ÝÆµ¢% B™Q‘¸º¦Ãqù‘+a>ƒjª.‡ÆÛG}ˆ4EöT0õç<[‘š¯øÀ)ˆ#þnì´Ër‚Ñ›±lÚ$ã¼a8 ÑÆfê¦ÜfÉWþë>°  DÓiyÅR¸”l=z¥JäÐÁñU™ Ó!’&Á…þÝåÑ:)ÈÝ»£ gæ$Iþûà䕲Ïfy­õöãŒ]®RŒXßÂ¥ÁôÒµ£¹Aö"ÌxJcZ£3]f4p¸˜©,ãSŒŒbVÐû÷E-®m·ÒÛ4z»®7>/¢AL*›í(×®t¯ZÜR—;¸jŸy[>¨{»YÍÔ¶Z`Î=gëFšIÊœ0æ &évS [¯[È•¯ø=Þ‘'e™ð¹6þT ¼$â>™(ã˜ãyGêÁ˜_UW3£X‚Ë´˜ÏI»gnB·BÅ è¦ù»^H67Œîˆáb)EvÓÒ‡{è8(»ÐòÔgè•Ú"ÒHJ4´Zlõu©÷c>°:X |LeŒÒBæÚ¢js ÷t*ŽšÞ‚6¿ÊF®bßDê¸Ñ҉жQç‹ÒÚ=¾ÝÚG+š†Ôv5›Ó¹|Ûv¤Æ‰œÓ _W‚$Ã2ý° üjŸ éÉÐÍ1ÌOçôt)Ÿ#]Uþq\ý‘ŸÍè™Éy|È zć¾ˆ¦3V½ÀÝCÀ¯M—^[~'¡w¬•"ñ³˜žA-¦ï ô€žäñiÆ0q±,,§‘Ëߨ^Qƒ¿¥ßo5ÒÄaä¦é·wåq°µ ™éÀ¹J×'?£dúc§äOCÓ¥Mɯ~B¯n~²ÙKY6Þäk6ødñÉ'Ÿ õÊmÖn?­ïÿ~2ûÏWôß“pï?$³ýɰƒÍýš¶ÙödÝEI«ë„P%ö%ñ¶ßÄnÇØãV[—ö\ñ¶€Ñ¬§BðŽ!V‘m"ºÛŠ•=Hྨ³Fz„)£ŽkÎ2 ìÛUíø…d׃30eCb®ˆ·±lÚ¯8_{4™‘(›ëÆÞÉsßÛ„²iیŵ¤îËžÚìÎ(§g/-q»ÞÛÝÕŽáÌ‘.~þøŽqWÄ Œ¿5ûüØ~£r·#ènÊN)&MÔšø´ÞM/uv6Bð4Y¶—X'Fê˜Ì¾–²ªI4oÖj@5LµíÁ ýEÓø—ÆŸ.?í,3W­ÀÌGÿ:;:?>9:½o&:E<=Ž›.|´‘ÍÙ¤# Y\= .§eƒî ³Z딾?”9çv'Ƙ%< SË„bZ,Þ—Ý=Qè¾ìî›<¢uéÈ(&Ãͳe”‘e_€IU$,“ ›óeÔ¥f·)ŸÌ¶ûXé“ý›ç Ü“ìÆVÍ© HÁ–J€¶²ªÚ×¶?˜ŒrÅÓÇ Ö%—Â=-oœ–Ïé¨l,¸ŽŠœëé1² ÑQA‡Ž8‘ ¢Néûc2pM_,U309Í“_bÙ÷êåçtÍÒ­‚讣9êe¸6NXQ© "ÆãD¬ï Kè(Ò¥”?®Ú˜\+½Ó¦AH@<©«ä7j¢ÑË‚ôÒÛÕê‰Ûs“âcÎkƒÕ†¬?ˆ›‘²Ù 6•ÀmÂj±é`H6ÙSMü,/QˆŽY€÷¡†Ñú¬ sî´ ‘Î €@ˆ°ñMâõ¨ô©!ÊfþR09®š‘2¸Æå.5®Ì†ŽƒO{k·ÄêŠ8ù\«cÚç_ø°vÖîÃÚ?ΰvcîcÌ}Ô·úöQß>êÛG}ÿ‘£¾,*><Û‡gûðì?fx¶Ûã“¿X„rÀ%:Þ7BÙ؇Mþ"”K0*cÞ!B9¨(SCm#”ƒ”ß(B9h ŒšBñå%AÆÇðþš1¼ÒµyŸÅg¶ZÓÛúJr.‘Ôáôä_™šC½ôÍùqÎß»2W®Ä€1ù«•$s§`$G7²­{ƒq— k[ÛðÑ`oÃÔ*ZÆömüþ¡™|G|`æÇ˜ù˜(ÃïøÝ}·æìxLaÙÞ­5ô‘„V„ô§Oö}ÈŸùó!>äï¯òçãúþìq}M "›çcó|l^ËØ¼"I˘Lûî¤jß(!§z¡šð À%¦©æ”à$Sò¢—„âZ Ž°UÓjpÒ¹z=’l).K…ñû6ýXfÏs·h‹ &ÝMƒ ¤†$½ÑûE/±¿ kö^ÑK¢½²@ý®ÑKXˆ 
“ó‡ŒÉÙøÐ±4cÞÿ³ª<Mo7.ɯ k®ôôe—!¸·Ùd1Ãa|Îã ëû|ÎÂ.}[Â^·¢€vHc9—/Ž…Éƒ˜üpÒ±\µ·‘=Ia«“oS¢G|µA¾3=3®i„ˆV‰NÊ:ƒB– ÂR;·Ãÿ퇿üÇ™Y<ƒ¡qN€ÜY‡ˆ¿”~©%Dáˆxm§3)PÄÌ»ôÿþíŸoþùæêÍéñåÕéÁÉÑœ€º÷q\›"ÎÌe;ƒåß•Q±,À%IªÆ^’)@Aй-.]û|‹]Ô› Ù}3OØR±Íü†cÝLUq#3[GÏ/¶Í®vøûb¼(±z“ä:æh ©ìý3;l6¿d^Š’ÿûõ›ÍŽ‚çGG§Áç§G‡Í±å°×ŸDN+FoâÀ±š×Ûø¿Ö ¾³û†îâ?{»ò‡=÷‡Ï‚ÿ‰ã¢ºœ˜ï-€ÞFí ÐzBùI$8 ú©³ó&V]°,´¥leì(/¦QY`®!1E² ¢j-² .Ÿñßóï‘û¹ï‘ûï„Ü_fí ïŸí{ô½Gß{ô½Gß{ô½Gß{ô½Gß{ô½Gß{ô½Gß{ô½Gß{ô½Gß{ô½Gß{ô½Gß{ô½Gß{ô½Gßoxô½Gß{ô½Gß{ô½Gß{ô½Gß{ô½Gß{ô½Gß{ôý}ÿÑVÄð(~â÷(~â÷(~â÷(~â÷(þ(þÏ<Šß£ø=Šß£ø=Šß£ø=Šß£ø=Šß£ø=Šß£ø=Šß£ø×šU<Šß£ø=Šß£ø=¿©(NG¼1²}O Ýã ëºf»-­ï ·%†”§2²!Ý`ëÙî'˜æçÏ>a%›’´Ám)ƒÏæß¤Uü%iˇøPƒ>Ô€6øã\ftÏ`˜„ûÔKÀJ--dq¸Oây·î|yLDÂ¥S¹l¶£”¥Ò½ÊLK]úØ»àc|ì‚]ð± >vÁÇ.øØ»ðqÇ.ÐnÀ¶sµ˜·À(ýëìèüøäèô¨¢×´ž©%ÁÅ]r]ÔmDoÎ_-kÁÔq¯½ow¦ÆN˜Z--•÷#Mñ¡hì â‡èÇ´³ÜÉŸ!¾DÃ1p*Z`'¨¨i:v’-M–6ívŒ÷“„§<6ÆÕÒ„ :HD2)˜+ÆÅ;Ƶ`]º0·hÉ`o†mW3rB[dJï×¢$«—¾W\K`ÂZhi|\‹kùXâZ`Œ) ?jr©XõÔÿ±ÈòV±ÎЭžÄļ»…ûÏAceÆ«¬j@ÂeX½Tñ+$ÐS,'1‚Nó œ À5˜!œhP¸òq2›á 04°j™#L`Åí\ÌSï€ö¾¾¶ª³ù–E0·Qu¯ À 92"fôÍU¼†ÔÔ¨Ág\:pÇ1zØß§Ùb4V«~BTCd Æ( d˜MPðb³U¶2œi~Aø˜Ðsö£M³[,Ÿ:ÓøŽ?sŒ'|] và Zw c¡xÚ—$ö–©½§»¶P9¾y€hYò€7ÝP3"‹plmª IÞ6H3ÕIP78Y¥ì‹´Dí‘~Ï@œ8b¯ˆzpèаÍQ}ü¥Rñ÷ŽTüü©Tô‘Š>RñãŒTŒ*×’ÿ\³ÇN4'z®ŒDc îáX½ŽŽôÁ‘vÕˆÀN²‘3}°®WûšÅô¡˜>Ó‡búPÌ?(f (kú#iMTL`¾®;Ö+MÜ´O ƒ(Cs{5nàÜ "O5Ûj|„§ðôžÌOh£4òƬksl´iˆfXeª{ËŸT‡h ²-€â7aû±«Y‡s¾Þ æÌh7ŸìæáÞxS,á“øºIºï¸74Ë Ðì7Æê0Ëú+…·‚gÆÁû†·öRß¼[xk/µðäàÝÂ[ªÆ·­Ã[I¢Yß<ÞÊñ—­â[Þúñ‡·U2ØKßú¸Ø_=.öðèìüèÅÁåÑ¡ ;~yúúüè°¼Ai)‡Æ©ŠKÄ<ƒåêîŠ1Ì8P¬pNmt.–õpTb´DT™“ÁöSs{Ôõèƒt8gð5d‘¥nrä8C íÇLÚ'5ì»q©“y ïƒýOÁÐ@dyLÜó<éôD÷Ýà5Γ `ADu Ï ÿ*á$Š·ÅîUŽ'uí÷h½î'Äâ^ç÷é …rX›…”è›15šü¢á…é©-¬Õp{¸ŠPiàÅ„:c¨B/…·ÍÂo`öwj’Þ²;Õ ]ð,f–Ú«í >!^/s“pjÕ8ïŠ Û€öÎM#::ÂÒ+M4ëW>€ÚP›¥p)Y‹`,踛ˆ¯Ê@‡H*!W'é 38šÝ «vÆ[û¬2¥+á R[k?ÎØå*•Áˆõ-\L/];sþÛÄœ³ÃŸ¥ÈrL`”ãÆÕHÝ—îãÒ}\ºKÿKÅ¥ÿï'³ÿ|Eÿ= ÷þC2ûן ;ØÜ¯i›}ÈúŸ=dýCÙ\}¸»w÷áî>ÜýÏî. 
s¢EËy×QÂH Ñ6­fJs7”®òR ¾Ú–g7£ÒÀÜKßGÂNœ òžÑq{L-zð:/ç¿3ñ6>†ßÄí/Ý8ü± ½£_IZËãí¢Õ_¿º<ÃoÄa!C¹RJ1Ôí6Úø1îãÅ»¸ïÊŸÒ ê†„!ã$:IÞr®?µ^òªHT£a«hCæ°0Pº´^2B4þÒ ürž-fÁÁÙ%u–'Gb377¹Iê\<Û9˜ꬩY~L¤½ VÖ•œ!Ø[ Võ¦¿H‹EðêòfŘAfýø>cá3ƒYX³ìRùL+ln>µOmð‡Omða4ŸÁçDð96|NŸᯖÁ8äI¹àO’¢MýtY7§½\;ù¼™èüÙS/Ø|íe(¦Åâ}ÙÝ…îËî¾ùÇ(Z—ŽŒ¢Ù$VIYø–-§P‘0¶_šóeÔ¥f·éo`þÁdšîCÓ}hz»ÐôëŠX kçÞpøÝø,ü¬)°Ý}cÃmÿ6C‰5’Pà¦)uv Ó &ÞÀ’%¥Éh˜ß§¥U•­ñ´ƒÜV®q÷5À’×±`+ÂbƆ&v·êµGóØ† ù7˜5KýlºŠfÅˆÇ ¥‰Q]N¶e¡:nø°%½½TªŠÍr³Ñ.!+ßr‚I‰â° w¡è:“^•UûT.˜qÙjŒÝ{#1ˆšøI–cƒË•èÒJ,­¤v¿:XâmÑ©< ¥aÏ’É+£É=Båï_©Z81„â\­Ê[œùPÄ߉!ñט6Œ…V]ÙÙß ET ?Ù’0p›à¾:ÿ¤m†ƒ¦©â4ž ¦è7µ¸Ö_}ª•ÙäˆÖäTOﱟÒ'úˆvto7”™LH¸,çÛnª°šâCgŽÝàUŒøK'OÑì*Eõ×—G_±?þÓOOŽ?ý´Ä sæÄNà œº’²“–ˆó%ÈŸ9FGP(I\Èýe5#>ðáŽ%ûÍŽ$:c_á ¥½L0Ïà÷ÇÖsÎÁ×ãQ"PG¶þB¡×$,­ w¬$Ô˜:%W’³âJ¿´Óä¹8ÄØ9yICnêEígíGâ5ƒC^júk$îú&šI©ŸY¡áûs¨Dë7qS¼èš±eÈeé0ÆåVœ‰„¨ô²¿ûô Ók#-¤ûj™n5ŒU墸mDš P!éýŨyb#à*[tH"7‰½pˆ0PC"º43L”ËÍ嶺ÁZmøÏ€þ±%°Z±í@µÄehöÝÝ]È)OÍÈØG ÝÆkVý;z1à%ÑœMXGÓpô?¿×*4âŸD˜‘ \Ð,2ˆ™±¦Wg¸áŽ£¥ã!ñ']L"$Æ©:âJÉß³EýäBaµ=!”F•ë<ÕÍGØ|L”Y€(qz4g—:Ûë– ]3Œå¼M#·$ÄU;^µ§O*msŒ^KJéêi6dí.3y–K;›dÍYA¤Þ´IçÑO Çæ7ÖFÂçiª¨pµ©K˜Žé]<ôº »š MMÂÎ*)§«©éÁŸ#"é-ñÐ6Àܧ ƒRj‡e·œS·H¬ÖH â%z€Æ8mÊüÝ©€ôq5˜2¼~`¥”?˦ÏÁÕ}¢„ðxvºrÄïŽqÄÿìиIKVhqZ‘ú(ZÇÁ5_£<\¥t¾ó`0IXƒp{Ý0²Óg›ŽÊjô™- G •;Ü¥³„8‡ÝÖ`¶Øþì5^Xþ¯ö¤2ur–ýxñòØ5×¶Ë%þÏ ’ŠÓpD<íNRÖ<úõ7\Ÿþ“{&"Ó,ƒBúiäîÁKûy ¦£¿s†zã›J{ bÍ Îø¡éåÅ DÂ-Éø¡üˆEvX”C8Ín#x€ÛppvÌ‘f÷´vç6Chž$•Wl®Žº;v2ËÍõR§½àáætvŒøFÚ3ów/5‘’ˆF2iÕQP¾ ±¹”ó²átœÃ§c[?=~ªœø„§þÃI¾-dÍŒ#¶â«{/У«y ÀÈÐn‰q’‹?)»ü>»è/^ÑʘצY ã7ü“=z6èn÷Ò*ÈÁ® Ãs><2¬´ùž•¿*Õ•ÅtZ¼Wã]×5i$üokQ¯BãËDâCœd€¹q¿^= "ŽË?2ŒÐ°÷\™.¯pR®¤_&ex]—kX Åך¢FAßý˜ .B3ß3qè¥HÔø&s›Ê«Ä´Ö½¢Qv›jú›«øuª,7Øÿ¡Ð,ú‰†Ý¦šµ±‘¸sÌ2§£0H½œ»$+ £–ÕfÖù\t¸d–Ôý¨Hæ‡B¢,†W„Ñ<ûYE8YÃóõƒÕ\¿®ÃéW!œ :lý W‡= …èäû3¥kŒBDŽæHC·únH¼e‹oY Š(/š¼ eŽ$hUYÙ<Œˆ2¸ bäÙu)§H=ƒoDg³éðXMžgýE®<ÿŸ—my‘ά?ÏnÚbÀÔ>Vh=-´?tͨA<»Úþ\§ªà¥PÌMÕ‘ÆîÉGk»G¥þÒ¢£ê<ßS+²ë×T‡¦I>h§ ñªgù0tbmÂ6…#.ËÄ1ÃdPp²È¢bæÓa¶˜‰öæ…¤ É40V*êkÅ™¾s7Á¹Y¥`Þ2‘&¹YºsÔ"–0OIߨà©Ä/ÐG†Ã›J„®é93ùœL/½ÔtSA ÚpØ‘t 6¸Ô©ì"ûvy6’)>ØŠ&òŽÆ˜—æÖú¬-`tHl#{ê¥ æWô$ÕqÀû|ØÃ~’9Om³«k“µ’•©^¾=(k7°Ï8IµxÖt Kuäè6œ{…ÍjÓ˜3†K*5òB©‹™q1Þ•KNé4-¼fÐ̯ñh z´*)¾L²ør°6V/-Å‚E]£xöt÷3Øm2 qžZ°ÜÄܱùfQÂò/‘%.b½ë”g™Ç,§“2 
;•ÿév»´–rÈí*w¹]äävšäC/M“‡¶€¸Žb£;•C8¬4-…gSÇÔ+¡{ /9£âÔK pÒ )øévMZ;³_ý{GƒÕq›pBõ M"©çÁ[¨_N˜õs’ê8<h–§RT+xÈwñ=r_B¥+‡`r&")媊¢šÍiY]-ŠbiCÐR6?/ŠŠ9ÝWÑ"Œg‘8V48;;Ø\¡åJàd“«%ålÕÉŒË,Eåçª5-Ð4z꾇…ð‰–áÁ6 TI7ÜÏtAÐ0êkI1Mý lûºˆ•ì¦1ñÆ“¿nÊÀé½ÉÛö±e $ö¶&Éj5Riðc¸àÓ›ypþü0@x–Ü+†,0ÖZžqKš›‰f„oJp6§2 %ŒÆ3Hþ7ù‘ ÿKôSeb†&ï™Ùµ+ìbÄS(˜‹»”sÓ3‚旅Ï5ÍËÆ¯#RDiL,Ó¶Ñ,_>oQ°·»ûr)èÙKö 0Ùùèl_ÆèµãZ¸ÖZ·Ôß…úÜX,¶´–§~ ¯hbAšéj«ÄMªU_\¼ªä…¥nͱé,§CÜÌ®¯qŸ¥òßÄØÃ,s¥‚N@Š'ýá>~8ˆLíAÉÉVhŽŠT²µ™¶+»‚× ±wI® MŒ·WË,@~³»”‹89¢`Ô&crØ@åò o %?¿D“àß»Pwà£yÞ9Æß᯦šìÆÌ ~RiA4W͵&»q¼2> º¦¿ESOj"²§YWŽÑç$û`4·ˆ+™d£”®¨e™ Ï,‘Ÿ= ÎŽN‚-Úlôž2X æ’Û®‚ú=,Ïw½~zÿê>Ûý²²)Iêö®Ùʶ$ÓnÐÛñŸçG/OƒGç—Çߣà?íml¯‹¦j!-l95„ò˜Î/}¾i*—éÚÖ4E¸ +2âÈé^?ýÙ<¹Å¤©V±¥%ÆÝm¿çÁÙùñ4â໣¯YˆZî7¿æªŸ}¾÷¶?5úUdZC` »ªXþXHPÇGÁ™ÁSÎ’ÖK5yS­Qkl–iÌÐRN2È¥$Z…„UÉgÃrRwWƒd8_'ÆÃÇ–¿”•Cªè´Oµ–­qBX£Š@›ê™«»R]t54ÜkJÓá ¢Þ ê ¢~ƒ¨7¯}`óÚoaÊðo0ðƒ 裡üJ©{üZšÇǤrx¡þC õÉ$÷%ùÁMU|—ôDÑ<8_S@Þ4•”­˜²,ÀÓ-1íæ!ìØ–p?Œf…Ö¦¡^ʰOIáǘj± ¦ZÝâç4”y$‰ª¿½¼£÷-åFØÑúœ‹Ù¬×a—ÌUÔí£8kkÇ€8BÖ@öñ‡M,˜wTG0›PØ•0N'ÏB ø‘='1^Ú–TÃQ,‚ˆh‹²a0°9°Ö© ]A^¬ÝKŸ¿‹QSÖöÅEÃÇõXÉØGí¾½wqäü÷£³õ=-o²á ®Ëg’\zŒ{w{»o“Kϼõ¨¬}¶œ‡»•åß¿i/è»ÿ^ò oý™PüÅíúñ.3gÊÅÅ·ü—‰ÉòQÅjê‡ðTЉ ŽÜôS[#-M'÷ÛËÁ”$C¯š-â[ Ü6ˆSc€‚‹£çG—W—¯¿;:ýZSÙMœn›è¦ò‹Jsõ ˆVyîí‚3#ñÜ‹£üðàòàùÁÅÑÕ›óWàüèðøÿÜ^ž”Ô|ÿ%ïÕáÛ_pX±©ùTï¬÷ªÕJ>'588?xñÝÕÑé¼uçǯ.ø¯ÆÕ­îÏá‚ý6&ÍO‹så¤s-Üò<ìÑo—uJÞV¼+§#Âé\˜dOŠÈï§ýŒ†Ð‘€¼oá†)D‘© d²ê–Êq¨Á ìŽL >žW×í²J0˜^Y&óèÐÉ•#1bÑ\§ËÒ¢Ü,ÎXU»[ôæ6}dò)›ƒIò'ÉßLÆ‘{Ã1X'9“{Ý‘]bÅuÇx—ÊŒÞJÆr@KÒ-h¹+³J»/T„Ý3t‹\ˆ p Ág ÝÃ8°dÌZѺ’áÜ($Ž&„oc‰ 0‚c†$½Ï…2r.-{›!Ó$îÒz鑞ܗ¢v*yšÔ¹"âtTd&Œ”Ža2[LÜ ªÁ…æñkú´}‹á09ÛR“•#i?GÅÈ\tlÊKp—æñ„õFS ¾TbmÙÐÅ^OùX,4…›aœbÖÂÎì°Q?”ÔéjŠi—xÚ¥\)j•}}‰÷·Šå q:À¥“%OÅn—lùµýÁ;®ˆÉ"ï¼ošä6¢OóÍÎ'Ñm>Yy±ùw¯?¾{ÍS­yÿ×½Ë5ÍóQ»µó¨Hfé«Vì¬2÷·ÝVï4ߌˆÜ&ñ]?‹D˜Z"!å¯. 
9ççÁsü‘-LŠ*‘šëf€ø,ù¾Û@=œï™ˆp1S©úŸØmäÆaŒ4·ƒEéoƽӚ©R–œš|ñÝsõÒ?2Ò~°z÷õ­ÒïÆ”®?GõSúﻜ9‚ÄÀ@XN•D#<ØšMÍŒR†Gdä.²*·ü¥†,¢ÆØ½››IºZm.$Ícc2púàÊ"ÙœœoÇd%7ôÞžoè\goƒ'ÝÝ¿ão>†D̃£·¤Ó ¾äç/³ Ö¶ãyG“`ê–[eØÙÂI) Ê}“3†ìQÄ!jýîww›»ý_©ŒxµÉqÑÅcÁL-8rÞöÐpüÝ<¥–Y>&¾Y^Ô‹ 1oGQ?`-ŒÇÒ¸•ÄüÃå„x«Œµ3·-Ú@œ!|œ^fõ ' ÇûÿpcÿGó-1ÿ4B§¶#}$15Aõ¢$…µ*”®[ñÌ.énív1Öo¾?<µ¬^é+’p ø°ê@¦6 þÉùï«aó0dIyãªw¡5ÿÖ¨³û«Kµ:ž³»}Ù¹Ä/0ÝåÐî„î5¯Õk¢áV Åëß UjO³yëµ·Þ×b²5Æ…é¸û×ÝEé6änYV¯»•Õ%y÷ËùÛÁb:b})îrå,ý´n$‹ÓB¥ñãè„V’ uÓ–Ž˜y¾®w~ûÝE®wCÉMHk-z¨ÚÃE1‹ûY {ÐÁ!Lâõ2Øáÿhf0gCKŠa^ÀªgiiŒÂ|qf³³Ø‚n«ZàÌ”t±vÖ¬ürpZÏxóâtƒ›Z ôI›fäE·‰7’Òf݇¼žCĪ%uà_14×g`#P+AJJ>7i©Ðc-­éÑ]ݵ÷T¢ìø$‡¾ÓþÐJ ËP…Zè§EË£\ëE¾]Îådñroíò‰Ôº±†ã‡&ƒÓßNR)[¯žýq-ÃHc-N̪öëH•'Ïž<û|¹ƒyVdƒ¬E\ÍÊ~L î–°$N±Õb§_É{zE<–Á´c„ޝY‡€8Ӏߵ(b,œAÉmzï—–þß¡VU§—²Ÿ¤Ôû^pÕCS'ÚÔÌqÖ.e]¬x×4\fóY•ä¨ó@ФŠù òÞ eqÀh‹4K•ÜHÜ´z6è:Á¤ˆ¸ŠâË/9‘jÛ‚êÝ–5Ë’L:MÞÈæ1ï¥Zùau’¥ z2m~8ÏÚרËìÙõ-‹˜oZ ¸µªêqÂÜ¥sÀf=@î­†l9Ys ÎÌÛbg©–ËbK⨕ç³\3ùÂIûëÈóÖRÂ9ò:Õ}ˉ$Z^_óz5ó£0Ï%$)NaJ$ ˆtüÞ0W-h*™‹†LJ-[Ø›+3Öa|ÝÛøÛÖÑïˆ]û¯ÆAm÷6‚^ÏÍpTýâíÇ€=8ßö4ÑQËX±Å†pùv[N±â+çÈ«”«â‡,¤Rê] ×盦ÔÂÌ,§Î#iÂhÉ+¶áÙ¢ëQ*|´n Ư‚ÚÈ¢ÆÛæ¶’kxº+©¨$K,©Ùáw­0# Êx‘¬ éw7¸„×F¹Ý¯C(¢¢{ºà×Ü#~qYªhã(oQ¶ã[zË 9̳œqÚÕ³¥)÷B­à0ÉHjyÔpã[ßëãXl²Ž†ÁxÄ‹Êtîc©ñž0Vw2Q$«”²^ŸT»ÌÇ-Ðæ§mâlÁÖ®š“Ü›ë0Ó¢Î㼂‚–ø u9l¶°Ê·9`f­”— xvi4Åú*f#¼‡Ž,•”\8td éãD%Ó¥±¬ãk£B“7B"e†&¸_é´Òޤ¥0]óë(”Çþ½® @wHrAöW1¶Ê©`®P7ågNM].­tÕK¥ ED±SP¡)WäìQ»EÂÎM•0Iü;tÒ2»œ4CÓ8+¬­¶[fÊ9 "5Ö€6•"|âèàðäH’ÏÆ0ÒK%Õy;VY-cR£Ý²4*Õ-—î!hvØtfD±vô/kW“¬ü ùD BðÚÙ"i{ÈùvÈ÷ÁÀöȺ˲P&¶µ…:øŠ©¼dG,ËV–][«HXŸíî"=‡¯„ÃØØ<×YX8ƒÃ?!4VM©A¢Ü ÜŠ2r ¬Úp b>#bq(õDJYlíà÷"Ûá"lÉtôß¿áÅ—Û¤¤(ªRï¦&ÿ2ÒÝPŠ3õ%{:÷¹Åä5Îm)GH)'&èJ^BeÖûjÄ·•£6Nƒt¯_u9žKSJö6¸Foc©$jTÀhÈ/i F<å›Ñ€ñ½TBŠÌ€¶ Mû[UŠßïÀžØ‰zà ÷y"{Ù<[äK¡Eù°/A½ŸèÕÛ«3Ö¾ÕðA:¬4Vïm×ëú]•šÿŠßyP½_õ±Ñí»å^#[<,rºˆæ{¼Hõúª"%Õœ¬ÃÅ,©×:´×úǃóÓãÓå@æN*Þ­ P«°j´ÁË‘¹‚àœÎÐ-¬©œtúN³×ôRW‚—)–9KñJ± ¸—RŽé¥WdνIS^Æ>štŠh‚sÛ48òj³ß[–cîSpçp·I†‘ß*Ò ŽGp!çJPÎ,í ‰€Zª3/‘@,' ƒt»Œ•9˜1hîB:Ô0·+Ô+˜•GõÙ)Ó9H˜è?Žˆ[ ÆiFjû½^˜“C1ò@µßÝ%~4 ’'¯yýùœºVNÔæÄ1Sζ£U–ªç¿Q}dš±ÄÊð‹‚3 ™¹øþ·®í^fS9>ÿ‡ièz!Y%ÅŒ,³Ïáó“ ËÆkµ=lä7‚HH“¾j!=jä4º§à›¶CËÜÜ}•ýb´ðm¸?ÇOªùÂbÁˆ§I«§¨ê;eÿèÝ<Ì€M¬Å$[h‰6|Ü?÷h“þV¡Mæz´'·“X¸Æ?/Ω~»ŠêØÏ,…ìÇ?£ÛÈ6õ„¨€iÁ˜¨G“¥š ºá' 
LQL­ò×óh0;Ÿ;ÕåÆyÊhÓþ^Bº¯`ôÁºû©ZÁÆ»EÛ½+hBŽHø(ÐÊ»@'ÞÇnÑGÄ÷œÕ0dí$~Æp`‰¯†³µBžÜëÿ‡ôÞ>šÇ–èù ½[?| ˆª”~û´Z#N·2GGrk¹ðXy͈½Ã,æ¢-†»šš—p ´¤,#ò›©dŒ6µÒó<ž’:%Æ Å¸òµ ܃o³;aΰé»Ô=°Lìop²H b<ØßfP(kG¨$°§'ƒZ¾J x/ç°<~×¢«ÉÕºÁn–FU`8Y¦ö ËÅ ©%ìT®Ù·þÈœÔ7-ÐOͲùÉËuªîi78˜ÜE÷PàòÅÜ-’Ɉ>“Å¢üÜt`3¦“Pìï?íîâÿ8Ù.Òï?ù²»ÿì™ùë°²9]LC’`‡%d–<1Ã,¸SÌÂæjç<2nL&}hª=U®Äþþ<¸§•kÑ&oÆòòº%©Ö®mrŠÃTª)÷3àKû@´s¹i¤ü¶¡œÔL*ƒ›UЖkƧÏ>ûr™£Ó¾äp:5qtû›ËÓ‘Ú t&Iqß Ln6¤ìžü: ÀèPV?ž7¸WÜ0>Bƒl*´ –aÚqÑ3€iJ‘-\“>æÕà–þ<Žn¤Ð$å¼§ôÛ"ÕÔÁ5½–s|Àm½ßž¼­® ÒäŽ%”±ú¶Ô•¥+=…e¯<”ÀÙ™ )¬ÉÎ#¹% ü?0»ìÇìNgO;zÆûŸ=" Ë?øø‘ãZÔïÚ ì/Ÿ­O“°xlš8WÞpâWí’JÉË’£…MÝìè ¢[¢\·[{Õ0ZûûEÃì\£Ä&O˜ M[Ò81sc}MîFë}O‚‹É¸’Å ýhÖYü ¾513zM3»ÙÌËaç¿öÛ'—ž3F!0?+³AÚî/p×[IIŸK¾¾&7"ÉD#]}¿VéñÝ€+|`àØ¼ù5©uƒ¨76pû9ZãŠcûæ$ÚnhFÆ âÌN—ÛDœoh¯3;9Xl… µM_[}ó"qåkø ®q÷Ð/"@’‹´M÷SL“>þÞ.žJ©Æ8Ÿ ±–“ÿ/'ø‘ßÀ‚aBé«nr+4¿6(ÍØ«Ã·G¦¯|G¢âc‚Šð)Á}R¦Ñ-Ä0%©`p²W{yH lÄjš‚ºÅÅÅåüS†Ï”®†îª„¤‡fÚàì\¿°N'ÁMˆ.¸VVÿ2eÇØY)¬ÍU¸Ìáv]žjšát¹kù ÏÕ1ÄÎ<þÆŒ*í˜YAñ™0´(üzìYÊ"·ât{ˆqÛhÑ߯Çbt6·èl´Ý‹2í!¶ RUh’(Ìh­gI@z¯A¥wFÒ‰¥>Á€óµqë5¾–á¸Ô‡Ÿé‚.ÚÞHʇ ý € ô!)Š€g¨Ñeš¢N3 7šØ:7åppBed{Ÿà˜ûƒu¨×ÃÕ@"bf,3rX­sz¥¸^‚-—wR±¼Ú\óØL³tÆÀ®1–œ“0KdK±¿Åç7Ÿ(èÙ%#R¡R¼¾²?ñ5D]R3»ÊoaíÔ\s/4ò”ŸfÓ‡/2Tžâ³)Ù¥¦¶—$A”,îjÛ×g§Î ´¸¦u't(í=A¼b$ÀV`€ëˆB¾#Å= •# áÌåèÑóe:È㟔x¥‡ˆ;r ,75ŠZžÑm–ËYa(+¦«S»¹ªxÑÍ|9gÂ1ÉRbÎË–;ÝÓò4¨tI‹ì±[’ÈÕ Ì Íó˜4ž§ÿö®eÎ~|-—ôÅó25£/b$!"{0Íkœ©ÐëÀ’UŽPŒ•ò~ß8íj=Ò‘šõØfˆ”FÆ¿ò„_a‹ÂxUXþD½Dp¥ÖHÊÅo¾¤ü¢j©ê§*!âŽÚ¨OÎeñoB2üíù·4ý;R×Â¥iû?‰ëa‹KÃo*Ænƒ²ÝÅc“-€ Hö$Ó* ×tƒê$ˆû¢cœÄŸô}£Né¸:À d@é}¢³û1†}‰Eú{'aä.-‡‰PpŽúç܇D""{ó[—BiI~øf󦸅—àÑ*8+,ŽQyÖ=¸<\ˆI)“Êpˆh¹WRªÕ¶ÕI uÃÖ¹f|%¾øÂN27ã¿q>næ0)¶šSv·Æà¿y/y>ÿ¹ñó,Ífco4úà.™× #p/ÅYtˆ\l*øò]ïò!^bZÒwåÀoTÒmCbÒiØ’¾\e°uœºZ¥o)¹DskHÞ2¶jÎÞÞæð¯Á€]†´í~BŠ­ ZD¥ÇƒžÜ ìà Éh*W]é+M"N$¨cgDœ7„fÀ)’Í`1[X¤Š¯¥E/ÚALK5¤i™à—ZpRËÃ.»»ƒ¨Õ€ØB椉È+âà”ÄD}Ùµ%ác"PÇI9¹]Ý0šn"¢«þq±ñËp¦“ ¸*Úû=•;@ÞŒ~%.¤j^úÑ0ͺIÜG?Ú¼ô•s)˜ƒ¯5'i¾Heu»èâpU0+}fPÍY ѳÁ ¦H>Bü2Ñs:³‰‚+¬SÓrFŽÄg`wælcê€åô-˜Òd£9!åc“Øí¢ß#.†h%ómÅ8½¿÷Ýb¶G±¦u³µØ±;ÖbÇßMì¡®%µ¤±–4Ö’ÆoTÒ˜f³|:ß±ž+†Vz. 
àX¶æ¶Ï7 ŠŠ=›YöµYº¸I6,žÃÃ4uÔßÖ­é$Ñ Ô<† )+ƒÄÚÉVY?hÆø´PµŸ°¤•«ïfyͪ\×Ö4Hs¿†þÕ¨3yqµê t’óáµ¢OõX¼|OL}\*\0^Rª@ÏàW²|$ `#Ár9Ï”¡ häÅ(¼¢[¯,K¨‘¤ûX_ÈE‡'ŽÇ$M"XÏQ6Øþ‹‹XtŽV'¼¥˜¬¹Rf¤HJ5áÊÖç¢Û%Åmc JŽx$ìÆšÀîMò Éþ X0Л ¢ó¬¼X­GßµnãwÒ™­7p+kâ—úþ;½i†Và«$÷Ù¹…·¹Knª–‚~–¶ WW‹ÆiêQB šQ‡×ÉQóœ5c´3ÈàdçÓE‡‰VoÖ²§s«zw’$-^ŽŠ«¦\¶ýÓò.žöTr<êè0+"¸[vTøF mþÏXAçqëXÀ/‹ ÅYPA»×ÇÐe=$^^3cA‰åó†|"B76M"»Dö¼h ¡”œÂË &YØŸ¢Û˜)…ÃAæuÀ¤‚r·[éò¸˜ðñvËR‹§RTŽi¯À,×ÒXR<¶MÛ²¿ÑÖ¿d‹öv{[„:LÍîhqzÀâäR–^ºÊ#Ä„Ì9ß\©÷&ÁÝó®·'ÆÑ¡/¢á,° Üí­W祹RÇ3³çU™sKQýfѶˆž9×e¾pô¿{Ôå  ’®5 Ø9¨´{ \oC;•U–j²Á=A C+bÚ,Rš– ´„.`ÊxPÛêÐÌ-ÔÙŽ@ŸÉ+|1ž·U¤â 5§ˆyKM0#Ü0]ÌâîY“=Má g*Þ«­ðÇ£¸4PE[+ PùÈzÀi|j÷q 4bTX¥“ §l÷×h•’×oÎ0`ë89 ’Ž07k­Ó©KEH].¶ÅyÑŽ~÷ŒO–Ö~¸ …•Ö]ñétÙ5€< ÖxžåxFã9CºéÙÀùöèî)P¶4\’ÀNTÍIÁ0 2’×`[õ¿ø5†Çý<Â$k¬NåßoA3k™ãš„2ág礑µÌeN8VRÍl´áŠôrÖj…¼¦lKFºW¯N8šcD¨ªœ]Ä]__·Kcn§Ù𫚪+´’9(É&Q`í÷‹-k^6 [>j0.¯ìÔ]6#6 ƒT;ÑíV£ÏJfºÿüÇ ÒS¡ gA †iÙ0¨œý;*bߪtÁæÉ*)Z<Ž;Ïß>~8ùkP>:À*5¼|‡í ¸’¤76K°,Žï&-uiÉ¥UØ aE°SCYGæÓ¨òþ0bé[õ~ê;ËF¬²¾mõÌò%)=ÙÙkïïHëê‚`Bα¢$7,Ús®±«us·Å–ÐÒB±¤òÆT(jé×Z„µ5Õ\SÍ5Õ\SÍ5Õ¼jÖÅP­©æšj®©æšj®©æmTóñšj®©æšj®©æšjÞI5‡a2ÅaÀ¢lžCƨ!¥úC•†JIÓ1¢OPN2Ÿªçú8ÅìÀ?så×èsnf®š_€™'&æ&3i²fÐ}Iýrdë-C#†7Û6´]¾ØXòÖ9‚ò²mv¥å^Õ3yÏàW¾¿77¯Ó«ë_KkÒv®^±ÿ¼e=Ä-Ìfn+,IW-´„7sÆc$6±J•î»®Ô}éù2 ëvòó‘:Âñϯþç2`‹,ìõâéøçÕhikÆ|–C=ïî«Y¸cmw—þ¡çÊøKШ•€à=&ÕÚÐ #óº-­þ´l^±û•æk«2á ûqžþãe}{ÅhVížKl%,÷è+”çhYLÍRųF 0¼¿²Pð…uäÞ|&=ù©6Ø7ÐRŽø¡Q´™‡5ˆû@:>W”z´%†Å%Ê¥!Ç€’Wu'QòOŠa—®d:ë¡ÊZ:j›7À"ò…B=ý “pU”àæyÀa$ ©'m·MáªM­/*}tÛ‰W‚I"Y[RE©R$Ø… 0”"Ï$º¸ÀL¶€ È E §­Ã΄±K’ŠN£Â³ÌÙkÀõ(…WVc6Q7‘È X.|(k¶ˆ Q3”x­6¬ãlÛ¬«àï7[Áü¹Í?|"%–¤GQ½6j*\†A/N KÌÚ )ô»R³©-¥ðâÐÉa1'A9g‰±tђƧõ½l`5r¤ñ°U9N ÕÒ¸ÇÀÌo[a]àìs…MÝ{Tc1’ßJ£3eèmÊðo˜‚Îâ˜\qÍ%Ž-e ’åÂ,^pûÐ"¯áV3Ü2âç1@Q¹¾+§0z èjÐÌ8£v5ßd¦O–d4ƒeeë;õ’hÍ#kq·–u9`$ÒÕ²3µdˆVgó³4+¿,o8¹[}nÔLÛR™»{´L‡¿{îµFƒ…HÐ[üKwwÜÔ×R;Y´ fÓ«¿|…ù£ÝÅ}÷Ÿ.5œ,ÛózcMDv@´£fÚ‹¿ÝûÄ¿«{U|Û‹¼Ë•¦«E@êM'8B ôK>È—„»Ö¢:VEqòèÕSÔx>M`™GêNÂéuY uUòý¦P$ùŽÓ ÀؽdöjÅÃGû{ß~»ÿÔvšŠ½ŒÝqxÓ½éÇ,r5˜Uaü‘IHl|bl{š1 !œaB“9SyÀ6Òˆ£¼áÅñ³³â#Øef;è@“4=¥¶¿ØªïGÁMeêœò²ÿø‰Ùr±÷¤«n‹x€¡t’½ö Ò ´‚£‡3ënXHÒÔ<Úýý“ÊÊâ‘.ê%Ži/ §]›•³Ô@¯‹·X Ûˆ±l'.S]z%:¶7¬Ëµ'¢¬Û̾ç/¯"` ß3äN¬±-לM9|©üÞÙ¤5r•‹¯ÙVÆeÖSôlgž#1Ur¨¹¦'\:ሿXH Z˜·[yÒÅ‘¯ø%; 
Mܲc:x±nG'9j·˜jÂ@â5ïö"f\›ÆÚ©Ú™¸5=â÷Ù“eåjþùGbV90{»í‡»H?™Áû½Ù77D<.ò¶kÈ´­c숿üàÉÝÝÝí2 Iå?¿£^̃én½hÔ5†tÃÒùÛÅt3<ûrC#˜Fï;É¡ û,Þ.Ø™ET³k¡ ¡f«`õHU¨ÜZw¥5Keí6Å¥ö\™V×\œ¸Üi"&Õ2ú-•š»ƒ8ë63Ôò \2ª”˜×”-ޱÉQBëLÚ¨ô^¯ƒ!1LsQæ–&0%àÛS¤ªÑ†ÅDÍ_Ó™ÜB6W»;,š³=”F™a öX d\0;‚l[ý‡Ž7Û_PŽäl`ÔbkÇ79]Ý>›ÊaªW‹ÀˤwÊ©}šÔBfî,Ïv¸Ð—ê/;¬f`ñ+K~›ÆY27”s×P§oMæizðf¡àç˜8»Ýéå/ž‚ê‰!‘e¼bÔØ&cS6¹`‘9¶(3V|µò¯·Ây?JÂ,N«ÝY£pÂ×9gˆÝpJ^ƒºzõ”­Š#,àTúÂhŒ»£D¦ä‰‘N.ÛU’á¤4Ë ˜#fº´°PÍè|3 [äR­€Îa#?ƒÔ»ðA àž«0Ùu*“¬åÞ`A¬|”»÷ü)ŽÏÓctЮs~Î=°H$׈¿>³ÒhÑÌ+8¬zÈ»c‹šcMö ùðOiï<ƒ«);0ûÏŸÊCxHÜXäÑy˜t/ÊS]2)µë.,pYpøC‚ÙS¬c€ŒÅ\àù V«Þ¼“¼ ³Ü<Òl¨úMï @Þ¤;ûÐïå²É‘Zó,=QØ¿,åìwvtå³ ˜Åp<µ¾˜TüNél KóìÖ{õ\Že’r¤sðÄ|ûø›¢Ä6’ѵ=[)YRæ£{¡òëp"“:-¬š 0Žoð«pò.Ì´ln(NWn…D®o&§×[  ¦R: N¡-Ű…ª|Šý^­KPµïirœvÁdº½ a6UøfÒ‚ÈW.;”źŸg(7(Ò­0,û¶IùDVš=º*s©¯éêRã2„Ù@ì,¸SÜÆDS¬t¬‘¹ùî‰&tÇLœàƒòÏ\éͲé©è‚¹Ï@ùøP¬4éDëT©—ŠÂ¢Jvû—ñhÐý@×»›N¦ -þD¯"}·>§ls“Ke¸½‚„Éb9תq/µ: ñà±Ó9ny¬Šf;I!š ÌÑn<ø£äì²aé]_ fðU>=f}e® :íË}©†ºùÇM-¾° Xêe(ÔfKncÈuê¬åsØ7EQ#`ãC`7(M9f)dg:–Š%\{{*"ÓYÖZdDɺ\ÆÞn³`Ó›¨?SÌ!ýÒóU§þëá»×íÄœÞpzùÑ‹Ã×ÏÕ•õœ‡Y²QŠþ‘_Ò½'âf+“ÂGMŒVÄ.ÖÂCæ´à@0ëÒ“¾,ep—t2ßÒη̈́”¿ªDø¸4\,t—(qN ­{B®^2ÈW ì‚ª(ÛŒ+åi ˆòÉ•9 n+X¯G$Ë{ ¢Ü^9~Köº4âqỏð†@—$ÛU%j–|”e÷çí…`“¸‰[è¼ðßiðüÛØÕ]*K‡á(M† v¥‡Ž  T!"D¸ÆîµËn’ƒî¹aÙž¥v®I“O© IáV)óã*~òZÜp·>Üj.øCº2pç§3"ÍøˆºIt*f}”lQI’§uÓGâƒäo•5‡ˆ ”Mç<"Ìɾ&ÅXDöŠ3S4ŽIÕ¿Éè=®ºyÅ^N­zDšF®\„ Ó|9rÆCßÂ|!ÇOoÓ'î —Vds'“/Å-¹_œØÕÚ] vµ¶—áÀ¾VGõE °2J®­U°©.\ä]å+±N©>îX'yË?ѨòÖ¶s8µš¨¢±²Q¦[ö¥,F¢ØPÔ»\ä ¨—õ6ÚÚ¼¡ËëØ‹cFIªC5ábJ!|P+ï®pNÝFAë°˜êõ— Œµëù\ºáìÆ†7Ï„:‚Ë‹êÃóÔb‘I2Vò!æ+[^°å­ËÆØ]ø¤Û‘DùËÔܶ¡“ÓßV‘S;z!Bß­îî'kw÷Úݽvw¯ÝÝkw÷Úݽvw¯ÝÝkw÷Úݽvw¯ÝÝkw÷Úݽvw¯ÝÝkw÷Úݽvw¯ÝÝkw÷Úݽvw¯ÝÝkw÷Úݽvw¯ÝÝkw÷ÚÝýrw&uy×y¼ÝÏk¯÷Úë½öz¯½Þk¯÷R¯7N0Ü ¸ÀˆyT£8gþé—´øk?µõ”¡Ÿù5ʨ®ŒY´3Ìâõ@4ذký€÷=Dkþ¢!&ŒIœB•ŽÒá×¥Ã@e iÓ~JÓQDÚâ ÷èØy e8ÿ–¦?pþ˜–ŒÂõ«¾³:A)¾Šn“èÇ’„+k Ó\¦b¼.)Ïl~J"­é +e-)®ÚkV/à ‘2¶ŸÑ S=2i:Ê -ı¹Zx¾N—ißü<‹¤Œ4Kjή@Íê7›+<¯´>í\mû§>\4éAÌÆqÎ*=´¡bìÖˆskÆ€Á^Ü!Å÷&aeî$<¶ÂÒ‡%a`x¹þü²ðH:·Ü= ]LUwØÙ™‘.Žc 3ñ…áU›ôàÔ6‡ÓÂëyK @n©Û†?‡}TJlŽha¥˜ˆ‹¸S­#`Âá0‹†!<‘ÒX¬ÈðY´ц‘CÎ.F ã?}1­<µŸÖ½±Ìšðýÿþ_:ðÙ¥D*bÔÆGeŽV‘5yü,§Ál½U"-è§gZ>!ß¶ÆwŒ<‘û•U§†ÅCÍ ²n=}¾'Ñ. 
saãl¹ßQšnf"—Ò§ñkê´¢ŽÛˆ>U¯>±ÚkÏÏ×òðZ^ËÃkyx-¯9Î?²<|Ÿtî… €Bß°ÀÊ­6X¹e(@ ÷â!rÑp¹9ø´¬,¢þÿýöÙ‚erûkÆxU%>/Ò@¦¶hp„Uom\‹VkÑj-Z­E«µhµ­Ö¢ÕZ´Z‹VŸ#Z=Y,ë*l. ]^ñͺò¸¶˜§Éo©…ëŠÅœ¹_|i+ãÚð5"Nµ½–9O?F$íii"sÞ¦£¸?/*îØ)MÊíséÀÞۭܶ(zeibÁ§“°´"Ùèî…ÿþŸÿw1Ä…6c²ŸUîôÆÛ ÜéræûKÖz]:m\èÔîíW/uÚ°ðú`'rÁ®éx/ç4‡xߨ÷Û†.©KåzÀaÝB¹¹äÚúý]ÛtL—r%‘ÓÐõÔ4 rPÊëTjÂ阖„|‹ò,-Çš<,ÃC0·ÍØJý.>~øø[×CÆ›ÔlÆœ¢¬*Až§\Mó€¹1ñ_Y>µ¥45n¤˜Û”­i×ü2@5¸™Vx凋մ¼,œœ™?Ò1‘NaOŠ-@€P¿c–<Õœ.Yv…!«O°."ñ”ú í‚H*Pmt9‡¦ôÑ•®+%qC¥P§hÚwìÍý¥¯üò® ¦éW‹-uáèõßî¹\l©áJÁØÛrÿ–Ö‹Û`Rê¿L>ÆÛ}ûéž=ÔHÄ¡ÚzžÅŸ>¤0~/š§É`»¾»u‰Ú{,Q <®Òh¥Š J¾Ù ã_HS©E™WIˆqM–‰Ä3»£Ž™­BPß1)e0(1ÆÝ(RÅÍ–Ö&-Ëûe»ž¨ºaØ¥ø²áô™¡v¤.ƒ+¿k+ÓžÉn$tÝ$ß° -IMÜú‰Ô‰Ó×Ï[æôõ³7-s|òô=ýãäÝ»7ïÊ3Ö¹´ýº<ñº>ßÝ=Øe¥’y…—w 2̧¢°ýLóhtÑÆÑà|ÿy”oz®®¸¢ &©í´q}æ§N«l•Õ W”™#Õ“fY”OHVå8ÇÔ¢9ÓñÏ®Mõå&h>ü¦éw= V´JÅç»m÷ïß¿´à³Ó Öåža§É0ô‚‡56Pý­d=d™5 ”ÅÒþŒÛ¬Ørft^ùœ½Ná¼tç"ôurüTÊ ]á´¶HD:k™¼Ãà"‹ŠžˆX µöœ¾¼ £p'3T&°åZª•["ƒèfÞI¬ºUàõµ8¨•'ÎÉÑï~'$ö‚‘ZJ"¢¹ ÈxsD?D}Ñ×í¸,’[ ñÿNJÛ€vbdQ*óŒŽu;ˆn¬àØòÐõ€É˜ ÕJ¾ÍñÅŸÙ/LÜ„.4½ƒGï< GŒý"ŒG'ûk›xéÄ!šyc"qÄæ›yݲ{~íÖlEíaÛ€‡§ù6;œMS¶/òû8÷,N*IÞGâÓ›­?ïïþžñ6åbJî½×Rs˜§–éÞð©Ù&éåÍË4Ÿ€ªm ±ï"1QôØ4gTÃâ#’×uUiQ»ÄºylŠwi±8XJy"œ[B¢]Õneš–­Ä¶/ ÏÎHÌa83í'Ë4××çF>e™Ù´mƒ<\𸎢ĀiÕˆfßâýø¶èÝ!/?R?±ÈƒÄÖžu=0åY2¨äü^ÒiÙQ7¸¾ºázÍ»r² ÙŠ¨!€I¼y«µ]»¬Ä»M¤U’RÃÁ cãC˽·¸zHÚjJœ_2qÊÓ¤/ü ]±!t½IX:”w0ãM+K:\CkŒ85Æ—·²-NIe¥ÇÉtç*ÌvFqÏŠ•;]IùŸ7< =&ƒÊoÉ;wcJ‡‹cjCÇZWSË5o§ÎP,\à‚Í IÿÛݽߣ³É¬ñaË!Åé .`1éν}ofÓØÂ¡±tºóÆ\‡ñ´î~Á1h¶ñ•Mg1#‡e ¶n¡§;5‘¥sÎQÛýTÒ³¼î}ÿSËìÿ®e~ß#ÒÞ2ß~ÿÓï`‹5Ñå ö2†ÿîêö¦óZf?h¸¾n¶ØWz°ÛêUKì€^Q0aËm|¡«Á>7^ê²/C£„R}þ£›ãiBÉ„¼fÂ1¥&O‡Ä3I@Qˆ>Ûié¡9\Åáâ+ªâ[z$ƒ¬ÅÒÐX&iJFõ¶}ÁÄr°³s}}ݶ‹–fÃ"¬¤ÓÎwŽßíüɽï÷‘W\!Ë—ì‡hnNÙ’7fÿ”Î.œ°Q SëµjG…ÅŠ%(DªòV<¯p*¤³)C>Òå|îv<`1ÜçO8ªÞšF§_0Ò›ðy~‘ĩ±c ½µèԬЗÛ[Èk•´ÚíSÂJdé”. Tvrn°O/ÒkZHp<ålLH‡¹ÌN¥rìm*<ò¸®*¸uy(ãð¦K}«¿P 1wŽ‚ìE°,p;ˆÓ+õôxw÷U¹#ù£±(åìqN˜*yK*o$rÿt™ Î&B9Y²=~ÚF$bé:°Iv6ZèŽw^Ùw o9‘Ά“§;§ÀˆÆã<¨© rË€Þâtº! 
s‡ƒbAÑ©…oTùÄ\FÄlß*vc ÍðϤ¼|×îì©i €Ô§2ðY}±†úÅ"’½µE†57 ˜fè@ËE%‡½Î:ª/üTä£ý=Æ,#¥(O³&È–Ç$e$)ÂÚwØr,Ò¤jʵ‚P’^V"nkø”š¥u”t ù¨¶½ ‰ˆˆn4Ö\õ©ŒýxÁô\´ï²èj–ˆï°¸Ý³$ע֠ϲGº—|IÌK$80Š2ÌÕ¾e­~PKœ“úµÒ ø+X÷8Ëo“Y`1gB?€ï¸xM–õ Ÿ7£àgì`š›6A Éü*c…Å£t õoˆ6è}`|°&&3KªÂôÙaP£ÎÌfOýˆúp¶èJ•Ãè!÷’î \æ,†cp»nÒéätð*7˜5&)¨Øh>N…“÷íÎnŸ8dò³FK|$9AnaýÒA.„„è¶¾Vˆ/²®þ¤¢ZÁ’ðó,†+hò=Ã_¨O΄ºu/ìD+8ܹ gÄjçÁÎÎñ¤Ôp·)gW£>;•µo¥jܾŬK’˜ÔBk&BõQ¸d4ëÞFYIM-ìÆÄ/›\ÝG~+ý0æñÀ̦Ñfõ¾±Bz‡µsšÜt_DâùÀeµÙ Å[9àʰ3ž#Á:ò¬ébC\vâ¾ä2ÊKª˜ŒçmæI™«1‰Ö8%眘œ<±µ¤˜ʺ%”¨p‹—ç%ßWc[ldê¢#õ¶Ãýc9R¡¤k³éh6Ž"?í^tÓÀRuaÅ ·Ä« °R•¼ð¯´ÍS®\FM#Ä„ÏÞt ÔR4¹×\"͢Ħþýøª(FB"*‘ÿ$t6þÞÌ^NmÍè-À8œÐ‹tWä-΢v®ÿÞ >ýÇF±Ñ„”d¢U#çWiÀÏ›.ÉÝb¨7…·™ºÌ²ø÷P°óÎÂX °q:O1»ÿ×?½ÿÓûîû×§çÝׇ¯NþÃe#2ÀEzÊkÁŸ÷".㈰¦êïò›Tb§Q¥†ÁušlÒMÏ\äFåó-NJ10¡sÁW÷$g›‚”²²a2€‚æn¶NžžmÛíoñ÷ÓËYîm_0"‘Êü 6N¿ävˆ‹µ˜¿¾y¿ùã‰yzròÚ <ð串Ӫöþdzý×ÃW/‘‹#Ì>ÐÎÆZÉÎî¦pÝ¿½Ý–{¾ç?bþÖÙ@XHiÕY0Ã.ÕFå¨Ð²CbBÓ~’zDk'Jž0ý©T ¢[ÍÎÚÐ\ÎÆa¢¥rð‹”hñ#ÇqtÅ)• ƒKrõº6ÓÅ .†Ï£$–ÿý© næ‡ÖÀº×VÜÝÝ^Ø¿j¨'ÊÖ„)hm¤ðjŽŽ(¸ ÚÜÒd: M@h Gøq°zKÿ.Hýt–ïV¸§†%;n(<Ý:³éæõä¿tPcVO9ªz2…™kE$h/Ù[Í|×i6ÒâŽ`Rê'N)ÇT@Ò*Îy–öhixøNs0*½8ËfYßšì|› yI²s½›¼'±Í¥®ŸÜ Ã KÓ»\§µ½2Bm :»ï±®ü°l ¯7EÉe_‡9¼’pu×ÔI€¸ç_|tÅ…¨Y‚Æ“_mÚ^ŸDýý–/í oš#¾(6!B«¬*XXñÚŸ—æ–VÝþ†Yw‚¥YË7úÄ‚ýÈüsYô^ D¾£ïÚÀ_2‘“Ýäþêò¿›åS®t½¾Jn¥Œãk&V®Ò‹d„\hóK½ßqfÕ•Ø–s´\\¸ÄÛ¡µÅøJ’¯ÙV'eŽÍ@³QÌ÷ß3—.«¡¯NÿrrÌHµ¼Be¹³ añ(2þ¸w›1›(«-}‰ó66ÜSK¥•Ÿ?ÕÀ:‰XƧ?Ÿ?­FbÃÀ—+ÍþÙ šùsƒÄý©]¨'ô!a1c” …² ¨‡r…SŽRñp G“õ$®¤£¶ycËë~J?£P"›åBq ŽÐÛZã57Ý6E W£s]tÛWCïA—ÔºÖ-)â\Æ—Sƒåb®<èâóØ GÓKT.ôpTµd%+ðŠ1‡ÉŒl¬hΑ–sšœ¼¢—µºWûuG‡£zÚîð€õF©¸t5ƒž!.³^* OaÞõíèBDö\:\'@ª×…vƒ´,æ´Š6gÉGñЏy—i %àT–Ç!Â"ê@´V(Ò4"F$ ¶Ü³ua±m]©ãšÁÒíÃ8 Oœœé*%zåV!;‹ès°>½SA„}-›¥á€[ Øõ­úLVo^o²QëøäÕáëãM³¿û+!Kÿ?Œ¯¢Ä,i è¨ú{S>HÓK³ùÃfkóý÷ùf‹[>g3Ù0šªñHvìcD¿&¶mÊÀÓØ%­–Fв‡Þp.¢TÝ9ú•}“¿¾vÂp½/9$þ0ЇهY¿ƒ¤îÚ:⛈f˜%œÑî’£ä*ÎÒDbÜeb„9d㊀ËcH ‚n<Šc#¥°8ãüzõÆaX캅ª7ð{ƒ»'@q”`J^ªŠk–O¼a¢0)Ñ6p€WóÓ³ÃWmsT öâ뇌¶c™ ¶ŠÄ—©±Ø`9*ì'¿‚¹O¹‰ôX‰Ó<%6xütƒUeN³äF¼ñx.ó^5–¯§/dÐq™bä ¥,eÕ1ÇÎÆ›gÏ:Èlåº?yÎ PZêŽKÓòW–§0½NJ3Ü­LmÊSZ¶.x–ÃA+4Ã4l)l1 †v9˸ä­Ö'yåG¶û{lJ'ÖÂ_^ãO: áý0¼ª_(^,K²[göwè ’”·ó¸½·%;ÖÎõ¬gän_NÇ£¡§ô¤ËsUñ…çêï6‚}½AÃ."ÊÔ –wˆª-¸LôîÝÓc©[Å•å N•gÜG‡ñÅ7-ˆ¸ÎßÀx—±%pžò# ù4wÍZ¨œ‰EÛûåyí•ýy’ÂÞØ›×¾?‡-`GÏf´zzjƒtÆêßÌË(Ÿ’è¹ wS2¥6g 
žo¶5‡——_´É¡JÀ8[j–È©buÈBþáõéÜÆ³öS)æ%hµÍ3,Žäès.ÃÅ…X§µ }.70Ÿ#ø!ùP¥ÐÍÂü²¯ ²Ò†G›6­ n—9ÜGò§(§K2?76þò>ÙjT7¼v§¬Ý)kwÊÚò%Ïbí³Xû,Ö>‹µÏbí³Xû,Ö>‹µÏâŸÔgñ+2û¯­Ìk+óÚÊü›´2¯­¥kké¯ÀZZ‹‘·¶–®­¥kkéÚZº¶–®­¥kkéÚZº¶–®­¥kkéÚZº¶–®­¥ëﵩwmê]›zצÞu@ñ: xm"ÿÇ7‘?Z›È×&òµ‰|m"_㳬­÷këýÚz¿¶Þ¯­÷këýÚz¿¶Þ¯­÷këýÚz¿ÆgY㳬Ý)kwÊÚ²v§¬Ý)kwÊÚRïN¡ëR?]7n$–§Šs¥þ=ßÙò†$Iú Þ5Å[,˶¦r„ƪ¯ñr•[jbÆl¥Ëùߪóf£¨ÕIøßá$nÉ/®¼>]~ 5äقЬ8{ÍJ8ÜÂä—Ø}i_dj Ç?O¼ð?—Y#³°GüøçUí¬}R¢ˆÇi­ðüÙ×þ¾ÔðË_­<]injv@ž/ë[N¬{ùWku¦“C2òt^³è ?-q:ìÉÊóe9¡n¥çËzŽÀVí›÷éj|M̨Joôi£v﮺þòõmç¾î×fãáoV]ŒÉFü$š‘Iʾž…Ÿ– ƒ?øÌ`u½ëó¯Òõϳ$°±¥â5›qÛˆ£ß­: †·;ýBK=n_ÅÄûô5]A©eÑ ]A¤C!‰‹!.tžòžùOŠH»jEƒ’hÓËövÙhg;jV _ S“¾;T!¸Wé¶ÔËÞ^{—þoÏë(¹¯‘à&âj]î?~ܶÿÝ-jÈ£pØ |wÕŠÿä‰O|/&­nÏíéà¸pS"²~. á¶Rœ‹YnÒµ^Ö}ý¶XBòÚ’F†¸Ì•×u> ©üTÛþÚ}&è]h,–KEË[fõýÀiE,ÒýÁ*ÿÖÅ ;bìJìæªð?…ÓãGo_i[¾Ì¿]Ôï·ïÖ¿‡__ ý¯? _0[ÏH7gǯMN\}nsŸ?Á™+~I L?³öÆaMYí…™u4wî9®íÈ–beØCZækR†Ù0Ìúšh æåC®RŒN ·`†¶b[uõ7*KÀâ‹>ï>òN؉ùþ£òE’êúûœ;kû«ÜÝ•úwÆwÝØgÐ+šêªÚKŽa]ŒR.¸‹A8–G¹Ý'6ýëÀË‘Úê&Rj5¹šJZúüA½þñ­#+ ö15úm ³øÓ§ÑœýçÛ•q„ãÉ(rãpšjFq6}D¶—2óÂ|j#û Ë( ·ëý—ú–q8ü©»\Ì:Ù'sáÏ«õ-;O]Ù¥,L¬øõ66w$oˆ›T µjI–^_ÍxÇâ"ìeqßÓ:Ú4[º¸uŠȨ„öì*êlð¶naâE—ôh›ièä¤eÖá8ü àÉÑ>^[ÍùÜÐïy‡Û¹ßó·•þô÷ôŒÿ² OwWl?ŠG-ï|å—fÎwÿýUݲ"ÂÔø ¿–ûñWáí“k°KÏ;sþãe~¿µÓoíô»g§ŸŠ)¿%—ŸHÜÁˆ˜w0އ*z-%ìY£eë]‘ŒÃ=¢ãši׉Ú!1¨r÷íj4«.ŒŸ?d@L”JgTIµöíŒgg/¬BvŽTc«u*©àŒ€¼v ´ëÓU.åëÛÝ 8:³Q±|œ«‚ ß;Á<˜„ÓË7Ô.^·Âu`¢ñd:×tk<§3½sf;´S,bj¯Û½ œM/Æ£Ÿ½5xʲ8u ¢õÚR9Æ^…ñˆ·×y0Õlkò0AcOH«Ú§ð›$½0¢™Ýè§Ëˆ}Ë´PÙLî/"‹HkÉÔ$:{>Ĺ[ê~±ýnó+K‹ù§óðŠc%Š»{eϽ%Ž{•šFƥЇâ÷µ/iíKºÛ—ôÛv¬d>ó­î’Rµ™Å‰¤ÆLÈH*™¤’š/âGª”,¡j‹“&ˆ"ô£rÓô»ã˜|Þm˦ ùuÅK&)ÎÈÝÖ{p`>^[æ&JZf6µÌè¦ß6?GãYi)é¥VA.a~ÙKÕYR1eÕ½å[´Bsü!L†©¹¦+^ÒsÜ™­‰ðNǶ]kJg*PÁ‹(vT¥iaŠô¯°?e æYNRjIÞý)˹HE`ÂÄÁ šŒÒ¹:☵hq<`”©¯®$ÎjÄAL€s¿ÌˆtâBfé`&¹ƒŒ¦²à‘sÀ*‹zÅQ¤ë°Ö{FV@Ârzq1¢ëÌÖóLÒ ›…-ÐÛÁ0J"1¸¼ëw:c’ztvV«n­Åеz‡šGDEšx®ù½Y²>XA¤ê3N‘Üz¾B3ûƒêÔÀ *Bªëx€¡ûT±&­ìZJ‡âÉ#:CðÏ ÌÊžv |`e‡_ÇÓ€]éΓü}gã_·¨=nÚü—éq§Û Óé”C¥üÿ¨wÛ} ¶÷-÷¼èæ7[ž›»‰ŸŸZÁ˜óЫ߬àéGlA­«ß4÷ô¸W͆®îù¯‚¤â°‰4 ,5ï%yêLsc Ç·–îþciH_²àxwåÿÃZÐL#v¹4ER{Ÿy°tÂ@vj"¶s#ðeZ*Í]E|ÿâ_­}r§Jÿw.…†/h¡Ö &IZ}Ã×>søêÏo-JŽ¢z€ d¾ÅÜðêÏD‘JjĘ¡\Ñ\K2*ÆQÆP{P,8AØÝËxxI<²À勲IFÌXEsúF0Š]g:<ÜdbA%Îû¡XÕÇêÌh÷>ZÁs™^zgÁ-iƒ ‚ßt`Å?,ºë—ëÆîbý:â_)ì§³x~UäOÛËo 
üs­®ßcê‰ÎÓŠ2–$Á­³VÄK䈋x)[ÅYæ1‹Ù“}Ž0¥^E;¥?Úí6­eÛpÎ(·k1=Ú§Ø×$zi n~´eh°Q)n]É­ÒˆI(7-edX»©V¢š ¿i¼#ÍjjJC2?BNĆé]˜©›·OõGÜ&œP=hã™(`ǹق›Õ<çh4n \bÊ-ÒìoßÒf©t²n;™ª52¤±û\“’êˆ145Ët*|ânÜQv~›o£´&BXÇ·ÙÃø*k…ËØvDTÛ\¦éGCÊhZ¥×±¿¾Ø8ÈAK“÷ììšAi;Û}oйýË0ÛŸW©ÛÆãçk ÄÏJÌóøˆSǶE1˜ºµÇ¦…¬`(¦PLùßÑ\ÅÖ2¤xª¹ª%†«ÐKNŠøü&ö€t¨m—«0ÞìulQ!¬“‡_"Ö äVåRÚÔl S é¦@<à×hládJÅ+×t2œè•º#9çÍ7¬øeÊì 9/8@¦iö×Qíxe|—AÆ>ãu,²§KႵ]bc{=ú!òÁúé0¡+ê(µ(Rx_̳æíÉ+³Xê=aï dµÉïÑà3g3Ãõ_Úw_Ú:Ò^ïZ‘b‹S‘2à?OOžŸ¾6G'ïÎOŸžŸðÓÎÆö-cÑ ¨ÒÂÖñÉÛw'hò˜Ž0@óѦ éÕ+´m-ÑtnOík$hiÞ‰QÒÏæ,1Û›/»³CóöÝé4bóÃÉ_—,D-÷›_óU‚'ßî}Fژ拕dZK`-»*%±…È!†‚BSuP¹Ä ÆS »/;­SHÖie0†~"Q”tXr0cþéÿ ó0ùf;úÎe8HÓÉ~¡T~)ÙGÏÔ/‡ñpz=¯‘I¶ã’Xp>ŽaeabËÂ<_pÃb¾ÌZäU¡\4Q˜Ç@ "’‰‘Q¼ž1ÙÆËjÐÌÚ9 “á(öÀ>iX»ke“'õÂ%¸<3§}´¬ËÁÅÊ•Â,»Ie-â•_–Î7œ|i‘,lKeîîÑ2çÝs¯5£F£^Oø×²N"Hlq? Ù­Ú,PP³ÁÕ_¾Â\ÑîâûO—¦j-Ûßúô0’Gàט×L{ñ·{Ÿxa¿Rר¢ìÓ’†¡Vy—•Ÿnƒz瘑jLP«CåZ<Ÿ.Ò¤þÉ£WO·NÄhF\¸0 ÊWf( sn†Ô\XøWÑ¡ÌûPäef»÷ðÑþÞ·ßî‡/šŠ½xÝqxÓ½éÇì›o0«Â.“`:JH;CU3«d_•á_æLѧñ#:LŒòÇÏΊ`¦¦FHÄ ¶{DÇ63£¶?g*¬ïGJ,éê²agÿñ³%æaúe”^o‹°€¡t’½60i.Ók,‚=ënh·ÔУÝß?©¬,é¢ÚƒÞ½$¦5вn3σ?KõòI]ÔÜɶåš#"g ë$þ¡¡ œGék¶Zâ ypV®g»€ ‘Eæm%"уí•3µ«óöëû53…Ë¥ð¾g'é|sœ.ÂóÕæZX`œÍ‡ƒC:ÄÍ!­ÙûÅ53CÃka¥-+¬E'WÀ?öزŽóÒÄY·YÆ ¹Äg@ýV'bÄÀ(ÊÖŠ,¿y¯;Ì€0S8šÂjfiù–èfš…ÎhÃ6އ—¬Á†Ðæ¡èHxÐ_Ó™l<ûmܱ %®88lÇØØHÅ¿ñ¸à|À…µr0 T΋ÿ¶ÃqYëÒB ŸèÁk¥à%aýÃcDíÓ¤j\üãH)*¥î°8‰å¯,úm L)ž¤l(Bò'§^sÙ3¨9›EP.D‰1åœeüz4O0æ2^3jl“˜H” 6͘鞥.t¦pó8ÓTF^Poó>©¡L¬õúÐeNÐ [”¯ {¡®^=µ• X'—¼ÐhŒÛ£ÂrÉ1)•–ar;· j[°d°í Z°>ŒÇbO.-,Dp:áØ ¦Ü l­˜"˜Óé 1>¼¦ ‘u"C—¨žR½j«QcÝ‘âuŠhȽçOqÐxÖ˜hö¶Ås~Î=0;”‹Ä_ŸYI¤h敵ö„ ¯ãè¡}C>üSÚ;ÏàwÍÌþó§òydn,òèœ4E÷¢<Õ% {é•·p&Ž¿ºVø0Í¢ÓÓVÀ)õ|wKoÞI^†YnžGi6Ô+ý¦w§³œt$Ü=˜tJ_ÍKÂKrø% éú{o`¨·×‡ÍÌ–N”t”ζ˜¸½ÐÏ€ÜΠ™¤\‘ÙFb¾}üu¢NÓ)ór)F)DôÅ^¨ü:œÈ¤N ˆ#†Ç7xáU8y f}9P¸¡8]¹«È F|z½µ Á€d*­Ó¹àG»Žè>nÁHôîäðøÕ #®è8 ]8åUãØÞ.+õqÚ›±UŠRÇñÂÈWZƒ‹{”.8V$Îøo ™”Od¥9¦¶GW…c˜ÃÂ}‚Ëfѧ b ÖòõÈÜ|÷d[b&c¦Nt·Æ¨¿©Ç«¦é:¥EHh,äþP´ñ"ª¿^ 5¢Û¿ŒGƒîºÞÝt2mžÿ'zvºB䙲õE.•áö &‹UÀ‚åóq/µ: qĨÓ9nyÌ ±U‚z`þˆvãÁ1WH¤y0½ÓŠœ|•OYV+‹NûZMK€_7ÿ¸©¥(S?(GC°Ö(ØlKn#èZ'¡+؃|Ø7¶Œ/ÇN³ÈRÚ)Ç,‡ìLÇêdd/9Ló,ÙWô3Œ‡!êf¶ÄšM¶´oÒH«ã/ãœá± t04˜à/Nÿ]íØUiûŽt•Y§-ðed‡Û¼#íCã–¼á‚d 
!µð•Ú°”(¨nlEÀÐ÷wwÇ…‚Ýýöºz,î_Î.¾j™ËƒGŽÀÍaùìY­‹Í`QY&@”¬{1f¿n37äÉMÔŸ©+^¿´Ø^ªOýõðÝëöbNoÌë7çæèÅáëç'æÁƒÊzÎÃ,Ù( EÿÈ/éÞqƒ„4E#Ò–»Ð}!»X Û.o’_€Á|GwLú²”…åêg¾¥o› iU™ðqi¸Xè.QâœZ÷"„d½d¯Ï—öyLN”A¾±QS$´àÂæ(tX6¾D$Ë{¸`¹½rü–ìuiÄãޒѾ !^‘è’r»Ê£dBÍ’²ìþ¼½0 a71÷Ÿ)‡.1ø6fñÖ »VþŽÒdÈ1 zèlžD5^Ä ×ÂØ½vÙü[²#“Ä9šâø’-ÆÒ§¿¼“À«Ë‘³Z¿¡¹3ð;^‹IM²ëí҇ ,Cº2ȈIgDš.Õ“nŠYŸ³CE’$Âi3]"ITào•5‡ìSÄSâÓ9s²¯I1v‘½âÌcÒcÎWuÓþ{Y49×âœ@Z"MÖ{ç#8–zt8r†…ÞbÁ ÇOoÓ'š:×_ª›FªýHôSPD6w2ù º:àL‘Sý½¨  R¼kRn9ÓÎwy+Pmõ&¹ÝiĪçÁ=µ«ÍIÛÔ -çÁ}Yš+¤}÷±sƒ—ÔýÌ æ@Â¥XÚeµ h°0ˆ½™M„bÙQiþX. z±åùÍsÆš™êhe«Tx” û›ê"Ž¡«üo%Ö îã±N>ò–¢Qå­Ê@­ñ…Õj¢Šˆ#›Ð$.¢iÿRö¥,FJ¾Á%lÁË©—õ*I®ˆË»“³sZΑBÄŒyq~Î>C(÷UdR`=±…sNŠ»ÂfS ­#¯EÛ·— ŒµëÙÛ»áìÆ&­6©4 Ð\^T§>×46Ò†ŒU‡<vuË ö£¼ucÙ» Ÿtû#’(ù±“šÛ™e´åô·UäÔŽ^ˆÐW8>¼›ƒI ~ÌgñãÚɹvrþöœ¾ËÒww–]™kÇçÚñùùŽOœ`ø=0.™³³hÝ ˆ´–kFU!̵¢U©‰CfËIª;Ã,X·$„k³Ö÷vßC´Æà/bÂC¤Cëä”çûç‚}ÛpÑm³ñæ2`àFÝ÷haªðH+9I3ø˜¥Ý1>Œi‡ÇTá(~íQº¬9Ž|ã‘6¨‹ÈZáU3iÿ-Mà6ü1-…ëW]hËå¥`ïáZdZ‹Lk‘i-2­E¦µÈ´™Ö"ÓZdºUdŠú“Ûä%ú±$-s. 7œå[õ?ˆ—]',î +n-B¯Ú§_  „e¬Ág4ÁR•õ‚Y/å…rÙ\7<_çÌ—i0¯ŸDds®j¾ð`°ÇÇ Üá\;×ðˆÓ©‡íÙIÆ<ïq.UƒgSÏß’j™1õ \14g´”NÂc+|I}8c†—ëÏ/‹ .ÙtFO9‚Q‹1Ç‹e¹¹Ç1Ɉš@ŒWilÒƒPÛN‹À¶Ô‡æÁcÕ×iºLFb»;êBUŸÓ½j$€x—h—F³¡º»ÄØuøt‚w1tiB>HìIÞ“KDüõ¥ªÛ#™&jÍÙpµNóÏøÄ 'a¦?9LBœÿvNͤî¾Ç‰ŠÖ³U.ÛH ~y“¦uMV—;Ù|"J|—|éÒ=[Š)s÷’Ö –Mt˜û“c?C‡¹Gyö³ÅI þÞP–üFƒnTcãîâ’8nãà»[Ø$ÿZâ“,p†„cq¼…"M2dÜ›M¹Äf8ýÔê Zˆ‚ÅÌö¢Ãí°¹Â±P"âHé¹3ÌN?@%E Q»A‹h%‰Ð"ºž=cÇ5 ʱ±ÄtõBDA/µ‹i ÿi•"§Š#™;ê.QBA5z 2JY»˜Y4Ëÿ£uMâqo–û—S,EÔ²¯}>}ÍGáUIÝã/-—µ¬ãZ²àå?ºþŠgËúôßüU*—wݵiôé–{†_ü[öŽÚyô©e¬z)GÇ(4ú•DÝÊA†$[ÌhÿrÉýïÿõ¿Ñ¤¢â³$á/f/jãàd³ºÀ‚U‹ãzüì%푼7–è¼KW2ý£ó@{3çL ß>5ÀË+dN–+UÖ¢†áeÑFlk¤) UiY )õ®OáÔrÆôçÃl Ž`^X6¯¢dŠì¥1K$ßNƒiLfÀ&–ÎÒû\3>"‰“‚ G$ûg€—éi@ãÌŸº™‹¨) rG"Æ4á˜FÏ0šÒ©A ©,PMŽºÏ×@“;xŸw¾Dîjn¾OЊû0ÃÀ¿oýT7š™¨Š,0x·D¬ ù¤¤‰H(To©3 `_ ÀUW–[ Hü}³±€ñ”ÊÑ„¤¢òʲFIÇI ƒ]do€$ÀÌ“>ê¤Ð@é¼K¥tèB ’¯.4ÈïcŒë¢ÁR÷Ô¢´Þˆq Ç*J@8 D·-KÛt’Ó„^H’‘ßì2Œ¿V’aäŠa/P,ǘ®0òúZœãÀH%Œ)ÁdªXl¢dÉ å-RAF¹L0{ã(ž¥ÙlìFÜ…?醸—¿åêfàß‚«X¿½Gk ²¦ k ²¦ Í)HNÒíÇ;pfø÷`×£7JI ³$ʧ; ´¶3™O/Q[KÔàýBTzc‘IJÀ[~‰K= ð p­Ó?j,'.ןÇÞ5©•út6ÈÚßÕV^`“8£þâò èÃ-&ýX#ÈMó Ùgƒ4þ%äÜ0.‹¡ SÖzAÀëx'¥WÛçE LTK¬Ê(Äæ™OæÆÍôA‡ÄÄ`žI¡÷‹°Ç¾9¦2¢e¹LX‹' °›¬wH¹øõ¢ùISHqÝVMõ-Õ_–F:س-eSW…õnÝgÕ­)@²î 
²¸Î‡±ßþ¹¬}é×Q¾4Åú2ðÐ{½¾äßKµ­hl_\¥³Q…àC”ËõQå‡eÝ/¼¾Ê Æ)Ø’¿À>Y:k÷ÞJý±«ÁëM]«:(–÷$õáÊ…å—ö´XU~yO¤jÇyI[Ç¿—Q}«yO÷`£okkÕˆ†L»ÀíJÅ¢®M Zμ«UÊæ£&«š4¡üá n-»L‘6:Êí¿§Ÿ0jo€“xòKŒŽ{-¤Ÿn³¢×9¡Bl5[í¡‹‚$*ÏAD·'sÄ@ôÃþ̺Ñô£Q”Í»áè:œçÝÈš–Tá¶ÐˆKUà™Fœ¡‹ð„0 -k. 7Œ››Ð&IíYĹ}eùëè¼?2¢Ú݆õOÏ¢)\ÚdÓ—*¨’\I%óLönâé¦äµÂÕÑ@í…5îÜeëh4èrñôåƒ9¤ÔDŒ`8–Ï·_¾|óÓÉq÷Å›³ó3¸t$… zeèé–佑´Nú ãöÈSÎZœõ茙ӷõð¦:¡jÕÄ%ÅèéóŸnôÇ'Oß?ÕVÅÀšŒ0íŒïH×—}–,ŸØ¢=Èe…ú±½?°Pº€Tûýh2UÀŗ󇋊(^g}Œæ,2 Pò½ûìäüüôõóî_¾¿HÓ–ûç_¿ï…Ù]«Ú|Џ×*ªÛ[í Õn¦ÀÕ™±%úeŒ¸í¸Þ½À ÇÉ¡?)î “†T_ÊP°ŽØa§³-ZrøÊÆ4YT¸Ïï<ürWåýåKô£­——)€ø÷(jh²*64q h"³y†ùlb|ò×ï¿ßm×~ÄOBYô8Ki³Ðê–­,á\â[Ê•÷Ê›ji²It=L!™õÏ ÂÝ^PWûgåNÈBËß¿ÿ~¯ýø«¯³–[Úgà 99ne†’„0KnZ±EäÙÕKüÔ_”t›Ÿ¾5á “‰ã/M…ë]GÅ‚Áv!…JÝï¶ùÿX&&ùH(x˜5!m'L«éÝ™àð“Eè/µTG…;¿MÕLWWMM¦£gißí~Çb0èZÛCW½²M®—lQ’,h <gƒ°æ Ïk¹» ö,ghp÷¸ìk!È~¡Qt`,‚âÍàu£D üÀl÷Ü¢êoMXв·ÎAŽùA:n¨›!EbøŠpœ½©EØ—Ý©®ݲFb"¢å²¢"-.'ÒÍZÒò.ï¢b† ª=Zg,knéìÓÚ 6ÿžmºv‹˜å#äR³C XÝC.øjK°3ö>G ²LaÞ¿{™›-˜?Z¶öæÅt²­2»°9dÐV‚qPjt\NÉ ½NFi8ÐhP‘músclA½.Fs…îçèDÏ~ÉHäËÁÎé8y[è¾]<dˆ JìdÑÅŽZ™ÂÁ8NvþÅ_¢@ÿÄ1 ¡U , 5ÈUË"5‹ÑGFìQ™¸eÞþõüÅ›×oÏ_Ø"=vô ]óAêö²0é7Öš&©‘$<¤n&(\ÆyAQ{Ľ †?é”p´²YòÑlÙða•;Z†7|OÁnŠªd%Ä8Ô0¶¶ ˆÞVxˆâ€3nb^Êu‡«•_ (IïSÖR5GƒOe„|‹ÃœÑThÛº+-‹ RpfƒfËyÕJŽ/t’æÁÙ¬çÊÓü":Z|1ø”ÑêuWrÑÍj™b€šôøém'AÊÍËÞèÈ3^Ý_`Ölóa®Ù 'S†Úû “DÉ^KˆŸkÙ†’û§TIq'¹f‰ÈU²ÓÔ…ucdF‹è‹Òát´ãÒ`žOTÜÒ¢LõR×þY¶€×²°Bîg=€>˜Iì~ø]{ÞÄBÒüôfÕ6B´ddò©§š?¸à›ó“Hí^Ÿœ?xPÒ³¸Ú Í(fÄâi¤‰IÏíZÜ]8eI¥žõFµÄ}çe¸{R½ríy[05—鍨ì‹ÔNy@göçTëñ•n`º”ñÎlw÷aß·ð“hG~`ayÀ¦f1>TÕw¬¦cKùRý¤êkÔ·TÉ'îöUh˜_Šav2w2TZÒ&$èÅ$A°¹´é‚¤E_m›7P.‡-è -‘þ9ØêòK²—ÞqjCÝ‚K½+xõÈzÅám6¦Cþ†“Wm˜„@OTíUu½¨«qÕ~ø³¶9æÃrÄo÷ oÂIÄ&«ŸgéT¹3®”Ñc@¿i~˘€À_®üµ,‡Èƒr¥“#ìÕ ,õ²¿ûè;ÛßÍÕÓ‰+Þ FùÀøjj'ÆAy :”ZŠZhó| ;DÑþ¸­¶9T#¬ÄûÑ_¶¤0 ­Ø¶ñJq”—úúú:°E xdˆêRCWÑ’UG`–á9<_PÁ‘˜^YfÛ0’¶›Ÿßڃ봥³²,ı¦JôæŒ?Iºô-G Ãàˆ«fã u¨sÈéP L.N  +œÉõ„ê4PS¢²ë±ù ¶#mBN€<ôhaß,âéV­¨»vØì²È[\Ôv|Û–>,µM½åó §§¶„¤(aãZ¦ «Ñ4í£dÿçêÈ©çꙡñ§Ûô‡1‹¾Üœ:(¾ã =ï;ºAĽ"bmCþ+`ã³$¤õ»gG_ÐâϺy ù4Lè¨æ®NªßãÞþwå.? 
ÖÞ0¶ã®Ä\L@ncÛìòŒÛ¾ÕŸÌ¶ÍïÌ^íÕãÿiNó|a秾Ӥr:iWD¶ÖÆ¥æ?ÏâAqq¬ˆft3¯ N]|mÃP}ëªÊ{ó©oñŽá8³ÅµüÕöâDËY ¸Ÿ¸F°´ hU…b£Ûò‹<;KG-X(‚÷Ìùfi3߀Ë=ZßCé{~à´W­q¡o¡†õï~g³ô²¸x½8 \íLfž{UtNßòg'g§v„$ºÙ Ʋ{Õ°x8i >Û¹#ðþ~ câÉË ˆ'0ÄÃI_ÐOñþ…§PÙKt ¡+A%aÁNìŸZSÀ;£¿Ã|®ÃKçÀ&ôºHý(XXÂq ="1w Øòr«íRäj07z¤ìVû\³Ð[>bƒ`1ìµòÃÊѯ 0Æ8*P<´R¸fù‡e}/¼¾jÐo] }?C±îç¥IÑõ}QÜnƒàäÛÐîÞžû 䃑ky5ìyáz9ÝËêœä$±bÒÑwålX¤’3«´ªÌ²žØXäf9ç\h¢iU¥_è2 YQGUe­)UŽU l'Ÿ¤éH¸ÅCÝGãå^)ƒ¶ÓŒ“ÀVHé‰W¸ׇ<+JìTÉ•ݸ;^àq¯mÎDgüe)c¼hüšåðÚ’Ô±µ¤f<.–Qð Û;E‰¯ºá׌_×Ñ•÷ñ4Ùé*ÕìÊÑk°•—©­= • -í‚]ÊÍcÁøu#:¿Ä7â´¬_…a†„'ЊÇ#‹ðÒ]ç£V¹®=Bµ²8̘5#å>»é¤*ÿÝrÐõŠÆÃ­–ßÀøGÂØ6\MêóÔ¤Q‰BÁáª6ÿèÑCfÃnS‹ˆ³×Ó$ Hß—oÝ7b. Í7³˜þ›˜ÿf:úÓÙø†¸è7ÙŒE›o²«Î†Ñw_äú·>±rzñ?ßE(t•ýM~¾¤7ñFp8$áÉûᛳüà›³Kn¼YœU/ª$ölO/²$m뺥ø½ßï †O71¡™—* œ½~õÖègÚGt£¶1çs%$²†wq3—ï4TfÊ»z°³sŒÛ"Üò@Ûi6Üy¦âÜÎY2žü ß}sz Ù±ˆ´*I5³\EºÇchðíbJ”X*/:œ2É>­É)ŸµÓŒBÔ¶“ *š‰ôñ”˜Ñ¢qbV•0—=δ|²;†Mâ¢ä<•œì÷f«½·Ó~²«±Ã¶¼š«“IÛ“‘Ôù!í‘À)ÅڨǟB®yÀH-²ÒÆZä‚oòb½¶ÛíMgp`à®=û ò±0ØV97˜l!DPkò¬ÙòÜr[¢ÂÑ`Ñ”—„Ã8UlT©âÃem:Z\ \f\3PýˆJ_V;Wð4sèܲ²±Ã¤s ~s‚ Û²œwY2`ä!MZKNù¼¹üT™Â²?󔥗ça |°÷][~uÖp­ íJŒFÉULb¬aïdKkzQ̤±ð}Twb9ŸKŠ¢â-D/š^G‘¼]çýôvBïNy–,ÓŠEˆ¸ø³Âð9p5”dä3âÁ´S9p\UÊ‘²Ãlgæ­Å¶VÉ>á¸ÏA¡Õ¥µŸÎžiy×ËBZ‰u!UNã‰g#‘¾ŠM5˜|ºP!/Š^‰÷ÊKêØ@}bAd@†F¢@•9é[ #ÒEô/ ýeï†Øñ¼¹®Rת±_ß¡±dIgù%Üã¸~MŠÍž½y@Hüëá«—AA#¬•O[4¶ÅŠWró?;VB¾¾nKQ7w6 ýF#¹²³AG Ø%ø÷>? ýÿx²û·na©%bxiBªàhÅÆööwÿö·ÍÚX¹ÆÈì=hê4ÙÊ·t¢ÖÍr”µ¥%¸9÷±.Õv³ÈƒˆU¢áxÔ6ç"¦;êâHH ‰RÈœµ"è·ÈÙpƒÉQËYè':KNXt1¬Òn[„ÎF@7B½âàÊò©kÓkD“ á*$šÆ‘òñ<°C,lPÖŸ)@iÜ ¼«ú3±4¦Âù÷‹4móªçh™ïvÿCŸ÷¬ú|qƒIs šñ¤ÉFÓC@çÐþ§Éâéo™Ó·’I¤—D4bdI$.Ám£ÝØÆÐÅRÝÓ $‹5P÷¾e“"ö±éW“ Œµ–EYÕy“[„'UÓ¤*éÛe‰Ä=ÚÝC{ù¨ Îòù=­Dgå#¶ý°‰qq˜Úl3Ç×3xgm*e©a\!lU, »– ˜Ñ´¯¦Ëúvß¶ûÙÔ£Q<]Íä$xëÜèÇÏœšmösg†éü •0!×Ã’C)ðuâeu„Ïù ¿°øÌx`SIS1/÷}\üÒñ{ÄEy¯â12‡ƒÙü17ç«0Pél:LK>ÔÒ\SVoº½pŠ)ÁZ–240i¾£ö£æü/ÁSm(€ÑÈŽâu…»‘vÑJé|ªý‹€ß!bgèÑ ƒ‡{5nTù­ä:=ÎRتMQ›Ms[Ç'‡ÛŠ æd<Ú’# Ôçi²ƒ™6+&rI¾-fîn’¼ÖïW#ÑëëöK\·Yx» €~+]6Žù?ìs¬¸TÛ˜Ñø’©²¶Þn›³"þ¹¡€>³é†tX©½é\-œïc‹—äDÀo"8Æ; b 甄7ݾÎZôK>öô1ÒH, ή.f«8çE² |n€Æ›Â~Ž.yqÒ*—œ+C‡ÅÂD¶&Ýϸ2 ÀÎ𬞄ÁÅÑ °Œyggoì )1¾OÇûÎlYĸ h$—! 
­\:3WTöD¨FÓIBÙFI_“RJC°>fàºt.tÝ«¤Ã<ò¸³NâmÓ…Üüs+–¬­kkÅšà߃µ‚îè§€uÞꛜ2Ý×zíÙ'Ðälàv Ûžše—½·6 »a(Ÿ}a†Ôp0ˆ®r—T*Ìì×ð±j鯂½A9»3>-s\ˆx-óîäìüb6j/ÃWæ„q7Šš¬jv9ëGI´0ÓéQ¿•Z¯ž©‘B¯iÛVáHd9a':§ç±_°ÙC$ºŽææð*ŒGì. [úIˆÁŽyÃh@n¥óë”­wWœ9´áå©á­Üo†Ÿ0›âG†bTQ—Y°Rû:ëÌŽQG…è´%ÆÄÉ¡Ðü¤£6#=µ1éWðR›ó(A€)XŸÐ©³½éýw!j”fyâKßbo:—:ñ^?å5“…2ÅÙª®QK8—XÁ–ùïÿùÿlóèT¶ ¿ ½~øö4—Ö9óË z,O_KÞÿŸÂ«u$(v‚/íâAÌH¡VôM¡vyiùÀ®ÅÏç%í DøKÂÅU¸“TJ³af"浸j{`×Lžk  ÅÂRñš±,ÅvZ­Åî#¯ÇHðç+sh‹¼`m~✶§Ñe¬"É(_Šh¢(™ËŠyÎ-l½¦µÓ52QÕÆ¿=­TN–[ºpzù^Ê5°’ 5&/˰‹EgãMz-[­¯DaÓ·ˆéf`I…U”r’•ƒ²£r2ý·hÇm’çtjºÔ`vÓääyWáD!–ºß*¾žB¾MG¡¥ ¹ÆÜ¡Gè Jjf–€í›—$U&¹¾Æ`rà«&Ò›ÓTi•³˜ä> Øу?’h“‡ãøgŽ¢àÈüfpÒå«úd™ºÚ´únmòr˜ù;%䮩¯Öñ=)ÌòrƒÜ.f¨A#iPQ.ØO·ìäh¿ÁE·'h à"0ƒ¢Ç•VÎhKŠîjyÏ‘•¸c?…Î(eɉ¨H PC&H[ Ôu90Û{)¥Q7{`>Æ=ÒNs!Í>V§òÞrôÚ0nÇZž¨|U"åyiÁ-žØœ™\¿ Ï’DE$¾ƒ‘šíßYýtçþ}Š mŒÔ·hu‘‡Li mŒ]mD¥—¹æÉ™ì‘¬ºÔÇ!BÄjF9T¬dαfðÍkÓ2jDÃi'aYDCQC¹Ezæà0qá8,áM¾Œ›Ä)F#®)Ì–RS=Læm‹ ¼AT®ËÓunvÇCæm¬ø#ö•‡T>ÄÅr~ßÙØƒåýa{ï»ÝΆi·Ñ|¨´Õ\¦,¢sEÌbß;8TÅóùÃvõp^H 'üÂ8ŸÄDú@pÒ¯<Þ¯{ù§rž8 ó½¨*rÎÀ5Ù©8æ9èòyÉÊz[;m}QˆZõTÊϲ™¡Å „ñ'¥Dž5ë’Ú˜ØSÅ4£jŒ¸ Žif_ahŽH¡³¢öRÃúG¼(‹‹tÿ)׿ÚLgslÌ—W)бßPY¤æžR µúöhý"–žÆì–(h¦gÃæÀh±pf¥ê^gNÅ+øêÏõZÖ¶å&P÷ÜSÃj'Œ‚ù gŒ#ÏqmMù’b‘.%ƒhqÐ^Ô7ï7“Äð:V‰üë§3k—kÝ"¾Ú=<::9;ëþpò×횪,®«U¸·íÊCpä®ÎNŽÞœßÙ•;›A³ýyC{Á6gq;˜ÁÄSÕ~W®†·R9®Å=g5 àá¤Ñfs›þ©…túYJ+ì%¥x=…l ÕÜöÚù¬†]ÌàÝÛ£Z!#{£0ùi…õ~†³f=·“®:’Õ ™Ë#±ß~ÉHˆ¬ š±ÿ1€!hŒ> ï ßù¾H¦ïß½”4›ƒNb½ŽtèHWO¢6„5‰mÀýAŽøÎÕ~{w§ÁðVÛ²bxßÚè¹Iø4Þvò‹n›Ké~§VJ8Ã¥LÙÊ´j'ò•v¢kÎâ0G[PùK»Ú,`¿[iió‡AbeÃéž=4òúí5çÔZ sk‹×ñÅt¥AKr†ouÄ•áÆ»}ReÒ[@DµB¬±9ð$H2†1©ý%!AS!Є±MÔÏ¡ Þ…Ðöe^/ÒkxêHad X.öA=TøD¤ÁÊl‘Æ2ˆIÖD[àK³]`FQ‘^'MÅ_dÕ–¾Ê€iiŽ×Q/ä]‰þß(÷…H¼ÏèŒ>»­³Ë”:‹#Wø&iœ”xêò2ñZÉ¡ÁˆüÚß\¥›U#ˆü`C?Œ¨EÄqDãDªÉ¸ŸŸ†‹Ãé<hË„ÃËb†³$¦cY>Ad6íînš`fXÃZ2x:ibWi6 ûv>U–ûøù›7oϬv·x³‹®›‹;$Í•z\s9~õy“u‚uÑ¿šæ›‹‹ú³ø|鬯_š(«/Â=©K:ÞYŰ÷N„¡dâ/ç³­R—5 Co«Ä„¹¸«öÁ…‡šÍÔmÛ]Xªe­™Éí/pÅF¤¼“ÄMA°Î±r5 [¿"ý/zZ…ü]5¡þÎ06¼¢µHÂ<Ú˜¦i4Êûá( €¿Ø,¬ Ù-/ú&³óht†WÌKzÅ<-^©Œÿ¶×–Ù£†è"¹Ë#ºÛ$sëDV²XÝcAîUƒÙû£(Ì®Cø"„?–\D•Ÿ–Ùoè(ØH¦_¤BñÊ0|#q¹5,àŽVŒ·Èü€Ú^¦:m>@‹o$a &·þt–ïT¨“û‚̸îº*îÚ[òŽ~=’_o™Å;ÿû%î¼áüsÙI|øüíÛàôìh}öW:û-Wf»d×G«W8o`–]‘òÝÒuÊ÷ÕîýÊÕÈÿ÷¾µq•R·Yž7ëöG¼mÞ™óó3öTíÚ6É t·šËI`Ý]–Ò µ›ê"Ï}ŠÂ˜š- ¹à8,ì2G%ѽÜ ^YûY¡Ý)”Jn¸ƒ±âRB`/-[cXØ*oàÆ·ôï“Ë 
C3迤H²–d‘’nuXìáxÈh°ÿügÓY8zÁøÄŒ¡Ç)1»( ×ÈUEÕºOeªd.uñ¸EÊþûDú{qÓ€…FZ¢w"Ž’TïAñÔz#Ëa74éŸÐßïîî-?£C6­;©Á"®¸ƒ”ã"@Ó I–$µÌ8îg©€7J·"J»Ö*¶ÔY8¹!Pj÷3Xnë¢é©Ã%È9áò¤’/f…ÿ˜¨Ëüw¢oÃ}8Ÿeœ¦ÊE°zÅ_. ÿçÆº±my Y»Ë©ûOxÃ/{¡È8J‘RÅ‹c'XfË*5’ýj{“`€Á‚Yìî+Ij`@Ĥî>ÚŸJqB’C ùP‘X®{úg`ãë¿lxyþ.Rd:ë_V¸âÙRŸ·÷æJRÜ8M†iÉÃnŸ,Ý{«%oDƒ’ˆ,ÿ^; oýôae±>h\QøÐ32{E­Eaÿ«´Ô6¯˜p.rÁ xÄHÙbнi8„|söâpO`:ã)—’Ö¢¢;(-åEo-Ü¡!(ˆ˜4-êO7±Ôà—Ù¶R”\5³q´ï‰.ªÖlu<gpmÄÔ¦“1c¸á18ú›‰¦ývÅý^ö``M© Wá´Z/^ë;¹‚J%jÈðDè(ï›X-Ý9àPì-ÚÈÞ§l§Ýno—fB?Ø)4r+ßrÚÞ¿{¹Qdáe²ŽÇã,(“Q¿×F,´kA(e†JýHœ‡óüh‰Ã†Ž â.ã {l5*šÝ~.æ¤lH&YR8àÙŒº;˜5ÁÍR¨ýP }íwUMîbgãÁÎcó@þŽ!z1bÕ[øßm¾¤Ú½”—¸}ñÿqÈ·ê"RìEk5þÿì} oÛH’ö_!ò¾‡Ø QþÊ$™ \b{v<;^;¹™Åù`P%3‘H(ú#‡îoÜß»_rõTU7»)ʤg&³§6c‘ýÍîêêêªç±ü‘†õÞ‚ÿ+÷X‰þ¯…W·ýûöfgË«lÍåÖçëæ«ÀTŸhü +Ë™qGße¾{¾©9×ñÚÑnj”ˆ™•|áfBª†É¬ÌÊòµœÈøª *; C76Á7%6¢a]dÇäE•Y¬ ßȦñ¨-žýçF#IÝtRÜYFéÂ'”…6xÃ8[Uë[ð%õjÇS]IÍ+þ­¹ÅW܃”2ËpApV0h¡ô³~<&fi…㈙m \bƒ$%C(®÷ìQ“äB1kñ:Ãÿ·é9¯?Yt&o¿éáŒÁ° ²êxÑqáªýãêz™úÿ<›Ö½ÙÅ_ —ĨÓjEs­Ð|ñ%œ®iƒÐýxì„îáŽSú±³1è§Qt¾ÙÑÐr†"ñÁ¦3úäÜê¼s‘ª’KJÜÚÃç[µ» ÌÕn%(|}ŠÙ™}(^ê˜#¹ŒE¸à `—í‰XÚšÜxö–ôTåH.Þû…™—ô› “)ç·?&ó¼('ä?}£ñ ü®}ľ÷æÚOØç½ãÍ,»É—™$ÍÜA`þü& ¦Å cŒ=¯]îâè*0½Åü*Ghï´¥ó0|Æ:@Ï(w¥¿ÒJKæA†9È­âb84”a§ŠË(‘,óFbü|-æÍ·ìçþûíÌ_tò°“q«0·%U¶ s[r$wÈÝ“¹ÇÁøÍØï\ÑØrßÇr¸à„˱´èlŸ²ª Žx)!…p”§#’éˆÇ”cy _²IRõÝÙÁ!Qš6gÆ‹ÑåǬ×ò¬ìPs‚kÄæá÷\j_0ýùU1„J»Ê8©º=‹_‹0ä¢BÞÅòÆÚZ;Aƒ^û¥yË=›Zãô7Ë=‘µšº®%tj [ò¹  Ñ„ÑqÐ Ä%o”.›âÞ2eÒÀ*Õ‹å¢Ïàséæ…£¤Â˜t¸6'v–#1e«¼–%=Î/M¨~‹^¹4Î’IF°wøh€1`F‰ì5¯v>ðñm?æi·`¿ (ÄЃ³Jý°-=¨û•Ú/YZµÞû¸³ž^:;-=º¦L:m ŠU(tÑwpt ˆCÝ"ü xQ.ÒG!FýÀ£1£LîÌ9Ų£HxÉУ  ðøQh›K³ÓÙÔû<3æHTÚ÷… ŒdÇÔ[²!#Üs™!kÙ!¤Ëcˆ#É`eHIà†ø)_EmñyÞ/tqZ¨ýÍ™óRtO_9º69æ±þB!¬ÔïÚk´Eé±µã7Cm2 Å8Ë>Ê˜Ò ¤½6~¿ížYï?K¼\Ÿ%Ög‰õYb}–XŸ%Ög‰õYb}–XŸ%Ög‰õYb}–¨;KŒ£Ïwìõ\Bü¤yíÕ× õÐ TOœœ›à8ªófFZŠÜ V¯ÝÅxÅˆÏÆÅ=®qyðæèäà{q(°;TBºØhQåpx}zd.*§–g †YBÊpLÇ ªfœ|–ìÂËØ©ó¸KvI ‘’œëxÐm1ªN,Ó½R°6+PŒ¹Bq¯ï^”û/šTy| dYMï“à:‰çü¤ÆKØ~Év{Èì ßÒ…þÚGåXÇÑÛ|>½Ù´ßÂE”u`°Æ„£¥à$A(di78¦­ 曫dt%p i°³½û¬²÷^¼Ø…‹WÂ$žøø*Mfô«8š9Da¾9~#[+ê•. 
lQÁ *àú*+¾¢™ëZ`5™øãªÕúa½ÊPIŽÖóç»/P/[-³¡LªÕ윚G̰œU®sÕ¾‰nò j©d]¥»‘1¦¬ÒaÆõ×|Lõé´qΞ´ ná /Õî‘°mF_ÀgN7ÅP2ñóÛÛü»Ãÿîò¿{l4„å·:Ü/8|§¬ÿÑçg)Ö}Žî~÷>É£·ùÃÑ£·õÅó—ÜÖUæ‹Ë H¼˜ÁMfÊ£’Ù¤ói‘SûWüÚ2‚v÷Ze m¦…‘¬L¨—;ít®Z#çZëZk]k­k­u­µ®µÖµÖºÖZ×Zëzd­K.tjá1ý÷®&úÓAùªr¯å½¶<˜þcs­1‰a#Lr±ñ•ð«’ã}6ÍÆÙè.ø)šy‘“Œ¸ ¹ÃަÚÒæu¼EæB8éƒU½T:>ûï$ºc’¢—œ!ï $ð—@x€éÍLG? YÜGoñ4xÐUÐî~fe Eˆm4ËœËgâ-o€æÖ€Ùå‹vƒ}ÃZù¬UÅl¬Ù˜XTð¼ -×3Á¡\ÞÖѱOt½xÙB½íÍ·¢^¿Ë¯j©·€'ñH½5EÝ×ßÜ-äŽï¬«ê7l™LÐ Ù)à=³Y /<‡ƒôüooƒ¿àµRÄ@¬ã7É(8ˆæQ‡–Ô †HüæjDØbüõ)—ï^«GÏ R˯\K\Öbܶء'ê3k& áñ€&CjPÑá7m™€ÁŒ0qöH™E)scó%û(E½; jàFF‚tÁ½ª ½!YBë8¥Ê¶h¨ÿ9ÿ\Ñìéy1UoµÅÖÙPðY9¢æS|xxøÒ—Žgýühõ )>WYÁÛÏ®:è­w)œûåËf#¥ŸU˜ñm•övà™¾ÎücQù´1ÜK»’ËŠ%Hsì³F×'办2fÞWçûýÚd?pƒ:Ú©˜6z¸K¢•ÎÚÒÔ”§žwÞõŸ7ÕÜf„ï«~á¸í?oÆ)k3³~v¬R--„¾0.é¢7RœTw›,¾e€6Âl…9©(óR eÖdV1… ñM¦þ©Î‰`Rå– ù8¹•kx¨¨q?‚û02ýtü‹­\Þç"i!`ðè1,cåéƒàTZγDgtä¼ô…Ô*1WM[é èmÙ}Û:^HÇ‚“²³YZíRJœhÖQ*R>¶âž+{ƒM ‘£‘€Ö 03ÜÕÏ ä¹9–Zßëh\¨rÏÒ7ªP Ðày"J•Ÿ¸µ„î,ÃѯËé„Q Á}lЪxˆ ñr¨’ÎÂyÌ–iü õÆ\П¾x<,Ý–ëô¬Å´Ѧ;S/æ/Æ…º*ÇÕu– ìv ‡\_ÊìpUÚ0w"âc1}’3¨ÒГ¸u¡Ôª9;Øëh1O¶ÚY¹[ZXd‹¼WvÑÆy©ü/—h6¢<:|y“ª´¿¦e‡Êúé…ZÈÌIÛw.@›§©¥±ÅñÍ_ìS—4ÆéT7pç¿@*L´|.êÈÓÃ.¹ò£[s^VÒøÊŽÞ0K_3º±Â”ad°œk–â’ÁÕu|OFYº“)©Cºd•Íœ§±+–&¤‘ž÷?ÿõß9û)‹Þ $‡ n()p@ ÿH’ÄŸX-¶½èÈÑ\¦.|FÒDÉ›T£¼ûìí£ñ"úHuÝNn[}ç;mºØ –á‹•:eÇÌx±cS–”>‰ï)ÿÜT~KÒ0MPÞ—4cÛfxÆ bCÇT6˜ÆþôoÇþmÛ´´‹¹å£ñã´+8àf]2a}\bu³¡ÔRYþòË«ãèö”Z|NU/íÍ6,ÊHŠ÷ ƽœžÙdJšûìi.D^\0‹9s&ßøHkŠÎô±isÛžSCióçSZ µ_»´’°ß&™™¸Á¶ñ»‚£ñ¦©•Ï"!ÑÊ,kBÅǖטøís˜{«„@t¾Ô6ºŽÕ+‡:™»LÕ¯-²[gdÉÇ'06²Rbw /”‰½FúÈŸ(1iÉEÒšäJp¬Ö0Qš"á,—Ië*í¶MibÏæg·sÞ”DËã4N¡U™=Õá•Í6Á¹±SÍpîƒ"53&¬Ü­’ÖÍBÔ“Û<'±š÷)éØÊå`#éR£lVrø<¼šp³#X–^×˯-—ÿ†ú¿°æó?û…¤Žoqγ÷¯©µØ¤HN¬lÎ.ã‘$ku{1шösñn¸„n4QæîÈl½cQÍÔŽì’Áb47Y7œâR©_Ð^ß‘9ÌAÄy%æ’©°tÉýåì݇ÓàÍß;žmÏ€K^±ÖÆ›7§ÿéÝÑI§Rðøm¼;;8<£â•D塈»}C*4©0Ã"嵫ˆÕ{õ|^Àv#!E:gøê,5ÑsÖ†¤7ÿÜü€@Ç·É&ƒ«²ƒu‚c Bnd¦ð˜)¼‚”È”¡GvŒaÇ/W#R‚s^£íMá´`§‚¿ ÐS%»ÝJp4râÔg‡L× ‘‡É\.lÊl:zÚ!ÌB!Ú ¶»ß=7_މôž_5Ÿ?퀂¡¸4cÉS&F36Ì#}ú3Rt'$3R¦á¿v·`¬ªK×—þGh[V“.uF­¡rÆ“ê=/a‹ÇiòpR(@«°»W—g^{ÖÒ×%’¤0Y³FOSÉ‹\:ûÃ,Dç4¦¹Àǰ¤ÊS<”ŒØÎöö¿¸&ÓéB·sÕ†SVaq‘"Z¾<‡ •ÜÅÑ,7K•-/iLûêÏWT^•`ÓtÄ1áì¸ëËQV1Q8aÔWÆàvÌnY6QTl”„©ÂÕ8ŠÓö4½jæ\ØYh/ÉÔö·5kÊï†Ó±"ÑütôÚ$’EzUÙG_ÿ?‰]?Ù—»Ë¸©¾’ËÌÊ`¾Ñ`’¤-,uKo˜4,æøÌ‘‘l¡¢»A“x¸˜þb4aC†±t°ÙÀêÜ!{vøúàø°; ‹ 
´\'bø©ÒJKÏ>¼ùûå_ö/_¿}ûnÿòíÑñÑû?¿-ÿ Þ\žž¾}÷úàÏ[E>Û'=üÞŸðiì’F'¡?»yÖÝîît·%ôÝbÏÞß÷¿`[ŒFÁ†æØÄ΢ÁáàI¶+¡Áë:诩f°ˆ¶ÿXÉðÃw‚¢DÊìÌ Œ&Âa/°Ükm•¢äˆh›©üË`Cã{¶˜Ó'VÏa?†þ8aŒÑ(:‚¿Àâþ«'&*H]$è(mÕ&k–q¡¥A_ÍÝ„-Ö*˜ˆFˆ ²>• ÅQÕî®;Ÿ‡Ðy’h’‡·´%ÄëeiÝEe5…»¿¡VŽ)p*|ìÁ¾Í²0PmrÜ¿#-ÞóaTö²‹™QÙõˆKÑxöç.Ó¢}Ô¸¡hBASà‡«±æŽÇÜ'y\R-“<{7@XÛ+ž9Í¡lNs…Y¯»ý›0]³Èùäĵ…Ž©Ôxþú¼®B"ðˆ—ÓØwX¼oƒ 7È’Ö_ŒZl Å.TZÌÒKŒ#¾7oÑ©g'A™ÁF?–ç_.þ.ø9î½ß7Bn¡âÏYÚb×Á2² ü?gi\Úsô~à>W‹Faf‰?—K3CZгCkZñ ¶¶šµÎýdN½·vGÊ>¿oÍsÊõrÿ']îùwg÷/xMã…,ò£Õ}»„›eã1ÙvëŽt‹é¼ÝÐ'Ì—_ûnÒÅ]¼š^Ô»!Iƒ³¢'4úL‡L>§Æ¡)ÊÜ¥ÓD—2'Ê 4àBUÒÌæøReðÃøã^N\8Qp6å ŸFìTG"¿^ÆþeÃ~%ÞcFš åï\¤ò™fq7ØP¥mõó.ãPJœéé–ú‰n¹,,[Õï^Í'ãÍVÎ×fÐêì7‹ïa$j¿þÃÃÔl÷8Ä?›6eM´Ú™”'ÏÀ¯u“l¥ÚŠÈ]1øÕT¤ù¶<úýËÔ²¢Êé`ƒŸ5Úû„´sõ‚Æ¢:%é­+•¾ßÞ€á@~_ºP)ÍÂUKñVüà‘É]H/# %ãÌò`P2Ítƒ}‰;¢=h‚ÛdÚM2#€9›®c%ƒ9:}Z‰b1Œ4iÌ^Á­|1hËŽFI ȉH)£Ìf’ñ^ÏE3RË#„í³ðEê1ØZõÁÐKŠJ:×1'žž¾~ZÅ+zòlÿÙÞöþÞþîËÝ—‡Û/ØWŸ´ÍTáoïÏš™Ð–„Ì,Ÿž§:«s04\=/ú4J æynÓiôjr7£Éý)ÏXнÒ3"îô@³5Øf¤¸M$‰ŠóÉ]WGŽåxÑ+ÒyÁ[‹Z ¡ &.xaH”èGŒ+}BàwãõŽ€\@—K¥…JöÞ§C6©ëWnÈØv÷ÙËî.öS•;âho' ´šlL¥õ‰pÐ<8y÷Þlµ–[v¾O¤£XÜ ^¤ŸÚuÙ‚u¸‚^ÁÔ¶Í÷ž ­ÉXcøØgÐ*| Gk¹M³[£°äÌ-ZKP$¯<µD-ðE՞СtIr†*é_±‰Á KæžÊÅ)àKÚmÇ’+ä\>êNåM#®°éäoOø%‰[Äó8:Ló‚~]á“O0ª¢¢‹”K@7ŠÜÈB.µÎ—¼œQ8gñ![!Ñ׸O3ðYöJ#P:ú_Ç/NkàFûmà¥òjq}°‰ë‚z¸/7ÉÑYT.L;CiÈ"+l[ôŸG‡h" •/Z¼þò%ƒÜÞ+®Š †DN¹2û•ñ³Á¹p¥i.Î{OB8Ï}¼"Œƒ›·41©ùø°}F«Þ_e³M±S0—xœ\; /R“ô,»M€WC]¸‰q-‘Û#Z–¥0úuƒ×©´ËUÔ‘×ÄFjd4:RÑæœF~æpm6‰˜ ¹\•–³’¨{¹ƒc$2W/€r.²Ã‡ ƒ©@Ç,¾¸`Ès:³ ÙÈ`áü*cËuís]qÚ¦y!JB@SF­íÇþé0/ìܯsª3 ü][‚Ó_äÖ™07ÒA4ÙhÔä”ü§à-[6N óìÆÛ_ö7ñüõ$úLÓåç¸ÇßÌÁÎV¡V"– ‚¢ÁÓV=ÊÁGZêw¬´Ä>¾×ŸÕOøõÁZÞ "í«Ìö+¹ì|Ÿ¡Ý–9`æ¶o`I`.æâP.7@*ÏHÆAà†E®¸H¾bîˆÓKÒ-)æÃÕͯ”—ž±Ièè‚°óŠãÛ!~úœóÍᆠIbäf@O[™bqósÆ:yÅɃ`Ç ~Ñ NÂZN`ˆ—ßX!x6c!H«éî×i4ö¥I „o’Š ÅØ½Òº4è2Ò8{q9÷9W ¶¨°˜’ô'VØâ˪¹,½¿$•HGL*ú´Bí–T´°˜µÐIxˆË·a¬Ú}8{û¤ÆÍ¥?H»è†\æälgÓów.Ö])¤]½¦Jª!×®kÆ%êe ²0‹ã²üÙ”/nó@S’m$ (=„¦Dˆ`º¨!È>åò¡4#`5H9!ØY2"vÌ ¸(¶·÷ú:‡ùKñ“8”ÍÇÑuLjÔ ½‘Ø›p-IAÚÞ56×"…ß@\âµÂ*!­ÈSå–­›ñŒ&²ãžTÁÇ,I w ?àÛq® ä\eBºOPŽ&¦"ˆ1^®@/Òw˜É6¹¾¦› J,æ…40@bÕ£W¯•1‹¹,&³^ FäDjR3?$qù‘d™u6=P®^î¤å‚x=HÒÍ.œ ®2í#‚Ïœ>-á™ýVÖÍ=Þšæ­ü$D]T›&@…A°lì[X¬C±£o¢Û,0úW¤QšÝøö™ýC[yk°3¯~“Ë»zèîv·épáä¾¼S WºÉ&º6ÍLû1h´b6wsÚ¦èç³ š ž?ëÊ~­jDëR«Õðì1»ŒŠ³ª(33¿ù°à©úE‹+ÅP³Ì"dÕÈ©¢†"É% 
ö7'XàFö¦¾fê3ÇfÑy)a04ÑâÉ”>é½J¦f¼’†oÛ9 Duböú»ÃÞv4ønøý0Ú‰£á‹hoïÙ˽¸¿·ÓF{r_k¤UÓ’…ͱ›S/¶{°\wpa 0|,ãrr­ka…@²ÂF) nÑÌx•_…Ÿâ;:ö%×Ѽ¥9g‚Ô|ÁùùÁ_ã»E#®)¿è‘N¸zñœmyé@ó•µÒ¡,jÂÁ<@qèd…j享:K úåçx]OfðM„sÎD©j\»Ë’Åk™ž`®(¨‚wÛH‘%H&“xÐàïhð!‹ûS™„ßýƒyÉìVù YÆ·wFþÛó#~è²<œV'ŸÝaz£¬Jl¢a“¹¿Ü’ä$ w¿7' I`üâéÕÒa––¿œEƒ,ÝÔ&ˆqñ™ôóV‚ÚŸ’^”Fµ¯Œ{”ºïþoÿÿò¾Ó‡ charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/package_test.go0000664000175000017500000000017212672604507027010 0ustar marcomarcopackage migratebundle import ( "testing" gc "gopkg.in/check.v1" ) func TestPackage(t *testing.T) { gc.TestingT(t) } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/migrate_test.go0000664000175000017500000005226512672604507027057 0ustar marcomarcopackage migratebundle import ( "bufio" "compress/gzip" "encoding/json" "flag" "fmt" "io" "log" "os" "strings" "sync" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/yaml.v1" "gopkg.in/juju/charmrepo.v2-unstable" ) var _ = gc.Suite(&migrateSuite{}) type migrateSuite struct{} // The charm data cache caches results from // fetching charms from the charm store. // If the update-charms flag is specified, the // contents of charmDataCache is written to // allcharms.json.gz; otherwise the contents // of allcharms.json are read and the charm // store is not touched. 
// SetUpSuite primes the charm data cache. With the -update-charms flag
// set, the cache starts empty and charmrepo gets a scratch cache dir so
// charms are fetched for real; otherwise the gzipped JSON cache file is
// loaded so the tests never touch the charm store.
func (*migrateSuite) SetUpSuite(c *gc.C) {
	if *updateCharms {
		// Fetching for real: give the charm repo a throwaway cache dir.
		charmrepo.CacheDir = c.MkDir()
		return
	}
	f, err := os.Open(charmCacheFile)
	if err != nil {
		// A missing cache file is not fatal; tests that need charm data
		// will simply report cache misses.
		c.Logf("cannot open charms data: %v", err)
		return
	}
	defer f.Close()
	gzr, err := gzip.NewReader(f)
	c.Assert(err, gc.IsNil)
	dec := json.NewDecoder(gzr)
	err = dec.Decode(&charmDataCache)
	c.Assert(err, gc.IsNil)
}
Annotations: map[string]string{ "gui-x": "529", "gui-y": "-97", }, }, "mysql": { Charm: "cs:precise/mysql-28", NumUnits: 2, Options: map[string]interface{}{ "binlog-format": "MIXED", "block-size": 5, "dataset-size": "80%", "flavor": "distro", "query-cache-size": -1, "query-cache-type": "OFF", "vip_iface": "eth0", }, Annotations: map[string]string{ "gui-x": "530", "gui-y": "185", }, }, }, }, }, }, { about: "missing num_units interpreted as 1 for non-subordinates", bundles: ` |wordpress-simple: | services: | wordpress: | charm: "cs:precise/wordpress-20" |`, expect: map[string]*charm.BundleData{ "wordpress-simple": { Services: map[string]*charm.ServiceSpec{ "wordpress": { Charm: "cs:precise/wordpress-20", NumUnits: 1, }, }, }, }, }, { about: "missing num_units interpreted as 0 for subordinates", bundles: ` |wordpress-simple: | services: | wordpress: | charm: "cs:precise/wordpress-20" |`, subords: map[string]bool{ "cs:precise/wordpress-20": true, }, expect: map[string]*charm.BundleData{ "wordpress-simple": { Services: map[string]*charm.ServiceSpec{ "wordpress": { Charm: "cs:precise/wordpress-20", }, }, }, }, }, { about: "missing charm taken from service name", bundles: ` |wordpress-simple: | services: | wordpress: |`, expect: map[string]*charm.BundleData{ "wordpress-simple": { Services: map[string]*charm.ServiceSpec{ "wordpress": { Charm: "wordpress", NumUnits: 1, }, }, }, }, }, { about: "services with placement directives", bundles: ` |wordpress: | services: | wordpress1: | num_units: 1 | to: 0 | wordpress2: | num_units: 1 | to: kvm:0 | wordpress3: | num_units: 1 | to: mysql | wordpress4: | num_units: 1 | to: kvm:mysql | mysql: | num_units: 1 |`, expect: map[string]*charm.BundleData{ "wordpress": { Services: map[string]*charm.ServiceSpec{ "wordpress1": { Charm: "wordpress1", NumUnits: 1, To: []string{"0"}, }, "wordpress2": { Charm: "wordpress2", NumUnits: 1, To: []string{"kvm:0"}, }, "wordpress3": { Charm: "wordpress3", NumUnits: 1, To: []string{"mysql"}, }, 
"wordpress4": { Charm: "wordpress4", NumUnits: 1, To: []string{"kvm:mysql"}, }, "mysql": { Charm: "mysql", NumUnits: 1, }, }, Machines: map[string]*charm.MachineSpec{ "0": {}, }, }, }, }, { about: "service with single indirect placement directive", bundles: ` |wordpress: | services: | wordpress: | to: kvm:0 |`, expect: map[string]*charm.BundleData{ "wordpress": { Services: map[string]*charm.ServiceSpec{ "wordpress": { Charm: "wordpress", To: []string{"kvm:0"}, NumUnits: 1, }, }, Machines: map[string]*charm.MachineSpec{ "0": {}, }, }, }, }, { about: "service with invalid placement directive", bundles: ` |wordpress: | services: | wordpress: | to: kvm::0 |`, expectError: `bundle migration failed for "wordpress": cannot parse 'to' placment clause "kvm::0": invalid placement syntax "kvm::0"`, }, { about: "service with inheritance", bundles: ` |wordpress: | inherits: base | services: | wordpress: | charm: precise/wordpress | annotations: | foo: yes | base: arble |base: | services: | logging: | charm: precise/logging | wordpress: | expose: on | annotations: | foo: bar | base: arble |`, subords: map[string]bool{ "cs:precise/logging": true, }, expect: map[string]*charm.BundleData{ "wordpress": { Services: map[string]*charm.ServiceSpec{ "wordpress": { Charm: "precise/wordpress", Expose: true, Annotations: map[string]string{ "foo": "yes", "base": "arble", }, NumUnits: 1, }, "logging": { Charm: "precise/logging", }, }, }, "base": { Services: map[string]*charm.ServiceSpec{ "logging": { Charm: "precise/logging", }, "wordpress": { Charm: "wordpress", NumUnits: 1, Expose: true, Annotations: map[string]string{ "foo": "bar", "base": "arble", }, }, }, }, }, }, { about: "open relations", bundles: ` |wordpress: | services: | wordpress: | charm: precise/wordpress | mysql: | charm: precise/mysql | logging: | charm: precise/logging | monitoring: | charm: precise/monitor | relations: | - [wordpress, mysql] | - [logging, [mysql, wordpress]] | - [monitoring, wordpress] |`, subords: 
// TestMigrate runs the table-driven migration tests, feeding each
// legacy bundle basket through Migrate with a stubbed subordinate
// check and comparing the result (or error) against the expectation.
func (*migrateSuite) TestMigrate(c *gc.C) {
	for i, test := range migrateTests {
		c.Logf("test %d: %s", i, test.about)
		result, err := Migrate(unbeautify(test.bundles), func(id *charm.URL) (bool, error) {
			// Subordinate status comes from the test table, not the store.
			return test.subords[id.String()], nil
		})
		if test.expectError != "" {
			c.Assert(err, gc.ErrorMatches, test.expectError)
		} else {
			c.Assert(err, gc.IsNil)
			c.Assert(result, jc.DeepEquals, test.expect)
		}
	}
}
// checkBundleData verifies the migrated bundle data against the charms
// it references, logging (rather than failing on) every problem found.
// It reports whether the bundle passed all checks.
func checkBundleData(c *gc.C, bd *charm.BundleData) bool {
	charms := make(map[string]charm.Charm)
	ok := true
	for _, svc := range bd.Services {
		id, err := charm.ParseURL(svc.Charm)
		if err != nil {
			ok = false
			c.Logf("cannot parse %q: %v", svc.Charm, err)
			continue
		}
		if id.Series == "" {
			// Fall back to the bundle-wide default series.
			id.Series = bd.Series
		}
		ch, err := getCharm(id)
		if err != nil {
			ok = false
			c.Logf("cannot find %q: %v", id, err)
			continue
		}
		charms[svc.Charm] = ch
	}
	if ok {
		// Only verify once every charm resolved; otherwise the
		// verification errors would just duplicate the ones above.
		if err := bd.VerifyWithCharms(nil, nil, charms); err != nil {
			for _, err := range err.(*charm.VerificationError).Errors {
				c.Logf("verification error: %v", err)
			}
			ok = false
		}
	}
	return ok
}
// TestInherit runs the table-driven inheritance tests, checking that
// inherit correctly expands (or rejects) each bundle's inherits clause.
func (*migrateSuite) TestInherit(c *gc.C) {
	for i, test := range inheritTests {
		c.Logf("test %d: %s", i, test.about)
		bundle := parseBundle(test.bundle)
		base := parseBundle(test.base)
		expect := parseBundle(test.expect)
		// Add another, unrelated bundle to the basket so we know that
		// inheritance picks the base by name rather than by accident.
		bundles := map[string]*legacyBundle{
			test.baseName: base,
			"other":       otherBundle,
		}
		b, err := inherit(bundle, bundles)
		if test.expectError != "" {
			c.Check(err, gc.ErrorMatches, test.expectError)
		} else {
			c.Assert(err, gc.IsNil)
			c.Assert(b, jc.DeepEquals, expect)
		}
	}
}
// ymap coerces a YAML-decoded value to its generic map form.
// A nil input yields a nil map; any other input must already be a
// map[interface{}]interface{} (the default map type produced when
// unmarshaling YAML into interface{}) and is returned as such.
func ymap(v interface{}) map[interface{}]interface{} {
	if v != nil {
		return v.(map[interface{}]interface{})
	}
	return nil
}
// readSection reads the next section from the bundles file. A section
// begins with a title line starting with sectionMarker and runs until
// the next marker byte or EOF. It returns the title (marker and
// trailing newline stripped) along with the raw section body. When EOF
// is hit mid-section the final section is returned with a nil error;
// the caller observes io.EOF from ReadString on the following call.
func (a *allBundles) readSection() (title string, data []byte, err error) {
	title, err = a.r.ReadString('\n')
	if err != nil {
		return "", nil, err
	}
	if !strings.HasPrefix(title, sectionMarker) || !strings.HasSuffix(title, "\n") {
		return "", nil, fmt.Errorf("invalid title line %q", title)
	}
	title = strings.TrimPrefix(title, sectionMarker)
	title = strings.TrimSuffix(title, "\n")
	for {
		c, err := a.r.ReadByte()
		switch {
		case err == io.EOF:
			// Last section in the file.
			return title, data, nil
		case err != nil:
			return "", nil, err
		case c == sectionMarker[0]:
			// Start of the next section: push the marker byte back so
			// the next readSection call sees a complete title line.
			a.r.UnreadByte()
			return title, data, nil
		}
		data = append(data, c)
	}
}
// getCharm returns the charm with the given URL, consulting the
// in-memory cache first. Unless the -update-charms flag is set the
// cache is the only source, so a miss is reported as "not found in
// cache". With -update-charms set, misses are fetched from the legacy
// charm store and recorded; a nil cache entry records a failed fetch
// so it is not retried. Safe for concurrent use via charmDataCacheMutex.
func getCharm(id *charm.URL) (charm.Charm, error) {
	charmDataCacheMutex.Lock()
	defer charmDataCacheMutex.Unlock()
	if m, ok := charmDataCache[id.String()]; ok || !*updateCharms {
		// Either we have a cache entry, or we may not hit the store.
		if m == nil {
			return nil, fmt.Errorf("charm %q not found in cache", id)
		}
		return m, nil
	}
	log.Printf("getting %s", id)
	ch, err := charmrepo.LegacyStore.Get(id)
	if err != nil {
		// Negative-cache the failure so this charm isn't re-fetched.
		charmDataCache[id.String()] = nil
		return nil, err
	}
	chData := &charmData{
		Meta_:    ch.Meta(),
		Config_:  ch.Config(),
		Metrics_: ch.Metrics(),
	}
	charmDataCache[id.String()] = chData
	return chData, nil
}
// legacyBundle represents an old-style bundle as found in a
// bundles.yaml basket file.
type legacyBundle struct {
	Series   string      `yaml:",omitempty"`
	Inherits interface{} `yaml:",omitempty"` // string or []string
	Services map[string]*legacyService
	// A relation can be in one of two styles:
	// ["r1", "r2"] or ["r1", ["r2", "r3", ...]]
	Relations []interface{}          `yaml:",omitempty"` // []string or []interface{}{"", []string{...}}
	Overrides map[string]interface{} `yaml:",omitempty"`
	Tags      []string               `yaml:",omitempty"`
}

// legacyService represents a service from a legacy bundle.
type legacyService struct {
	Charm  string `yaml:",omitempty"`
	Branch string `yaml:",omitempty"`
	// NumUnits is a pointer so that an absent value can be
	// distinguished from an explicit zero.
	NumUnits    *int                   `yaml:"num_units,omitempty"`
	Constraints string                 `yaml:",omitempty"`
	Expose      bool                   `yaml:",omitempty"`
	Annotations map[string]string      `yaml:",omitempty"`
	To          interface{}            `yaml:",omitempty"`
	Options     map[string]interface{} `yaml:",omitempty"`

	// Spurious fields, used by existing bundles but not
	// valid in the specification. Kept here so that
	// the reversibility tests can work.
	Name    string `yaml:",omitempty"`
	Exposed bool   `yaml:",omitempty"`
	Local   string `yaml:",omitempty"`
}
func Migrate(bundlesYAML []byte, isSubordinate func(id *charm.URL) (bool, error)) (map[string]*charm.BundleData, error) { var bundles map[string]*legacyBundle if err := yaml.Unmarshal(bundlesYAML, &bundles); err != nil { return nil, errgo.Notef(err, "cannot parse legacy bundle") } // First expand any inherits clauses. newBundles := make(map[string]*charm.BundleData) for name, bundle := range bundles { bundle, err := inherit(bundle, bundles) if err != nil { return nil, errgo.Notef(err, "bundle inheritance failed for %q", name) } newBundle, err := migrate(bundle, isSubordinate) if err != nil { return nil, errgo.Notef(err, "bundle migration failed for %q", name) } newBundles[name] = newBundle } return newBundles, nil } func migrate(b *legacyBundle, isSubordinate func(id *charm.URL) (bool, error)) (*charm.BundleData, error) { data := &charm.BundleData{ Services: make(map[string]*charm.ServiceSpec), Series: b.Series, Machines: make(map[string]*charm.MachineSpec), Tags: b.Tags, } for name, svc := range b.Services { if svc == nil { svc = new(legacyService) } charmId := svc.Charm if charmId == "" { charmId = name } numUnits := 0 if svc.NumUnits != nil { numUnits = *svc.NumUnits } else { id, err := charm.ParseURL(charmId) if err != nil { return nil, errgo.Mask(err) } isSub, err := isSubordinate(id) if err != nil { return nil, errgo.Notef(err, "cannot get subordinate status for bundle charm %v", id) } if !isSub { numUnits = 1 } } newSvc := &charm.ServiceSpec{ Charm: charmId, NumUnits: numUnits, Expose: svc.Expose, Options: svc.Options, Annotations: svc.Annotations, Constraints: svc.Constraints, } if svc.To != nil { to, err := stringList(svc.To) if err != nil { return nil, errgo.Notef(err, "bad 'to' placement clause") } // The old syntax differs from the new one only in that // the lxc:foo=0 becomes lxc:foo/0 in the new syntax. 
for i, p := range to { to[i] = strings.Replace(p, "=", "/", 1) place, err := charm.ParsePlacement(to[i]) if err != nil { return nil, errgo.Notef(err, "cannot parse 'to' placment clause %q", p) } if place.Machine != "" { data.Machines[place.Machine] = new(charm.MachineSpec) } } newSvc.To = to } data.Services[name] = newSvc } var err error data.Relations, err = expandRelations(b.Relations) if err != nil { return nil, errgo.Notef(err, "cannot expand relations") } if len(data.Machines) == 0 { data.Machines = nil } return data, nil } // expandRelations expands any relations that are // in the form [r1, [r2, r3, ...]] into the form [r1, r2], [r1, r3], .... func expandRelations(relations []interface{}) ([][]string, error) { var newRelations [][]string for _, rel := range relations { rel, ok := rel.([]interface{}) if !ok || len(rel) != 2 { return nil, errgo.Newf("unexpected relation clause %#v", rel) } ep0, ok := rel[0].(string) if !ok { return nil, errgo.Newf("first relation endpoint is %#v not string", rel[0]) } if ep1, ok := rel[1].(string); ok { newRelations = append(newRelations, []string{ep0, ep1}) continue } eps, ok := rel[1].([]interface{}) if !ok { return nil, errgo.Newf("second relation endpoint is %#v not list or string", rel[1]) } for _, ep1 := range eps { ep1, ok := ep1.(string) if !ok { return nil, errgo.Newf("relation list member is not string") } newRelations = append(newRelations, []string{ep0, ep1}) } } return newRelations, nil } // inherit adds any inherited attributes to the given bundle b. It does // not modify b, returning a new bundle if necessary. // // The bundles map holds all the bundles from the basket (the possible // bundles that can be inherited from). 
func inherit(b *legacyBundle, bundles map[string]*legacyBundle) (*legacyBundle, error) { if b.Inherits == nil { return b, nil } inheritsList, err := stringList(b.Inherits) if err != nil { return nil, errgo.Notef(err, "bad inherits clause") } if len(inheritsList) == 0 { return b, nil } if len(inheritsList) > 1 { return nil, errgo.Newf("multiple inheritance not supported") } inherits := inheritsList[0] from := bundles[inherits] if from == nil { return nil, errgo.Newf("inherited-from bundle %q not found", inherits) } if from.Inherits != nil { return nil, errgo.Newf("only a single level of inheritance is supported") } // Make a generic copy of both the base and target bundles, // so we can apply inheritance regardless of Go types. var target map[interface{}]interface{} err = yamlCopy(&target, from) if err != nil { return nil, errgo.Notef(err, "copy target") } var source map[interface{}]interface{} err = yamlCopy(&source, b) if err != nil { return nil, errgo.Notef(err, "copy source") } // Apply the inherited attributes. copyOnto(target, source, true) // Convert back to Go types. var newb legacyBundle err = yamlCopy(&newb, target) if err != nil { return nil, errgo.Notef(err, "copy result") } return &newb, nil } func stringList(v interface{}) ([]string, error) { switch v := v.(type) { case string: return []string{v}, nil case int, float64: // Numbers are casually used as strings; allow that. return []string{fmt.Sprint(v)}, nil case []interface{}: r := make([]string, len(v)) for i, elem := range v { switch elem := elem.(type) { case string: r[i] = elem case float64, int: // Numbers are casually used as strings; allow that. r[i] = fmt.Sprint(elem) default: return nil, errgo.Newf("got %#v, expected string", elem) } } return r, nil } return nil, errgo.Newf("got %#v, expected string", v) } // yamlCopy copies the source value into the value // pointed to by the target value by marshaling // and unmarshaling YAML. 
// copyOnto merges source into target in place: source entries overwrite
// target entries, except that maps present on both sides are merged
// recursively. The "inherits" key is skipped at the top level only
// (isRoot marks the top level), so the result does not itself inherit.
func copyOnto(target, source map[interface{}]interface{}, isRoot bool) {
	for k, v := range source {
		if isRoot && k == "inherits" {
			continue
		}
		if srcMap, isMap := v.(map[interface{}]interface{}); isMap {
			if dstMap, ok := target[k].(map[interface{}]interface{}); ok {
				// Both sides are maps: merge rather than replace.
				copyOnto(dstMap, srcMap, false)
				continue
			}
		}
		target[k] = v
	}
}
¾‘dN(ïê­\È;yÙW¤’ªˆ+M•ÙaO¢ŠDEDô4:¦²Tëpãœh‡¬¡=à ЦŠX•4ISu‰ÑÇ5EèÒE™REA’¬K’¤¬Ÿ¹íAÕ¾f¸4b*J sCº.+2â» f§ª*•UI¥S¥:Žƒd”¡ì$s $pó.ïŒ/M“ªë c´MTî‘  +ŠH%¬«ªJ¢Kõ™`ijÐRé«V¼€ä6â{Óª$²à#!¢Q så¬È¢J1k ‘%­ÒmIÁ¡4îG‘&yH“¨Œdm‹Í*ˆšj ¸XåöÐü¹‘mî¬K+*ë6ò€l$°SmªB‚ŒªH‘—tŠÁÖ%¬IQu&]B½EѳãĬÑÞNûH ;ˆÿˆR]G"ØÕdIDægQI'¢®ª!ªUW[¬ •¯À àÝæZ€ñd¼¿µJˆJÑ$D% DU¹„RYBC¥ |¢!yΉkJ÷(–DMà +$«ë:7(ª¨CÖ×Àt Y£¦Ó›C-^ôò»Édp<µï/OŠl†@FPøÎ!ê $I•0Á¨&ôW4õ R!¢ÂtÍr¹*¾A‘% ]#ä^ ªÜ”dàÀQ²;ço—åž„Bx!ðÃ,é`|ÜÇBì¦Ùü) …h„Š4ɺ ±“WYìUtjЬ+ Ô5’?îÂp?g¬‚"ì J"c°Æµ¢³KÑ X³ 6*cµ¢Ü*üzgñR ÜÂ·Ò µ3ƒ¤ÌÉ–Ú…B1– áLatkåÿḵrç°‡xͪþ0 k…Ëa(VæŽåkð‡?×ESþ¿íwö:K,Ãm¿~U‹%ŽË©Ì£ ¹5ªŽÆWëÝü`Þ{¯ÔG¦ÖÜöS+?6²&êâŠ)ÞúF­óI¦ 7èCçj.^;N)Kšzh[ן„fÒ¢öhÐÕØ»iœœå¿X—M‰5 v³‚5¾x(eÂÑZ5´BÒæÀìGÉ’ƒxiRzX–[–"ÙÀ{=íK#c·Ñ¾ìsWOq¼¼óT>p\ï(tj˜Ÿ;<¸wòÔ_Üys.x†[<ÿ9BiÝ>¶}SˆÙ, 7e¹šøõW"v?j?à¼ÈšHdÍÝrfFØÁ¥Å%´tŠÜ³‹§”#…jâo ‹ R¡T eQ±¶U 4µ¬;'šŽ”ºFo—êì'ðâhgc³ÉíŶ[ðKêzìvÆCu¿híŽE1Y—@{Ûx¡}‰Jº®Ê²&ë\“WTÚ']Ö¨*±g [Qî<®`Vçºÿ¶v}•€¶À®¡§Ó¸QT**²¬êº)Ê:®èÓÏ{$ÞQÅv-'¡¶; ßo²ºša4'˜l^˜¹;¡½& ³’æ\HîÛ.¤òU¯ˆ„Hº$#E£[TOUMsª2ö¸êãÿMOŠDS°Nµ"äp{w±8³SM–¡+®|ò/~r£è’H(¡:b<èñmôêTÃÄM¹¢-1㨪ô9gPìh€5CjYon»H³DûþͲbß¶v­lã XÖB¬^=ßÙ¾ +×'câjsió¦†`ØþìÙæ‡fcσ:ÕuŽB±ÒÇ¡Ë+S8ìÕ”ÚP~ù1i™ø‹ä¹ûD¨ÊbÛ̱ÄÓýËÏbÊÕFÁ oñzˆ<*Þjk.ÝHM P½ÂÜÀ±Á®›ë«ºýxÉÉŸ“ó>‹wØÏË?ðÞ=0lWõ‹wúÎ>äo Õ•4Ó`:¥4[^M‹YüÊo_Êo¼ë‡ÅÅKnâ^ú¼Ôó#÷l¡é;o%¾/_ª÷Iq­¿~½áSÅ–¦Q‡ê\Æ—°x©k’¨ÀŠ×º´vy×7?¸Tò¶jöbŽóKƒ­o7”zÿRCýJÞ/4°1f3ì  â z hU/ýÔ£õÛyâÕC3f‹ì‚rq¥Ô¢Â¬ž«Þø©ßðQØk<›sÕ>˹W)jð¢Uhy!ĥ⽡z<ñ·HýíÌ£%¯^¹ø° g¯:CÖRþõ›Ú2d…]-“rè!‡ú3}<–tYT¡Í”U ªVÌ5b‰j"z¡”UT]aeF¼Ímk2K"¬™—]ͺ§}ΪíÜ/ÒÁ5 J}I Þ+¿=0»ª>ÖþÝJS¿5ƒ¶BÅšÔ/•&/Æ 0´üy 2g†_ L2'ˆ¡foˆö¦±^ãûÉëÀ¤WhÐÓŸ§“éÌž¼¢»·Hº<‘¾ºÇî wå[áÔ·žå™9¹ušº/ì`¼0ƒ^6½éèè„=/¬@_Œúz8è^«Ãq§3D³Þzýãë§7ãsý‚fêà¬Â”8ïPfœ]#«‹aü|nyrj«Ây…-z=7o_çÓÅñ³ƒzÃáíÕàz¬_Žýx}êß<Þ誳è¼MïÎ üÍï'Øœaýb2CöÙqv?¹zšÞ]½] GçÉ$Ø3&²?=®èîûÓ¾¿˜N®ÐhÒC÷Äu­>ð7éåÓ‰ ²‘æß»Òï7È>»~±Þ¢ù>æç'§ßÿèêãîÙ“^zÓ×ßle÷éu}¬ÛyœöÇÁýÝ8µ{šru2|ûÃ\FÏm%ßQ)_.o]èîvFÕxgÚDZéuètržÝß]!;¶'çØ›Dö/‚«¹ù«ôtv™þ…I¹ 8*ZNlØd{ ºsÏϲÎíiïäætü8:ÕÏîÐõùmˆæ\û¹Á¥\xvO`OzYàôæÉylãçjÀm_N料ÙíÐ{G·’ÙM<y.Í Û‘}vßÓa6>;Ÿ™áÕ‹IÏÑÅøÊ¿§ã…qw-ºLN.؆íÿ1àßGƒ—Ë“c¯Æw‹¯.¯'rÈìdpªçµ¼oÑëd|mÝÎÌ쿸€w4:»_ º˜Í“Ë—ŽÛ³ó¹sæ÷ÌÀ>nÝYúÕht:›› ‡!ñ_  }DÏŸ§wƒ 
˜«ØêÏ|Ë¿Â@·¶zrûÜœž*ÃÛñèr4`ÿÁpwF§/Êà¶wzsͮdzáï^ïr„-çW£hÆè? à»<ØïÝ0õ/n:ñÔë¼Yäõñžé¥ÿ:¿'=dLô|Ðë ¾O@ç0>Ùüôv›ýÛ‚Ï 2F÷ p¼Ì g—¥žéUzÇô^ƒ?_Íì¾?eñb)Ÿa±§6?¾˜Þù}cò꺺o÷/ç Çí€Î 6õox3ß™4he>Qòã›`+ÚŸ@Žà7™o?›z†îä>8»[»½Xù¡|ôM œ™Eä™AÆscò’CüȶãÚ+¼MœجÀüMñ=+öèâ7ð“d:Ñc³+'Ó»çïÎâ6ôÇ9Ĭ àÓÕóä“ûZæÑýÄg>ôtüÉ=¿CM›b:‰§Ïæý!™ÍÀÎß˽ÔÝ[ ÞÌžþdô{ ¯-r§wì¯òÁõü-c1ô6Ðçv÷ðž“m±Ó¦6½ çs“¿W²c¾&9ãsߺ3]½ úz0­}´ô-€—ññvø‡.ðû}©ûÓYl‚¾¯ûñ |p:îMnC ü±ŒE~^æñþ ôTÇzˆ½æ¢ƒ€o‡˜uvéBÜñ—øÏ:;ÀÇ«\Ä«»;~¾¨i ƯödüfŸÎüû Ø›ÇbùÐtqb‹aw‡ãÅ}™äÅ„ ëî`™›*ÞÓI¯ˆ@3ä$7»¶Io1eûÙšcx_Ű`L­À{[!ÀãMgnº«œgS°·þøÆfz_渡{ôÒû‰ 9s \,ôg«ß{c¾¹ÍàãtÔCÜ·g¥Æ®ö"•²‚RѦ”ûXÚ (&åÅÙºðj´^¥¿ÃZú¹æÊ°m±Wå‹$ª÷­YÇQ!öÃ…Îäkõ5{«?ÙÎü«™{¾-8i å½gø_}ÏŒ3ð©èàËÒ¿z |²üœúf_VÒž×÷¬B´@I6«k¾%@ÉåòwVù1Rÿ³½ Q½ù¿ÖMUS©“eÀPº,(ëw€ÐžÌ¶à`S¬©Rš¿£àM%ÔËß{DDes®øUDÅ/ZH¾}s½l–›¢ßL#ÉFݪIÿ?ö®­7ŽY¿Ï¯ôb`Ïv›÷‹?$Y'88ÞœMâEa4Ó–Ç–f&3’%çáüöSdß»‹œž‹ä즃@²šdw‘,«ŠÅ¯R(h¶{“ùƒËËõb}é§Ð¹æóôîñ®¡Rçݽ\,?ä—X‹±. ^^BÑâîòTíªÖoᢵ?»ž™%w/‚%>Üì¡©¤DS.ˆ:Ћ‰Pa`ik`E•·»ö9^§˜ûž?UŸÎÀ<ß:¸7ìðâ—Ÿ¦whQ½¶ô¬ä5S(jœRw±Hj51R*¸Äâ.bɧcÊ!8•!ø.Èht[œ5ÝhCÀ áaFžÝ A±)“VQØ,´Áp©¡©2TJN8ÓÒJ<ú)vúŒä,vºâ›”N®¯<÷úÀEý“›¼¸õÜ7ËY9ê)ü¥éßýR¨Ñ:BbÙë^³¢ z*t> Þß½Z\»ƒ²Ä/ø°C¢ÛÞ}~ù~¾NªXâIë¯ä·ûÕæþsŸž·*â' Óítù¤ö¡#b€Íh "”÷(!©À=HŠ¥]ÖúåׯðäT•£ó~Ü® ÒûUåI†HËdêÐ 'Í?.úÉT,z_*sCˆJX`Ê0Í-“:t-D•*PÄ”¶D [Å¥M× |ž ï 5ÍVÀŽå\U GË]H‡±.¨ÊX÷4¢ƒ»˜"ª#|AËp2‡5ñ8˜XWùJµ"îJ •\0É%ÎO.zÇÁ+Á-1ªŒ<)–éPR;¢%²?×$x@!e€iTîTÄÿ€y:Ïn?7µªsD¸p72 çÌíΟ#ê.¡J­ŒÑndy›ûÜ|’ °½&+i ½a¨*úV²}΃ˆtó´jUj80Õ’†#1ºK«œZk4W ¿äQ 8â‘Pl!‹vƒoP²O>*ÄJÝ(_Nö4‹’ò}ýª1ºÈ,=kÒÒ(Áeå-˜X«»ì*+%åj-³ÍzRüNnA‘¾Þ`ûƾg G,k.qn®îv–ä¸(Û©&ê*…²]A'Êgmr"xu”HQójsÿ{ö±œÉÙôc¶~¿ž¿‹cÔ‹>+t±~‹ðÔªCþæÀÈå§äÁÖ¾o‹/ÄVE¡åÖk30Pkâ~xÊÛéhŠÃ‡ÚuzáñõJ-À¿ÜœèÃ>>¬î7ËéÍÎ %|T§¨Pai=¾kœ©A+óÔÂÄê)”ç~.á%½Þ›W+¬—"ö%{ïK6YUvþ9ÛžÇÆ&ˆÍt¾Ú^? 
Œ¢æ^²®?F=BÐõê×›£Æz-&Äíšçše':µ‹Çy |ug7'íXÇ*¥ÿÙ½ÈëŠñ;o `^%¢™Dtžjèò—\–#XióÕÃòf5§­Ž¤«ÍuñÝö¯„§`/¥wÓMzýû9þö"‚ÙÙzç™ÉÌ\(+ŒñLHy5×3NÄüêÝü ´ùÐ+î—Îéz9_l< Îç‹2¢*'ÓI‘ݤÛ © Ê ‹N@Yi þ€Òøb›”êq;2,FM«æ¦oœ"teU£•;°ƒmÍâÁÑpe£r­=dK‡×÷'GèÁ·Gðm|{ÄÕqµG\íW{\í2{„7o<¢aìr:vùwºîßs à_w(Äáwüƒâyu$èTªÃ@§ó¿…áþºé l8ê/lŸ(dv#Ðé„x>®„iU–Ô#¨¡&GÒŠĽ‹ˆŒ 1˜ "ƒTt·"””÷Ð 3ŒfyR–” à@…~0)¡Qaº*™O[ŒÿóÉúæÞ¡_ÕÇùc6ô–0B–&,@–°¶{n_/ãrúx‹r8qq€tÚ%/N"ÇÊÞ%I—¼|ZüÏóI­:]ˆ§µsÓ¬&ù/ìÜÍÁg±Hx‚/?â°-á<4£R” ï€ÇP"z¡"yÕãè ­A^&Ph,†”ý”‘]„5¦±KNgú ªòÇœ•¢~Å„–¡zçù XE|¤ö$mhà$)QEýœ2îæsüy€ÆúãW?ç:”Z¦Âg¹m& C„²0FhÕØ•ÞNÁ`Fp ™såzO}«F(ßë A¸A”øO]´9¬~A±f«Õf¾X:ø¿Þ+.Ü×›ßôÄô ÛßÖ&ܨŒöÀ¤eÖ ÿ&¨ë‚ë«•nàyDIPE(™ÿÆ<[߬>ã`òQâʆç^.@× ‚Љ<ÁX †)¢$ ëÚBPF!÷ ‘ ‚nþø@ßâ[{´¿fË‹–-W%~ôTô§±´Ÿû»Kî{üï¥Ë˜›Ý…·°@ÄK$!촭Ȣߓ¨ª'ïd.‚§TÂKÆø°Ú\Wqlâv¶\MÝ“;<†­…Ì]bŒÄtë¼z¢tgŒª•)UÄ…ÓÂGcëµ°©µFc\8#©ÒAäD’  tx²ÍS¡·ÚX÷'UÒÔÅ›jŒU´Ìl5H›‹" Ñçþw¨>C2*.}„Â@Á`%ÚP(WTJ!5îÒé·üÈé"à ³ ARåó¨Ð”2¡«,;R†4íÄ£ÒFBSJ`¨wYðŒ!žL#¬TÂ]àC2¯R!RJ ¢ ש2ÄíÀݪL–GæQ;*ÙÀ#BÈÉRªÔák)C©¬Ó…É Èœ†¹Âà¦ß܃jpûfuýmkúË lúðððÒåô~9»ýayç‘Nÿâ@M_üzÞoüT‚‹3×`Ÿq¡"΃̲T2 óÉ%i$\Í-«Øä5’˜Gaw¥¹"´‘Tª})¬}ÊÑ.,7ëhþ WŽJÖÁ©HaÆå  s§>Ÿ¥&–1ßâœy?'zÅg°tT°R©)²`’`šê\y gˆm¤-meïUŠGÞP"3î’òÈu^­•Ü‚ ’nÖƒÛyo»ÎKbÛüý²Ù¼“Ð Ç#éåüŠ'è:<§X8KUƒf4h¶.Ηm£i±Ò ˶ÑÊ/³öó-šßÆÍ6î]Þï• íP[ Û±ªR£a“1/Z쌾Â×zø~=›MʳsDÑsåhà÷ãÃtyÍ4åUÌ·«Š&.ý]‡@ö¡ç¤Vu–K o6ÖÝ2Öc°‚ß-®·ŸÐÀòJ¶ ³Ñ\¼¨jëàê·Î»Y}g¨¸–ÌÀ>û0¡„ñÚ]B»ÁÖ{·@üœ>q_ò Þ²O‘$¬KX­²œYù…ÞšF_Ð{m§îËÐ;SLr’jªYdw‡ýzCAí œÀÏVoøsôfh03F¤ÂÁáSî©á”ÈF^hOiT“Šöe0ëäk€‹‹w裔§Zʶ5F‘Ýæh±MÙ_Ì ’î¶Õ"™½»n´ªGõ¯PËî·èÎou[°V‹A¢¯[¸½bŒ÷ã=Fðïñô nŒ÷ã=úl1Æ{Œñc¼Çï1Æ{ŒÊ[þßï1²Ëïñˆ÷ÈSR øhå.¦vÖÑcŽW˜E1”¨,ê^)±›=CÐAXÑòèéy£˜±xXƒÐaÐúPŽ¿[Œ™OF—ëèr]®£ËuTÞòÿF—ëÈ.£Ëõàr]=,g7«ûù$Μ­e•Øú)ëDÖûêá¤FœŠZ•Wí§¢G˜„¶Ò‘$ÞåÀíDy'·ëéæc,pøÜ׈¹nŠƒ¼~ÌŒÜ9Dè”*¥˜Ìï<à×K”L­ÒPÁýO(áèr"W#ž€bÊEª³Œ(c6Rj!1a§:8·Í.ô]t©Wew²‹)M Ëý|Cíi8FGè©§ôé©=Åtžt6ѵë—ÿ»ü±ùþ#ÛÜþzý+IY•دíÑ]î•÷_üÕû6ÚfÔÊþªQt4Íþ\¦Ù“±Aû­\Ø{XoùW™m“ž³¸“Rh÷fs ì0x°íS“Ô‚R/”rhkßí-Kp0 \©Ê4ÃRǺm^kxò LýZ¹®0¢55ÏML%QšQ®AC´eÆ÷ŧ,6ô.gJ ¨f§‹^²FÎpØÕ]ª&üDÊ8@6«„d°ý“Ò‘ˆçÒ:ŒO0”Œ)˜I9UTV žç™(R¢ ÓÆeb¢Œ†žâF÷ÃêêÎÁ´ErÚt«Ôͪ3 
=e?!S5#ug²¢”“n¢óù:©2«¾¼ù¹´ž%¿Ý¯6÷hãóVÅAGxíÃÖ~ðÚ/±‚héx¼JÞæ*¡Xü¢í7)i-¥-ÐêÓÓÖOgèÈ,·W÷ÛX²-ldò7ãó7RƒlàZ[­`C’øq„±)·V(¨&”±Õéÿ|,÷û“Ðl‰N3 MS’Ô×ÚFWj‰$V‚@cˆ>? âæâP.” ‰’'£‚r£pkQ¥Ò2i…  µ &·xæ"ÿ«yþؘfGÒ+Äò(·zfbn~Ïw ^P…¼ò›`žû}ŠŸŒ®n×÷wY²Ó5T¿1fít¹)Ôå&-HØ3†VîúÃ’Æœ'M~èÐ(]ÖI.Ád¢ÄàZB /0€~ÃÛ1Än¶Ž`êÒ¬ k©Ö ‚ýfLc–¾ Bj M“Ò=ôyºY&ÞâAóTaž4KadL.—uH!évÓIvÔ)PÁê%ƒæ:,Hg ì}%œ,pXÕ>]¤‰i;æ¢2§¦©P¢&½'¨¬©k uDÇ.# [¹¹Îh—Ïì 0G$° ºÍ0aR°UXV5%ss¤·_¶Š¢ç(ˆ*ØÕ©th‹Ö„”íYÈ_Ûƒu^¶cõl[ËgOÞsÍ<ã]º?/GÆï)¯±ÝÝe¿OÚ¦ZaZtþÎc¬aÁ¨í]ƒmpÍŸéTHË)(L‹Ò¤¶ÿ?Ù*åÒ€öbÀl¡U™©Ä¥a–šI¡ª´ßs? ƒÉuu§ÕÙzÖLÀå`à.9œÖ áp¥ãäz‚g듨0åËöUÉ-Ë<†;#=y$÷<ˆA°L]À…ÔÆàv,qy8±JKFmiÅ}Ì­á)q§öJ€zM)êâ@©=u÷P¼^\OÞ¯6w«¥3зîïd»º¹w´ü ]%]£\|H ŠakuÊ4H<—JÅVà_ SR§\Q.]6^!ït8@ŠT3ØX$ü¨êý ‡ ØE¦›£cðp€ïR z…àRÍÊpÝIÛ‘Üö9I_ÒT<½Ôv:Njï»K°ÿè°…vàJ¸c¡9î¼U†¦°5îtG«/· … dcZ§„¸”-ŒpÇÚÿÚÎJPáÃb³¨nr¬–Î ¹ÙÄæóƒÀpD㕪JÇl…Û uÊ‚ó’f¹¾YÝÔDù ü>D«B˜oã̲l“ÕÝçÝ]”Í}uí{§þÁ9Me?Q»O.+–»™dA=u(û¼Û¿ ’>ØÁ²Æ³÷к3oÎA·ÁdG†‹þœ­Ö™»ï?ûîTUåÙ{å¡»/‹%Ì×Ñn—³¬‘8¡îåCvÕ\,ÝúâçïɽÓFëÙíÞQâêXÛùÙ«ôÍewsãÁsb ¯¨òüdJíäL+êp-ãuò—Fß]ßFz¥Ïß1kŒã;- ¡É,ç_ j¯VLYDfú#6u¾Â™”ᲟvR‡ ZStQËšæŽÙ•2³Ï[0eš[pû=íd"í×”eXÆ‘ö[êeƒ¼¦*ìVk¿È ŽãéÉ_s:‚ürÁ^âû]bMÎÑrõiŠŽlEæñ“T÷øîºÆ‘O0%õ HŠ_ÿ¥r…RRÛ¨‚¿eàôvª(Ò@WW³Í™øÓÆU£2Æ3Œñ 8c<ÃÏpÚx†®ÐñÙÆ‡€£9ÒOš#=àëâL÷¶zÎ#irýôï*¯æ¹ƒ%;ÎôÓÍ´; !íü½Y76¥†[f…à¼Ê轆iôÖÐ!WÍ-±©±ñLä —4Õœº#èbuáuðeÿÃ¥¸ó›”Rãbh£~?e´T¹¼›¥ƒ¥›Ó9ÊûÝÊGíø¬ï*’·\JwùBe(•Œ—7u;Y¦‡ì«¢ Wåå¥þª­Ú»p"ãü—/?ŽýðððÒÝ3x9»ßlà­î#/ÿ’ÂÏ¿ž÷¿ýìRÀojÏâ q¡"΃̂ÎÍ4Ì'—N­(×N‘ñ:2$y£îÌú¤ó²¸Ç 4¢ž'Ú§ÖŽ‘å¨d -ç 3ÂÐWq•@•&–±ÆEô=ñ´éQÒQÁ>HAs%¢u½¼MhJ¸Ó×È’{†‹ÐJÝ«J×J-M9ãLÉåÚ€e!¹A$ݬ·óÞv—Ķù!PÍæuQ¤—ξ.DzÙ7’ÝãͰöt÷]š;Ž,õ~Ù6š†sÛûeÖ~¾£Eó;Ð¸ÙÆ½Ë{k¤ ÛÙíX㤀ƨó¢ÅÎè+|!Ž–2Dèór¶Zg“â÷?rnúé‡7˜âWTŠ­£¢J4±[É@÷9BÁcJWÁ3«sL¤Š‚>Ç„õ‡x¬žÑ)7\ÙR^…úÿgïY›ã¶‘ü®_¡ÓÞ•?¬HA‚ªrÕÚN$ËñúïÅv”J©8ÎÚNæ!y|wûÛ¯›à›3#;›ãVm<"@7ýÝñ«Ã£u˜ì8)y]¦,«ws?†: ó‹&êg1Z×Äš‡ÒÚ÷d qæ¹AtäÓ0¬&u¨)ODÈ´ nÛu‰ïÖSÌkÐKa¤ÎtÉ1p%Aä[y»]Í.NŸ®W÷O_B ·ï¶ë‰>_fvV¸Ý ˜8T†©X¦ÆLÕŽ«Xg„;ºƒê-â8.‹I{‹ñlêÕCÔÞ `Úl˜¯v8b’”P V¤¯òJ3cÚ²™9K°l¦k„kíŠ-GÖÕ‰Ë O–#‡c‹òË6ˆ‹‰­œRx±lÿኇÿç¶Y¶ÔP †Ú(ï—šÌNi›ç‰»(mñB™ÌU7ëQæ©ÛXI(|/+ygá vµAøùìØ{¾˜˜eã×Ï–“ݬŠYË òÇ` 
-èè9GÇžsü©9G“OMúìñ¤ú©¿÷ïïýû{ÿ#Þû™—jïßßû÷÷þý½‘•þ?»ÈíïýûåÒßûrï_H÷]÷гuŽºÓ’}^ë‚™/3T©[™ÍãsóbbóôÍRáàÃ_na·€4fÅI\®ŽMŒüxGØEþÀ¾H´P$ÛŸƒÏ·³ ¢ù$Z:'å#±v©n[_PKÂ4g;`¤¹p²Å]g1¶2ÐaŽR Ÿ$Û}õ9’+ﻂ9¬`W[³]ð§æáåIMYa¢âãá,P³ìw‚ºÈ8Ë–®nsÓ¡˜gIåŒbcŠ\“cöÜôÒÖ[ÊãŒÿs²Ô@Ðùá}†WŒŽ¸0NÔÈRÛÖ-î2Ç5¹‹?¤ìƒrÓur('I"à ôûÜY>SÇ6tÌʨe2Êä! 1Œ®Í-f[Ô5¸\ÝÇÒ£-ªaS¤'C!2L¢(@²ðZh­ˆñžï ƒùN‡Â–êý1¨¥»„‡š®cp®ˆ'ãèÔaŽÍ¹M Î’ðgÑÀkB´vš,­ ŽÛéy±¯ÙÅY>+æ^rÀŨְˆÃ ˔Ś0D”`€oj3f›j!‘°IÐ\Ø}qqNoH–•XéË*‰m€¬(_¤%í•@+̈D±Q–žæq‘¼mlp è³Í÷Ùæûló}¶ù6˜+®væ @‡~°\f)TpÚµµ? Ù‚-<™ï0¸9ü^ÎÂÝNµ»ÚQëf_OÙ­^k×ZNt‹ï(ó+®(†·Ì K]*Bˆ™»ÝýS¢Uá)$Ï^ƒšpŠC]1ý[[-T#ÝtlLÀ˜Å=+f¢3 ;Û¤è¶ Ñ‹mÎtÛE)N Ï¢l–F=q‰Åxœ"ƯKЂºË°f-X¶’E8+KÍ—ê bXŽkQ±ˆÔ½·ZL×R«:A29ȾÃB\¢Æà·|×µt‹¸#`b€ÚHÕQ° ³²ûqMÌ8¦ÞBógÞê.áÓ9ÀMCmjÅS•Ú\LÇÉ@+Î/ö±;“ö«yZBRçŒòi¸–å&§- •LEÞ‘Ùt0Ìn—vñQe¸ºÚN‡¸zƒÀg£Ñlw4Ð,BÀ6LOóG£A <6ðFöþÊ{˜½xN„ÖºXò_Á| ‹%Àk ôÔÑ|€‘I“B£`ÍdÌ=+Éš9“·#~>MÆâ¿ñži­ýÿûï®sïówÁr3ÉØ',·Úü¼š%·Ž`qµêÁ‰ þç§]ja’†Íz8èVËÃÊÙ­R1^c·º_~§ m¥‹e‚âíòº à…º ŒïÅa+\ýB"ݵuI¡BñôU¸öfAjáÀÄŠ_ ùÜnámäI¬ÿ …åt1üa’…Ù 40¹– ’ÅN!åÚ&¾PÂÿì|š„òA”‡×c×è!.ÏšÍ*32"—… ›g8n½N8ø821¨i62©ÑHl1¨Àœ qH.cÙH‡â¨B‘ÏDŽwZI£m=œåK‚atbÃ&ÞyÉ‚5-+òx4rÈG~ÙIBš™Š†åjºž{wá}Ž‚r̓€h6Ñ]z6å† £<ƒ¶ÆM¥(ÌL”G<“'²Š–ïE´I3æoÃÜç8öU`éuŽWI<ë7_š'¡{­ò̴DzR³mçmù+ÞzÎO"ß …ø‡Œ½få…8÷@` -RY2‘U8dù¡ª«ÈU˜œG}“4¾À­E.©ØVXš6U!DBøFntZÿ(ÆJ‘¡Á9@š®©N»®ËéÔzÚ,ª%0ÂjÒÜçRÍ:Z‚Ò@Ö¹aÿTªUnÅç ÉP’U©´ÒW‡ÚŠÝ8ó¾ì–áC°*fŽy‡ÿ}Žÿ ã^“ì·“‚¦`ô¼Ô?=¹ ©œÿâUÝ’:n:²õïÛ °Aqí2]kC¡ébœÝ¼ìŸÀ¡ÅÍP†e9}C”¼!ÊÕ-þwÁ,ôЄë˜Â£Óp‰*·âÅXn܆áÃ»× k@W㸗â?Õ‰nè`}éã/gòÖcÏDL$ð€-›À4iVô`èø`/£á€9®¢íún‡ÓUÄwÉå¾è^ytX"ÅxJùP´¢Óˆðñ¦nŒ_Pl¡í¿µ‡ƒ*@Òerv¯ª_(o›B Ž6ï*œl0GI*5èe¬NJKm¸ÃÒÅÔIâçm£ûRÛÔ l ËrF]¹3Že‚¾ŒW\¶Á ³À"lÏr£3î©eóZSW'Ô²\Tã…5eQ0H,|ãZáŒÅ2|?Ï¢º+¹ÃfØ 0sˆÉ]CtìGÃvœ8›@b¶öÞ:$y ˜C&£ÎÈÓ´¥K…XŽŽO‹\°o‰ë¤PÍϵe©8“ ¹ƒŽCM®£s¹M1†ô_ž¸±ÝËíÇÃôMŒmDZ]fù{nØ6¥ìdŽÚM‘› ù3ëÐ;rþL¼Çe”Q—.=VþLéØãóA© Ü“&¹ŽÞòK× Øj:²£O¦#ª¹°è,ÐJñºgžÆt‘‰_O¢u´ˆR¯GÉŸYœ utŽsжPC XÊLÀmTPÙ³1¿{VN†j/•j ÿn¶'ê,  NÄ ˆÖ!¤B˜:̺ 8eä'›éŒ;ŒY.…­oT 9œ›(× 6DŽÑ/!8¤*¤Ô›¥&’H,‚dœ°íñ‹ã2€Öç2‡ƒˆ¡R™Ú¦FËÑÝ-ñ.ûgí‡iûLKÕÑAÈu¸‹ÿ•£Ê@À€.À ç®M½ 
²›­€°„¡ÖM‚.@±åñ½\WÇë](·A\YÌ9D«=,ÇŒ<¨Õ€ŒAÎŽ–e€¬´9JSBL06’5‚²Z<3«]!fã0†ÊÄvå>nšÜrìj]a(EzUÞW©Æ¥IÒŽZîÉšS„oʆ´Ù?"»¯§XR¿÷F­zö4“ë,˜Ýu¨Xè¿rpXtG=Íóá[`ÞáÈÆ/ÛÍ“+&#±¯^äªðêŠÿƒóî=(êyüWåñ•â… ²œ ¯+ *ÄXÇ"¦—½Äh)1’ 9ç‹ÄÅï¸G!ü±ÚàÛ‹Jõ“òÆ躼¸þãKôøã`å-pãÏ–Å>cO©¢þ¹¸“í–3$ç™7&gF¾_í¶|I/AáS8Y¬}¯Ú}¹îÓÉÜm‡ º"TP]ª´¢_œÖ^‰ÎÑ{¢Ò9|ìÚ9Té0ø±—C¥ãÔ¢k÷iÅHHî§@¾©PK‹º¢–Vì€Þ­¬‚±‡qvd˜$…{à’Tí€ìOô­S÷Q½Xê?Iúœx_@#)u‡°z›Ì« ;8»ÓhÅD§ ‹kÑså!̯O .@OÎOõýsܧçÙò¾~ ÿÿþ-ñé»ûÁÏŸïovÏï‡óËÝ»ÿêò“wõó.×ÜómøËÇ×›¡éîW?m~ùðöÓÍÇ·_Þ̱ʋO“L½lvó<Í¿|qé/^ßûwYó…æŠ=•›¿ùøz7 ×ÛÁ•»H1¾|ß&ìçùûÏÃï¿äš¾úûx`ÞÌ+ W—_|èýújb _=·ßì\'l½«ËÝàãðÞŸ¾»~EÜ7á8×\LÀOþܵ‚÷/&>}û›73ñvvý’½Wï ÿ»ðþ Ý8×WÌÇ£Æ=¢Qn.ÁèÕë™o¾ß çïÅ´|ïn3¬¡ìãûLËõ͇˻›×c€5®/Ç›\sïæînh^în®Þß½y;»¿~ù:¾z÷p=ùRœùÇá3© Í6ïyº~“•=)r¥`-~<Éä`²Óò/¯@ fü–°ßO°EÖQüÂÔo7ðæSjø†öŽ—{ Qa¼ò¸°ÙA\^‘…ø¡ •# ¥âW:DRq„|l¨‹SÇø´$! èåÂ#•"<]D1…Ìôƒéfþ{bàªp*Á î,˲$«€…qYMž• C °ÜâÛÙ,LÞh¼lf9ƒ! 0²£¡WÆÍ€i ³/ø|HÍ¡_ikø+Œ%øü§—W‹›Ÿ®®^<>ý1Íøþ㟷lðó—ð—7ïï?={–6®‡`ôÅG´QØbø"¤Ä]°[oÂ…z®¡Þzf¡³ÕY„Ø„wÁâ"¾±Ü°Obï§"×ÎAè­*Â3EF+”²ðÞÓ’;ãQÖÏÃU'“I…]”!2÷Vw±p-vXm;´­š’écvH%ZÙ¡%é=f‡LÒ¡ý˜Ú’ÇìБtȳC.éÐ}ÌÝ\‡å'3õÝÊÏî*ŒmlÐí_‹ÏW/N/A/øîÕËÿ.>¤€âq€6ßÎ6ÓålŠ i^튨GEÕ’5*–j!)«>âö4U+ä£Ð¬$ ›+$›á„+)£>—WM+hå ñ¸i9ïÍ_ª{Q`œƒ@ñ®,7»âiو§Ù€'íŠ'ÝOÚˆ'mÀÓꊧµžV#žVž¬+žlxÚxÚ x:]ñtöÁÓiÄÓiÀ“wÅ“ïƒ'oÄ“7àévÅÓÝO·O·‚§¨PAÉw‰hHŠ-©¥”Ä(vfãˆý7Îe‰8ò=ç ÏK“–pfK8ÚÎj ÇZÂÙ-᜖p¼%œÛ§ZÐeèÊú*”ª×L®²¼‹Å²ÅZˆ—¬òè&[¡á"w|ó—ÓûéüôÓ‘lÖÏÌS°íOÒRùé:Fw\M‡É)O®áÕtŒÉKD’·¸‚6ñîAY@“÷‚E€írè1þ4lŠ['%ÓÕ#§$ŒpÑ‹|On²7¦Õ£uÿòC4ŠLí©NšþÀç=ðëé}zÛ"JWðκ8=Ûë츆#˜+o¦msAÊÓùB¼©ŠÒ÷Tų¢Ã÷uN‘òQ€dÇHj”óÜKÝn½Y«\U–rXD®`m4Ûo„·›ðÆh:^ÜŽf¡‡ÔßN—Ï.1¹Hn"kúù̧ãUdH‚ýZHAr?]m¢¨õÑmhcºH­Üšî0õ;ûm°…Æ)Àï[&{ŽÛa»šnvZ”Íe_º QE3{ðvjœ“¶bÀ&Þï€jF@”«ÙÄ>d߆Y¸YU‡ªÓ4+ZrI¿ D&Q܃ÑpŒÜ¡¥ñaÇ\Òó à’ÄwMêó‘äHLò7|‘øéŒ!÷£4sŽ,Ïi«‚(zXEá©ÏÐMèL†¨ä ø¨ˆšÇBTrÀ|TDéÞˆúÁtΉëU^ª'0ÊA?ájѤ€ê‘Ô»¡ë´‹IàÕôŒ¥MÒi³¬aÓQ¢/IÝ’´Ör®Z™å(gïÝ`e [s­¯k.ÐR¶{S(uÊ*2:K 5¦´¢†”´Lª™jÅ U¤J…•¡(•×5œhZñÏj·¥BÕ$—Àê§ \!¥T^ ¥¤¨fP¥\£ øz:Ô ¦„¨ª(n T–‚]€Í.À4›€2g•MRF¹8ª y–Y‰Y´ùjÅ‚Š.’¶–û›¾¹Õ•J¶˜cÏ’±(”Wb¡H¹ü€³§O‡å¤‹›*•µûÖµKvÛz‘ùÝXèÛmÁSƒ´m…X½n 
žW¤»ÕÉi¦{V4÷­H;LMº¾ÛVIž¶ðeÑØºŸâ“ûh™íß¶n¤{eŸÓù›ö0ͦÞ|­}ÎÞ­ÞêÁC6€%ðäIöM¥6«2¡`¨¾Òzx?=ò¥V¾xh\8Ÿµ5÷\ÏHCËf䬗èÁ¼÷hÄÕ¿âxìI¼-#~åÍF{“UþÎå„;LBøØÍzxý©ÏZø )*›{$@0©%ùuÑ:¿:évÒ L®»é¢ö1IÇ÷·ÐÒY÷11Š—SH=- oã3}’ψŒg_Ûå-¨ PWüqÖvPÇ[ãÃ\-y”¹d@²ãkîa8÷ðbiï%B 9îVÊ!6Ál †¯Ôâ¤ËŠM2¾G½Ý[kãä5•¢¶šÆ=†_kÜâ)‰bÐý´âq1oÚ¡írÆ)Õšˆ±’‡/*Ckí§’P“hË BÚŠRÍß(ÁÒDÙLÜÑá ¥rí"ý¥5Ò—NÖku»«‹ÏCઘ”½¶9„’·ñÞ\w/oJFBçz—7žu.k²B‚¼èi¼òæóèú+öÇÌ¿ÃG>Y#¼#7­¼#·™¬÷ã¶­ÛGR±>ŽÑp…w^He€¦12pEÃ’,“’8ŠnTjŒl ƒlð¡¯Y8®ko-£.E7áÊ p ömoÖöfmoÖV ïÍÚÞ¬=íÍÚÞ¬m2k{¦7az¦7a”0 Í_³5"Ä0;i8(–YµUL³ã†1ËõÛG,«)bÙf²]¼Å0Ët‰=m3…õý2œƒìýü!ÈŽ´6áÜ÷6NÝMH " Ÿx^·I*éš„8Ôp™cPiü\™¶î8s‰c0jšqFšÄý¦î¶F€hYöÖ=sQg¶í2ÿMR9TtMÝv¨iqÓ%F–ø¶Ý…R}’ôæ $0ä6tKl‹3“XR !:ú¶© `Yj¨31•ÚÄ‹¼»jÓŽÅ0ò=àÌdŽî˜.çÜ%Hˆ<13çTç&c¦C\Bl3^/g3o¾ü¦èkÄü©c1Æ-‹;®<àèÌ`˜¾ W4Ñ8~ò`¥~S®"˜j™&W¦nÖ¸c딹·]ÛÖJBs#þ¦øÛŽ¥[”R˵ —ʃ¥ÃøÛ:· á”Ø63h’‹\dêü¦ÃïÀÒ œ‡Pn;„+NÛ\‡Ñ·a¿³1IBÀÞi?³äzÍ×þÔÑ ×µLÛnH‰<õ ìMà2®Å¸a8<¿@¼åÿµw-½ãHøž_aä2À"b(¾`O‹Ý½ìqo9¸MÚíÄöÚqÒ»¿~‹z’I1R·“ Œzº-’*’ÅzPõUí£´52ŸÏ¥©·P5jò>$¬Ûœ:YpòXm „UŒFEÖ’I&$f0¯¦äþ¿ÏßvÛ¬Ò‹ÑMvZzkú¥Ê Ѐ 8@;€è¢4|Œ°@Lj‰dŒÀî÷Eð¤ò­³Ê+i…˜Ö SyU(¿ò#B" v6ˆem[ÆKX-÷EV×8ˆÒÝkléÁQÌSÐ]T˜š+,tê)Vˆ–EYQTÕ× ã„ 8Ç#s “PÿöÓܘI`!)NA_7ú­¬š«†Ó*«á¼” W§GK0;ƒü f•Ri0¬Gò–;j£ç©ãõ­ð¯mÝ”¦U&gP “#$B°ñ÷ªVìPTX9’ß`^öÛléÿÄhg4¨šÍ³ŽŠ©Ü¢rÆ‚æXdàÃÂN(.ªfU+0ŸÁ•ñþºV ç°Béðœ(ضAVS‚<™W ëºçd…œ3Œ0È*%À iÙ’€Q@ϯFjçüêÚ•¥šÑ·ñ.¦Žá:éóF‘S»7öù¬þ3§àúÊSGq̆™¤´'brQÕ>’ìmÝïuÖ ó(˜½®+2qÚŸØ»1½Ý{ÆÏù·\oïð!“Ò=,fR{Ç!e”Q“>N@ؤ 0..’G ­”RDNÊ8 Bç 䌷$f ž”DOò0ÞîNR©z1…„Ù“tûUÓ'“c)ýc’ltUgu_ÊÑ!"“í›´££¤¨´´Aæ¼?ÆÖ£C$ɉÔQfQ2.øF‡ˆ‰ªÑΣŠ$m„°¥}W œ"OuÌÞ“¶KK‘]¥o ¼_ëgèáJ¼@çFùû·S©—Ô³È.À#òcॿ[¿f ôuy$@öÈíÔ†Ýk‰b@!AQÕ=tÍÅF4å›Cýç>‹s½ÝŸì×Þ`5°ùò®WCu˜ª··Qw/iåÿ¬žîXOýÒ¤ß}üMãq™2‹“zÎ;ú³ÍØUâ®ýþÁ‚™š§Ým’#Eª½.oí÷Y7ŒwνäÛœ:{˜©nvGøôå¥#ÉöŽS2®Ç¨W‚^ êx¯ËÕ°¸`VÆG]õ¸òçÞó…Â2À¾´«²ïì4ÃÈ{ J•BRhsó(sƱ¿Žøõ°P^Õ.V¿°Ä8”7å¯î‹—ãí¼ï‘žy1š—ù„6}^Áˆå¤Jøtž:(L·Ñ|F|gŠ*§þ)æ9A„â\0s…m>%4­ÞZ^±É©}m‚% ;w³¹…½ÕZqoÆi¥Y]¾ÝÝ-xnÆò¤f¼¶Ö±)#TÁnƒ~wpÕcòôÃÑ=‡þZ}¤¿*~TŸì«hÄáÙ¼®?é—±áŽ+èãv¬…ûia–ÉuÎý<“µÇÅiá™Y9ÄŒS ôÈÀ1%õót«×S­ýÍo÷x×þ<;ÜàwÙ_ $¼ß\©Ò¨ùÎÚXrarš-òs%¨Æi/uuu[m½ø5‹²Â98Oÿl÷ô¯ÝÃ?ÖFU}ùíöey¸}}}…Ó¹ÚÜ®N‡ jÞqûþöûõ°ó¿ËDñ‡.!Ê ø#B‰&ÍYÛ.Ö»>RÉ Ž+d|úV0tjœÛþŸsÝ!{eN'Áø¨ÉÂÓzJ{¶ý 
ÏàM*Å¿1yCL/°#"Þ§ÄuøhyâƒR!­CjÎã!.FÔ_ÙoäBÅîÞ»Õñ_¨ n4Bw*/D|N‹fŸŸf=®N”}TÁºæˆž-…ûûHû=ÐÙîcƪlaO?ïKb½ë,ÁíïÑÞÀ—–±û½ØnÖÛãUýÿÎZxºÀ$u¯XLI3pLeÃj÷§‡ràkë¸Ô?‚—Q¬6Ùª8<¯ÿX¯–Ï&TÔ-ïÒ6-o -3«®„ªVk´;<ÜÚËcaÀ§ XÁçîñP<í^Šì´}„ ›ê7˜Á»Ô.^³3AÆ`Î-V«ÅÓrS,¾þ¯+¢ipÑUYø²ÐK³nŽÓÙy›7Æÿ¼)Áe>ÃÏ}îìP¾d´*„wÝîò#ؘÑP §eÔªn^| ~æ\õp®àñ^âˆng²wÕâxÎÊëz³¾2økV¹žÃ¢ih-ðœËe9Jõ³…A¬‡t[*"á†Ô?uwE>Üeìv³},SÞ3Û#/¨[V –@t5ê¶aêÅNå=DuyA‰O™~¹.7´èͪmo~¼Y|©É»éHûý÷øÞ=Èû ôúéó\â}¶Ë¹Y—n±ˆÆvéö1/ʬńsrZ•Õ¶®l`îð(|’“à¿ö½±®}o»ÝsFÁæ¯ÿósžŸ·Ïr(ýš}írÑÛûû~U§•ªTÿªÈ©g|ÿZ€phe~©6ka[nïë2n¦,XV5ºrù°×Ÿ¦òš¹²h zñ< iN@Ò@>2„Qi ‚­Î¹Àæ 4Ã(Øœ*Š(’ç,O=“ã–)8n¦äGÅq³<ÇÍJÀåÇýî(heÀic(h&•ýPè³:Ì‹Bÿ8n# ÇqÜL/Žû|(i#iÆQÒB9 éfµÎs%š–0WÅ1†¥ ð¥Ì5׎NY-GÐÀ‡(‘m#ïJÒD:9ÑÀŒ$ç’kÔ«X„f Xˆ¸žLà»Ç/î)¤1f ù)8N>Üþ¹2ÁHž F2' ‘>û,v«d ìJPY%CðnºA’ÃsìÀrß Ü¬E¸YÛÜhæ_Œ¹‡sA æž6Xu¿4" Ý¤"`a!©j¾ðž ÒD–v&yDq£Æ„ƒÔ$ºÄZ·ÖÍ™€Ö’s´ RiæÍ÷ÂYó$œµæ.̺YÅ^µ8ÔÚm›åsNPF1Í®AÂs^ )ïG:lΜ‡ ‰Û}fè2èìqè²d^èò;f2 \c“ʃõ-Ièˆ íÏdðùíŸ xm²^ï6E±¯ !myy8Ù$Àa†sP _Ò¨XF¹¬³® ã–΂¾× è{j°9l˜²\jÑæžú ‰#¡Ø:ÇXQc–¦ÀTè}°öåÛ>í_üsü_4q€N\r\r\rüŠIÒò’Œ`Â83P\#’3eˆÙyI‰pI‰ K/).).).).)Ú®—”—”Ÿ2%BŠ¥ÿgÍ«·†G»ÇíñKV‡KV‡÷Ïêðc»kë =­ïï³mñüº;l®ŠíËߘ¨^_ à±lì­÷XY—ªšy!aà…Ë Vϧƒ˜ž¾ýÑ>[}3±&ìóT?Ÿîª5ã–Ja q))Î1áTcï=ÃQL´Tv€Ýõ+ ¾ÌÊ yoÁ‰[gÌþèNtv#LÌ‚†#{R&$€%³Ò-Ÿû“„âˆqB$ææî>P«A0¤¥äBW!á+~kÁAVe%,)$É짃Îûåj‚¥ú— M 4ø?4»C \charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/repo.go0000664000175000017500000000337012672604507022504 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. // Package charmrepo implements access to charm repositories. package charmrepo // import "gopkg.in/juju/charmrepo.v2-unstable" import ( "fmt" "github.com/juju/loggo" "gopkg.in/juju/charm.v6-unstable" ) var logger = loggo.GetLogger("juju.charm.charmrepo") // Interface represents a charm repository (a collection of charms). type Interface interface { // Get returns the charm referenced by curl. Get(curl *charm.URL) (charm.Charm, error) // GetBundle returns the bundle referenced by curl. 
GetBundle(curl *charm.URL) (charm.Bundle, error) // Resolve resolves the given reference to a canonical form which refers // unambiguously to a specific revision of an entity. If the entity // is a charm that may support more than one series, canonRef.Series will // be empty and supportedSeries will hold the list of series supported by // the charm with the preferred series first. // If ref holds a series, then Resolve will always ensure that the returned // entity supports that series. Resolve(ref *charm.URL) (canonRef *charm.URL, supportedSeries []string, err error) } // InferRepository returns a charm repository inferred from the provided charm // or bundle reference. // Charm store references will use the provided parameters. // Local references will use the provided path. func InferRepository(ref *charm.URL, charmStoreParams NewCharmStoreParams, localRepoPath string) (Interface, error) { switch ref.Schema { case "cs": return NewCharmStore(charmStoreParams), nil case "local": return NewLocalRepository(localRepoPath) } // TODO fix this error message to reference bundles too? return nil, fmt.Errorf("unknown schema for charm reference %q", ref) } charm-2.1.1/src/gopkg.in/juju/charmrepo.v2-unstable/charmpath.go0000664000175000017500000000476112672604507023513 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package charmrepo // import "gopkg.in/juju/charmrepo.v2-unstable" import ( "os" "path/filepath" "strings" "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" ) func isNotExistsError(err error) bool { if os.IsNotExist(err) { return true } // On Windows, we get a path error due to a GetFileAttributesEx syscall. // To avoid being too proscriptive, we'll simply check for the error // type and not any content. if _, ok := err.(*os.PathError); ok { return true } return false } func isValidCharmOrBundlePath(path string) bool { //Exclude relative paths. 
return strings.HasPrefix(path, ".") || filepath.IsAbs(path) } // NewCharmAtPath returns the charm represented by this path, // and a URL that describes it. If the series is empty, // the charm's default series is used, if any. // Otherwise, the series is validated against those the // charm declares it supports. func NewCharmAtPath(path, series string) (charm.Charm, *charm.URL, error) { return NewCharmAtPathForceSeries(path, series, false) } // NewCharmAtPathForSeries returns the charm represented by this path, // and a URL that describes it. If the series is empty, // the charm's default series is used, if any. // Otherwise, the series is validated against those the // charm declares it supports. If force is true, then any // series validation errors are ignored and the requested // series is used regardless. Note though that is it still // an error if the series is not specified and the charm does not // define any. func NewCharmAtPathForceSeries(path, series string, force bool) (charm.Charm, *charm.URL, error) { if path == "" { return nil, nil, errgo.New("empty charm path") } _, err := os.Stat(path) if isNotExistsError(err) { return nil, nil, os.ErrNotExist } else if err == nil && !isValidCharmOrBundlePath(path) { return nil, nil, InvalidPath(path) } ch, err := charm.ReadCharm(path) if err != nil { if isNotExistsError(err) { return nil, nil, CharmNotFound(path) } return nil, nil, err } absPath, err := filepath.Abs(path) if err != nil { return nil, nil, err } _, name := filepath.Split(absPath) meta := ch.Meta() seriesToUse := series if !force || series == "" { seriesToUse, err = charm.SeriesForCharm(series, meta.Series) if err != nil { return nil, nil, err } } url := &charm.URL{ Schema: "local", Name: name, Series: seriesToUse, Revision: ch.Revision(), } return ch, url, nil } charm-2.1.1/src/gopkg.in/juju/environschema.v1/0000775000175000017500000000000012672604577020261 5ustar 
marcomarcocharm-2.1.1/src/gopkg.in/juju/environschema.v1/sample.go0000664000175000017500000001120412672604577022067 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package environschema import ( "bytes" "fmt" "go/doc" "io" "reflect" "sort" "strings" "unicode" "gopkg.in/yaml.v2" ) // SampleYAML writes YAML output to w, indented by indent spaces // that holds the attributes in attrs with descriptions found // in the given fields. An entry for any attribute in fields not // in attrs will be generated but commented out. func SampleYAML(w io.Writer, indent int, attrs map[string]interface{}, fields Fields) error { indentStr := strings.Repeat(" ", indent) orderedFields := make(fieldsByGroup, 0, len(fields)) for name, f := range fields { orderedFields = append(orderedFields, attrWithName{ name: name, Attr: f, }) } sort.Sort(orderedFields) for i, f := range orderedFields { if i > 0 { w.Write(nl) } writeSampleDescription(w, f.Attr, indentStr+"# ") val, ok := attrs[f.name] if ok { fmt.Fprintf(w, "%s:", f.name) indentVal(w, val, indentStr) } else { if f.Example != nil { val = f.Example } else { val = sampleValue(f.Type) } fmt.Fprintf(w, "# %s:", f.name) indentVal(w, val, indentStr+"# ") } } return nil } const textWidth = 80 var ( space = []byte(" ") nl = []byte("\n") ) // writeSampleDescription writes the given attribute to w // prefixed by the given indentation string. func writeSampleDescription(w io.Writer, f Attr, indent string) { previousText := false // section marks the start of a new section of the comment; // sections are separated with empty lines. 
section := func() { if previousText { fmt.Fprintf(w, "%s\n", strings.TrimRightFunc(indent, unicode.IsSpace)) } previousText = true } descr := strings.TrimSpace(f.Description) if descr != "" { section() doc.ToText(w, descr, indent, " ", textWidth-len(indent)) } vars := make([]string, 0, len(f.EnvVars)+1) if f.EnvVar != "" { vars = append(vars, "$"+f.EnvVar) } for _, v := range f.EnvVars { vars = append(vars, "$"+v) } if len(vars) > 0 { section() fmt.Fprintf(w, "%sDefault value taken from %s.\n", indent, wordyList(vars)) } attrText := "" switch { case f.Secret && f.Immutable: attrText = "immutable and considered secret" case f.Secret: attrText = "considered secret" case f.Immutable: attrText = "immutable" } if attrText != "" { section() fmt.Fprintf(w, "%sThis attribute is %s.\n", indent, attrText) } section() } // emptyLine writes an empty line prefixed with the given // indent, ensuring that it doesn't have any trailing white space. func emptyLine(w io.Writer, indent string) { fmt.Fprintf(w, "%s\n", strings.TrimRightFunc(indent, unicode.IsSpace)) } // wordyList formats the given slice in the form "x, y or z". func wordyList(words []string) string { if len(words) == 0 { return "" } if len(words) == 1 { return words[0] } return strings.Join(words[0:len(words)-1], ", ") + " or " + words[len(words)-1] } var groupPriority = map[Group]int{ ProviderGroup: 3, AccountGroup: 2, EnvironGroup: 1, } type attrWithName struct { name string Attr } type fieldsByGroup []attrWithName func (f fieldsByGroup) Len() int { return len(f) } func (f fieldsByGroup) Swap(i0, i1 int) { f[i0], f[i1] = f[i1], f[i0] } func (f fieldsByGroup) Less(i0, i1 int) bool { f0, f1 := &f[i0], &f[i1] pri0, pri1 := groupPriority[f0.Group], groupPriority[f1.Group] if pri0 != pri1 { return pri0 > pri1 } return f0.name < f1.name } // indentVal writes the given YAML-formatted value x to w and prefixing // the second and subsequent lines with the given ident. 
func indentVal(w io.Writer, x interface{}, indentStr string) { data, err := yaml.Marshal(x) if err != nil { panic(fmt.Errorf("cannot marshal YAML", err)) } if len(data) == 0 { panic("YAML cannot marshal to empty string") } indent := []byte(indentStr + " ") if canUseSameLine(x) { w.Write(space) } else { w.Write(nl) w.Write(indent) } data = bytes.TrimSuffix(data, nl) lines := bytes.Split(data, nl) for i, line := range lines { if i > 0 { w.Write(indent) } w.Write(line) w.Write(nl) } } func canUseSameLine(x interface{}) bool { if x == nil { return true } v := reflect.ValueOf(x) switch v.Kind() { case reflect.Map: return v.Len() == 0 case reflect.Slice: return v.Len() == 0 } return true } func yamlQuote(s string) string { data, _ := yaml.Marshal(s) return strings.TrimSpace(string(data)) } func sampleValue(t FieldType) interface{} { switch t { case Tstring: return "" case Tbool: return false case Tint: return 0 case Tattrs: return map[string]string{ "example": "value", } default: panic(fmt.Errorf("unknown schema type %q", t)) } } charm-2.1.1/src/gopkg.in/juju/environschema.v1/dependencies.tsv0000664000175000017500000000066212672604577023451 0ustar marcomarcogithub.com/juju/loggo git dc8e19f7c70a62a59c69c40f85b8df09ff20742c 2014-11-17T04:05:26Z github.com/juju/schema git e32c2dd75b909a7b51aa85f980e8231a7d1a953a 2015-06-02T10:19:02Z github.com/juju/testing git c7042d828963caa252862b759ef56ada297e8323 2015-04-21T10:32:42Z gopkg.in/check.v1 git 64131543e7896d5bcc6bd5a76287eb75ea96c673 2014-10-24T13:38:53Z gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z charm-2.1.1/src/gopkg.in/juju/environschema.v1/form/0000775000175000017500000000000012672604577021224 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/environschema.v1/form/interaction_test.go0000664000175000017500000001127512672604577025137 0ustar marcomarcopackage form_test import ( "fmt" "strconv" "strings" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" ) // 
newInteractionChecker returns a object that can be used to check a sequence of // IO interactions. Expected input from the user is marked with the // given user input marker (for example a distinctive unicode character // that will not occur in the rest of the text) and runs to the end of a // line. // // The returned interactionChecker is an io.ReadWriteCloser that checks that read // and write corresponds to the expected action in the sequence. // // After all interaction is done, the interactionChecker should be closed to // check that no more interactions are expected. // // Any failures will result in c.Fatalf being called. // // For example given the interactionChecker created with: // // checker := newInteractionChecker(c, "»", `What is your name: »Bob // And your age: »148 // You're very old, Bob! // `) // // The following code will pass the checker: // // fmt.Fprintf(checker, "What is your name: ") // buf := make([]byte, 100) // n, _ := checker.Read(buf) // name := strings.TrimSpace(string(buf[0:n])) // fmt.Fprintf(checker, "And your age: ") // n, _ = checker.Read(buf) // age, err := strconv.Atoi(strings.TrimSpace(string(buf[0:n]))) // c.Assert(err, gc.IsNil) // if age > 90 { // fmt.Fprintf(checker, "You're very old, %s!\n", name) // } // checker.Close() func newInteractionChecker(c *gc.C, userInputMarker, text string) *interactionChecker { var ios []ioInteraction for { i := strings.Index(text, userInputMarker) foundInput := i >= 0 if i == -1 { i = len(text) } if i > 0 { ios = append(ios, ioInteraction{ isInput: false, data: text[0:i], }) text = text[i:] } if !foundInput { break } text = text[len(userInputMarker):] endLine := strings.Index(text, "\n") if endLine == -1 { c.Errorf("no newline found after expected input %q", text) } ios = append(ios, ioInteraction{ isInput: true, data: text[0 : endLine+1], }) text = text[endLine+1:] } return &interactionChecker{ c: c, ios: ios, } } type ioInteraction struct { isInput bool data string } type interactionChecker 
struct { c *gc.C ios []ioInteraction } // Read implements io.Reader by producing the next user // input data from the interactionChecker. It raises an fatal error if // the currently expected action is not a read. func (c *interactionChecker) Read(buf []byte) (int, error) { if len(c.ios) == 0 { c.c.Fatalf("got read when expecting interaction to have finished") } io := &c.ios[0] if !io.isInput { c.c.Fatalf("got read when expecting write %q", io.data) } n := copy(buf, io.data) io.data = io.data[n:] if len(io.data) == 0 { c.ios = c.ios[1:] } return n, nil } // Write implements io.Writer by checking that the written // data corresponds with the next expected text // to be written. func (c *interactionChecker) Write(buf []byte) (int, error) { if len(c.ios) == 0 { c.c.Fatalf("got write %q when expecting interaction to have finished", buf) } io := &c.ios[0] if io.isInput { c.c.Fatalf("got write %q when expecting read %q", buf, io.data) } if len(buf) > len(io.data) { c.c.Fatalf("write too long; got %q want %q", buf, io.data) } checkData := io.data[0:len(buf)] if string(buf) != checkData { c.c.Fatalf("unexpected write got %q want %q", buf, io.data) } io.data = io.data[len(buf):] if len(io.data) == 0 { c.ios = c.ios[1:] } return len(buf), nil } // Close implements io.Closer by checking that all expected interactions // have been completed. func (c *interactionChecker) Close() error { if len(c.ios) == 0 { return nil } io := &c.ios[0] what := "write" if io.isInput { what = "read" } c.c.Fatalf("filler terminated too early; expected %s %q", what, io.data) return nil } type interactionCheckerSuite struct{} var _ = gc.Suite(&interactionCheckerSuite{}) func (*interactionCheckerSuite) TestNewIOChecker(c *gc.C) { checker := newInteractionChecker(c, "»", `What is your name: »Bob And your age: »148 You're very old, Bob! 
`) c.Assert(checker.ios, jc.DeepEquals, []ioInteraction{{ data: "What is your name: ", }, { isInput: true, data: "Bob\n", }, { data: "And your age: ", }, { isInput: true, data: "148\n", }, { data: "You're very old, Bob!\n", }}) fmt.Fprintf(checker, "What is your name: ") buf := make([]byte, 100) n, _ := checker.Read(buf) name := strings.TrimSpace(string(buf[0:n])) fmt.Fprintf(checker, "And your age: ") n, _ = checker.Read(buf) age, err := strconv.Atoi(strings.TrimSpace(string(buf[0:n]))) c.Assert(err, gc.IsNil) if age > 90 { fmt.Fprintf(checker, "You're very old, %s!\n", name) } checker.Close() c.Assert(checker.ios, gc.HasLen, 0) } charm-2.1.1/src/gopkg.in/juju/environschema.v1/form/form.go0000664000175000017500000002035112672604577022517 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. // Package form provides ways to create and process forms based on // environschema schemas. // // The API exposed by this package is not currently subject // to the environschema.v1 API compatibility guarantees. package form import ( "fmt" "io" "os" "sort" "strings" "github.com/juju/schema" "golang.org/x/crypto/ssh/terminal" "gopkg.in/errgo.v1" "gopkg.in/juju/environschema.v1" ) // Form describes a form based on a schema. type Form struct { // Title holds the title of the form, giving contextual // information for the fields. Title string // Fields holds the fields that make up the body of the form. Fields environschema.Fields } // Filler represents an object that can fill out a Form. The the form is // described in f. The returned value should be compatible with the // schema defined in f.Fields. type Filler interface { Fill(f Form) (map[string]interface{}, error) } // SortedFields returns the given fields sorted first by group name. // Those in the same group are sorted so that secret fields come after // non-secret ones, finally the fields are sorted by name. 
func SortedFields(fields environschema.Fields) []NamedAttr { fs := make(namedAttrSlice, 0, len(fields)) for k, v := range fields { fs = append(fs, NamedAttr{ Name: k, Attr: v, }) } sort.Sort(fs) return fs } // NamedAttr associates a name with an environschema.Field. type NamedAttr struct { Name string environschema.Attr } type namedAttrSlice []NamedAttr func (s namedAttrSlice) Len() int { return len(s) } func (s namedAttrSlice) Less(i, j int) bool { a1 := &s[i] a2 := &s[j] if a1.Group != a2.Group { return a1.Group < a2.Group } if a1.Secret != a2.Secret { return a2.Secret } return a1.Name < a2.Name } func (s namedAttrSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // IOFiller is a Filler based around an io.Reader and io.Writer. type IOFiller struct { // In is used to read responses from the user. If this is nil, // then os.Stdin will be used. In io.Reader // Out is used to write prompts and information to the user. If // this is nil, then os.Stdout will be used. Out io.Writer // MaxTries is the number of times to attempt to get a valid // response when prompting. If this is 0 then the default of 3 // attempts will be used. MaxTries int // ShowDescriptions holds whether attribute descriptions // should be printed as well as the attribute names. ShowDescriptions bool // GetDefault returns the default value for the given attribute, // which must have been coerced using the given checker. // If there is no default, it should return (nil, "", nil). // // The display return value holds the string to use // to describe the value of the default. If it's empty, // fmt.Sprint(val) will be used. // // If GetDefault returns an error, it will be printed as a warning. // // If GetDefault is nil, DefaultFromEnv will be used. GetDefault func(attr NamedAttr, checker schema.Checker) (val interface{}, display string, err error) } // Fill implements Filler.Fill by writing the field information to // f.Out, then reading input from f.In. 
If f.In is a terminal and the // attribute is secret, echo will be disabled. // // Fill processes fields by first sorting them and then prompting for // the value of each one in turn. // // The fields are sorted by first by group name. Those in the same group // are sorted so that secret fields come after non-secret ones, finally // the fields are sorted by description. // // Each field will be prompted for, then the returned value will be // validated against the field's type. If the returned value does not // validate correctly it will be prompted again up to MaxTries before // giving up. func (f IOFiller) Fill(form Form) (map[string]interface{}, error) { if len(form.Fields) == 0 { return map[string]interface{}{}, nil } if f.MaxTries == 0 { f.MaxTries = 3 } if f.In == nil { f.In = os.Stdin } if f.Out == nil { f.Out = os.Stdout } if f.GetDefault == nil { f.GetDefault = DefaultFromEnv } fields := SortedFields(form.Fields) values := make(map[string]interface{}, len(fields)) checkers := make([]schema.Checker, len(fields)) allMandatory := true for i, field := range fields { checker, err := field.Checker() if err != nil { return nil, errgo.Notef(err, "invalid field %s", field.Name) } checkers[i] = checker allMandatory = allMandatory && field.Mandatory } if form.Title != "" { f.printf("%s\n", form.Title) } if allMandatory { f.printf("Press return to select a default value.\n") } else { f.printf("Press return to select a default value, or enter - to omit an entry.\n") } for i, field := range fields { v, err := f.promptLoop(field, checkers[i], allMandatory) if err != nil { return nil, errgo.Notef(err, "cannot complete form") } if v != nil { values[field.Name] = v } } return values, nil } func (f IOFiller) promptLoop(attr NamedAttr, checker schema.Checker, allMandatory bool) (interface{}, error) { if f.ShowDescriptions && attr.Description != "" { f.printf("\n%s\n", strings.TrimSpace(attr.Description)) } defVal, defDisplay, err := f.GetDefault(attr, checker) if err != nil { 
f.printf("Warning: invalid default value: %v\n", err) } if defVal != nil && defDisplay == "" { defDisplay = fmt.Sprint(defVal) } for i := 0; i < f.MaxTries; i++ { vStr, err := f.prompt(attr, checker, defDisplay) if err != nil { return nil, errgo.Mask(err) } if vStr == "" { // An empty value has been entered, signifying // that the user has chosen the default value. // If there is no default and the attribute is mandatory, // we treat it as a potentially valid value and // coerce it below. if defVal != nil { return defVal, nil } if !attr.Mandatory { // No value entered but the attribute is not mandatory. return nil, nil } } else if vStr == "-" && !allMandatory { // The user has entered a hyphen to cause // the attribute to be omitted. if attr.Mandatory { f.printf("Cannot omit %s because it is mandatory.\n", attr.Name) continue } f.printf("Value %s omitted.\n", attr.Name) return nil, nil } v, err := checker.Coerce(vStr, nil) if err == nil { return v, nil } f.printf("Invalid input: %v\n", err) } return nil, errgo.New("too many invalid inputs") } func (f IOFiller) printf(format string, a ...interface{}) { fmt.Fprintf(f.Out, format, a...) 
} func (f IOFiller) prompt(attr NamedAttr, checker schema.Checker, def string) (string, error) { prompt := attr.Name if def != "" { if attr.Secret { def = strings.Repeat("*", len(def)) } prompt = fmt.Sprintf("%s [%s]", attr.Name, def) } f.printf("%s: ", prompt) input, err := readLine(f.Out, f.In, attr.Secret) if err != nil { return "", errgo.Notef(err, "cannot read input") } return input, nil } func readLine(w io.Writer, r io.Reader, secret bool) (string, error) { if f, ok := r.(*os.File); ok && secret && terminal.IsTerminal(int(f.Fd())) { defer w.Write([]byte{'\n'}) line, err := terminal.ReadPassword(int(f.Fd())) return string(line), err } var input []byte for { var buf [1]byte n, err := r.Read(buf[:]) if n == 1 { if buf[0] == '\n' { break } input = append(input, buf[0]) } if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } return "", errgo.Mask(err) } } return strings.TrimRight(string(input), "\r"), nil } // DefaultFromEnv returns any default value found in the environment for // the given attribute. // // The environment variables specified in attr will be checked in order // and the first non-empty value found is coerced using the given // checker and returned. func DefaultFromEnv(attr NamedAttr, checker schema.Checker) (val interface{}, _ string, err error) { val, envVar := defaultFromEnv(attr) if val == "" { return nil, "", nil } v, err := checker.Coerce(val, nil) if err != nil { return nil, "", errgo.Notef(err, "cannot convert $%s", envVar) } return v, "", nil } func defaultFromEnv(attr NamedAttr) (val, envVar string) { if attr.EnvVar != "" { if val := os.Getenv(attr.EnvVar); val != "" { return val, attr.EnvVar } } for _, envVar := range attr.EnvVars { if val := os.Getenv(envVar); val != "" { return val, envVar } } return "", "" } charm-2.1.1/src/gopkg.in/juju/environschema.v1/form/form_test.go0000664000175000017500000005155512672604577023570 0ustar marcomarco// Copyright 2015 Canonical Ltd. 
// Licensed under the LGPLv3, see LICENCE file for details. package form_test import ( "bytes" "strings" "github.com/juju/schema" "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/juju/environschema.v1" "gopkg.in/juju/environschema.v1/form" ) type formSuite struct { testing.OsEnvSuite } var _ = gc.Suite(&formSuite{}) var _ form.Filler = form.IOFiller{} var ioFillerTests = []struct { about string form form.Form filler form.IOFiller environment map[string]string expectIO string expectResult map[string]interface{} expectError string }{{ about: "no fields, no interaction", form: form.Form{ Title: "something", }, expectIO: "", expectResult: map[string]interface{}{}, }, { about: "single field no default", form: form.Form{ Fields: environschema.Fields{ "A": environschema.Attr{ Type: environschema.Tstring, Description: "A description", }, }, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. |A: »B `, expectResult: map[string]interface{}{ "A": "B", }, }, { about: "single field with default", form: form.Form{ Fields: environschema.Fields{ "A": environschema.Attr{ Type: environschema.Tstring, Description: "A description", EnvVar: "A", }, }, }, environment: map[string]string{ "A": "C", }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. |A [C]: »B `, expectResult: map[string]interface{}{ "A": "B", }, }, { about: "single field with default no input", form: form.Form{ Fields: environschema.Fields{ "A": environschema.Attr{ Type: environschema.Tstring, Description: "A description", EnvVar: "A", }, }, }, environment: map[string]string{ "A": "C", }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. 
|A [C]: » `, expectResult: map[string]interface{}{ "A": "C", }, }, { about: "secret single field with default no input", form: form.Form{ Fields: environschema.Fields{ "A": environschema.Attr{ Type: environschema.Tstring, Description: "A description", EnvVar: "A", Secret: true, }, }, }, environment: map[string]string{ "A": "password", }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. |A [********]: » `, expectResult: map[string]interface{}{ "A": "password", }, }, { about: "windows line endings", form: form.Form{ Fields: environschema.Fields{ "A": environschema.Attr{ Type: environschema.Tstring, Description: "A description", }, }, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. |A: »B` + "\r" + ` `, expectResult: map[string]interface{}{ "A": "B", }, }, { about: "with title", form: form.Form{ Title: "Test Title", Fields: environschema.Fields{ "A": environschema.Attr{ Type: environschema.Tstring, Description: "A description", }, }, }, expectIO: ` |Test Title |Press return to select a default value, or enter - to omit an entry. |A: »hello `, expectResult: map[string]interface{}{ "A": "hello", }, }, { about: "title with prompts", form: form.Form{ Title: "Test Title", Fields: environschema.Fields{ "A": environschema.Attr{ Type: environschema.Tstring, Description: "A description", }, }, }, expectIO: ` |Test Title |Press return to select a default value, or enter - to omit an entry. 
|A: »B `, expectResult: map[string]interface{}{ "A": "B", }, }, { about: "correct ordering", form: form.Form{ Fields: environschema.Fields{ "a1": environschema.Attr{ Group: "A", Description: "z a1 description", Type: environschema.Tstring, }, "c1": environschema.Attr{ Group: "A", Description: "c1 description", Type: environschema.Tstring, }, "b1": environschema.Attr{ Group: "A", Description: "b1 description", Type: environschema.Tstring, Secret: true, }, "a2": environschema.Attr{ Group: "B", Description: "a2 description", Type: environschema.Tstring, }, "c2": environschema.Attr{ Group: "B", Description: "c2 description", Type: environschema.Tstring, }, "b2": environschema.Attr{ Group: "B", Description: "b2 description", Type: environschema.Tstring, Secret: true, }, }, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. |a1: »a1 |c1: »c1 |b1: »b1 |a2: »a2 |c2: »c2 |b2: »b2 `, expectResult: map[string]interface{}{ "a1": "a1", "b1": "b1", "c1": "c1", "a2": "a2", "b2": "b2", "c2": "c2", }, }, { about: "string type", form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tstring, }, "b": environschema.Attr{ Description: "b description", Type: environschema.Tstring, Mandatory: true, }, "c": environschema.Attr{ Description: "c description", Type: environschema.Tstring, }, }, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. 
|a: » |b: » |c: »something `, expectResult: map[string]interface{}{ "b": "", "c": "something", }, }, { about: "bool type", form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tbool, }, "b": environschema.Attr{ Description: "b description", Type: environschema.Tbool, }, "c": environschema.Attr{ Description: "c description", Type: environschema.Tbool, }, "d": environschema.Attr{ Description: "d description", Type: environschema.Tbool, }, }, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. |a: »true |b: »false |c: »1 |d: »0 `, expectResult: map[string]interface{}{ "a": true, "b": false, "c": true, "d": false, }, }, { about: "int type", form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tint, }, "b": environschema.Attr{ Description: "b description", Type: environschema.Tint, }, "c": environschema.Attr{ Description: "c description", Type: environschema.Tint, }, }, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. |a: »0 |b: »-1000000 |c: »1000000 `, expectResult: map[string]interface{}{ "a": 0, "b": -1000000, "c": 1000000, }, }, { about: "attrs type", form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tattrs, }, "b": environschema.Attr{ Description: "b description", Type: environschema.Tattrs, }, }, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. 
|a: »x=y z= foo=bar |b: » `, expectResult: map[string]interface{}{ "a": map[string]string{ "x": "y", "foo": "bar", "z": "", }, }, }, { about: "don't mention hyphen if all entries are mandatory", form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tint, Mandatory: true, }, "b": environschema.Attr{ Description: "b description", Type: environschema.Tstring, Mandatory: true, }, }, }, expectIO: ` |Press return to select a default value. |a: »12 |b: »- `, expectResult: map[string]interface{}{ "a": 12, "b": "-", }, }, { about: "too many bad responses", form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tint, Mandatory: true, }, }, }, expectIO: ` |Press return to select a default value. |a: »one |Invalid input: expected number, got string("one") |a: » |Invalid input: expected number, got string("") |a: »three |Invalid input: expected number, got string("three") `, expectError: `cannot complete form: too many invalid inputs`, }, { about: "too many bad responses with maxtries=1", form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tint, }, }, }, filler: form.IOFiller{ MaxTries: 1, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. |a: »one |Invalid input: expected number, got string("one") `, expectError: `cannot complete form: too many invalid inputs`, }, { about: "bad then good input", form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tint, }, }, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. 
|a: »one |Invalid input: expected number, got string("one") |a: »two |Invalid input: expected number, got string("two") |a: »3 `, expectResult: map[string]interface{}{ "a": 3, }, }, { about: "empty value entered for optional attribute with no default", form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tstring, }, "b": environschema.Attr{ Description: "b description", Type: environschema.Tint, }, "c": environschema.Attr{ Description: "c description", Type: environschema.Tbool, }, }, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. |a: » |b: » |c: » `, expectResult: map[string]interface{}{}, }, { about: "unsupported type", form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: "bogus", }, }, }, expectError: `invalid field a: invalid type "bogus"`, }, { about: "no interaction is done if any field has an invalid type", form: form.Form{ Title: "some title", Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tstring, }, "b": environschema.Attr{ Description: "b description", Type: "bogus", }, }, }, expectError: `invalid field b: invalid type "bogus"`, }, { about: "invalid default value is ignored", environment: map[string]string{ "a": "three", }, form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tint, EnvVars: []string{"a"}, }, }, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. 
|Warning: invalid default value: cannot convert $a: expected number, got string("three") |a: »99 `, expectResult: map[string]interface{}{ "a": 99, }, }, { about: "entering a hyphen causes an optional value to be omitted", environment: map[string]string{ "a": "29", }, form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tint, EnvVar: "a", }, }, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. |a [29]: »- |Value a omitted. `, expectResult: map[string]interface{}{}, }, { about: "entering a hyphen causes a mandatory value to be fail when there are other optional values", form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tint, Mandatory: true, }, "b": environschema.Attr{ Description: "b description", Type: environschema.Tint, }, }, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. |a: »- |Cannot omit a because it is mandatory. |a: »123 |b: »99 `, expectResult: map[string]interface{}{ "a": 123, "b": 99, }, }, { about: "descriptions can be enabled with ShowDescriptions", form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: " The a attribute\nis pretty boring.\n\n", Type: environschema.Tstring, Mandatory: true, }, "b": environschema.Attr{ Type: environschema.Tint, }, }, }, filler: form.IOFiller{ ShowDescriptions: true, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. | |The a attribute |is pretty boring. |a: »- |Cannot omit a because it is mandatory. 
|a: »value |b: »99 `, expectResult: map[string]interface{}{ "a": "value", "b": 99, }, }, { about: "custom GetDefault value success", form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tstring, }, }, }, filler: form.IOFiller{ GetDefault: func(attr form.NamedAttr, checker schema.Checker) (interface{}, string, error) { return "hello", "", nil }, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. |a [hello]: » `, expectResult: map[string]interface{}{ "a": "hello", }, }, { about: "custom GetDefault value error", form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tstring, }, }, }, filler: form.IOFiller{ GetDefault: func(attr form.NamedAttr, checker schema.Checker) (interface{}, string, error) { return nil, "", errgo.New("some error") }, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. |Warning: invalid default value: some error |a: »value `, expectResult: map[string]interface{}{ "a": "value", }, }, { about: "custom GetDefault value with custom display", form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tint, }, }, }, filler: form.IOFiller{ GetDefault: func(attr form.NamedAttr, checker schema.Checker) (interface{}, string, error) { return 99, "ninety-nine", nil }, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. 
|a [ninety-nine]: » `, expectResult: map[string]interface{}{ "a": 99, }, }, { about: "custom GetDefault value with empty display and non-string type", form: form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tint, }, }, }, filler: form.IOFiller{ GetDefault: func(attr form.NamedAttr, checker schema.Checker) (interface{}, string, error) { return 99, "", nil }, }, expectIO: ` |Press return to select a default value, or enter - to omit an entry. |a [99]: » `, expectResult: map[string]interface{}{ "a": 99, }, }} func (s *formSuite) TestIOFiller(c *gc.C) { for i, test := range ioFillerTests { func() { c.Logf("%d. %s", i, test.about) for k, v := range test.environment { defer testing.PatchEnvironment(k, v)() } ioChecker := newInteractionChecker(c, "»", strings.TrimPrefix(unbeautify(test.expectIO), "\n")) ioFiller := test.filler ioFiller.In = ioChecker ioFiller.Out = ioChecker result, err := ioFiller.Fill(test.form) if test.expectError != "" { c.Assert(err, gc.ErrorMatches, test.expectError) c.Assert(result, gc.IsNil) } else { ioChecker.Close() c.Assert(err, gc.IsNil) c.Assert(result, jc.DeepEquals, test.expectResult) } }() } } func (s *formSuite) TestIOFillerReadError(c *gc.C) { r := errorReader{} var out bytes.Buffer ioFiller := form.IOFiller{ In: r, Out: &out, } result, err := ioFiller.Fill(form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tstring, }, }, }) c.Check(out.String(), gc.Equals, "Press return to select a default value, or enter - to omit an entry.\na: ") c.Assert(err, gc.ErrorMatches, `cannot complete form: cannot read input: some read error`) c.Assert(result, gc.IsNil) // Verify that the cause is masked. Maybe it shouldn't // be, but test the code as it is. 
c.Assert(errgo.Cause(err), gc.Not(gc.Equals), errRead) } func (s *formSuite) TestIOFillerUnexpectedEOF(c *gc.C) { r := strings.NewReader("a") var out bytes.Buffer ioFiller := form.IOFiller{ In: r, Out: &out, } result, err := ioFiller.Fill(form.Form{ Fields: environschema.Fields{ "a": environschema.Attr{ Description: "a description", Type: environschema.Tstring, }, }, }) c.Check(out.String(), gc.Equals, "Press return to select a default value, or enter - to omit an entry.\na: ") c.Assert(err, gc.ErrorMatches, `cannot complete form: cannot read input: unexpected EOF`) c.Assert(result, gc.IsNil) } func (s *formSuite) TestSortedFields(c *gc.C) { fields := environschema.Fields{ "a1": environschema.Attr{ Group: "A", Description: "a1 description", Type: environschema.Tstring, }, "c1": environschema.Attr{ Group: "A", Description: "c1 description", Type: environschema.Tstring, }, "b1": environschema.Attr{ Group: "A", Description: "b1 description", Type: environschema.Tstring, Secret: true, }, "a2": environschema.Attr{ Group: "B", Description: "a2 description", Type: environschema.Tstring, }, "c2": environschema.Attr{ Group: "B", Description: "c2 description", Type: environschema.Tstring, }, "b2": environschema.Attr{ Group: "B", Description: "b2 description", Type: environschema.Tstring, Secret: true, }, } c.Assert(form.SortedFields(fields), jc.DeepEquals, []form.NamedAttr{{ Name: "a1", Attr: environschema.Attr{ Group: "A", Description: "a1 description", Type: environschema.Tstring, }}, { Name: "c1", Attr: environschema.Attr{ Group: "A", Description: "c1 description", Type: environschema.Tstring, }}, { Name: "b1", Attr: environschema.Attr{ Group: "A", Description: "b1 description", Type: environschema.Tstring, Secret: true, }}, { Name: "a2", Attr: environschema.Attr{ Group: "B", Description: "a2 description", Type: environschema.Tstring, }}, { Name: "c2", Attr: environschema.Attr{ Group: "B", Description: "c2 description", Type: environschema.Tstring, }}, { Name: "b2", Attr: 
environschema.Attr{ Group: "B", Description: "b2 description", Type: environschema.Tstring, Secret: true, }, }}) } var errRead = errgo.New("some read error") type errorReader struct{} func (r errorReader) Read([]byte) (int, error) { return 0, errRead } var defaultFromEnvTests = []struct { about string environment map[string]string attr environschema.Attr expect interface{} expectError string }{{ about: "no envvars", attr: environschema.Attr{ EnvVar: "A", Type: environschema.Tstring, }, }, { about: "matching envvar", environment: map[string]string{ "A": "B", }, attr: environschema.Attr{ EnvVar: "A", Type: environschema.Tstring, }, expect: "B", }, { about: "matching envvars", environment: map[string]string{ "B": "C", }, attr: environschema.Attr{ EnvVar: "A", Type: environschema.Tstring, EnvVars: []string{"B"}, }, expect: "C", }, { about: "envvar takes priority", environment: map[string]string{ "A": "1", "B": "2", }, attr: environschema.Attr{ EnvVar: "A", Type: environschema.Tstring, EnvVars: []string{"B"}, }, expect: "1", }, { about: "cannot coerce", environment: map[string]string{ "A": "B", }, attr: environschema.Attr{ EnvVar: "A", Type: environschema.Tint, }, expectError: `cannot convert \$A: expected number, got string\("B"\)`, }} func (s *formSuite) TestDefaultFromEnv(c *gc.C) { for i, test := range defaultFromEnvTests { c.Logf("%d. %s", i, test.about) func() { for k, v := range test.environment { defer testing.PatchEnvironment(k, v)() } checker, err := test.attr.Checker() c.Assert(err, gc.IsNil) result, display, err := form.DefaultFromEnv(form.NamedAttr{ Name: "ignored", Attr: test.attr, }, checker) if test.expectError != "" { c.Assert(err, gc.ErrorMatches, test.expectError) c.Assert(display, gc.Equals, "") c.Assert(result, gc.Equals, nil) return } c.Assert(err, gc.IsNil) c.Assert(display, gc.Equals, "") c.Assert(result, gc.Equals, test.expect) }() } } // indentReplacer deletes tabs and | beautifier characters. 
var indentReplacer = strings.NewReplacer("\t", "", "|", "") // unbeautify strips the leading tabs and | characters that // we use to make the tests look nicer. func unbeautify(s string) string { return indentReplacer.Replace(s) } charm-2.1.1/src/gopkg.in/juju/environschema.v1/form/package_test.go0000664000175000017500000000032412672604577024204 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package form_test import ( "testing" gc "gopkg.in/check.v1" ) func TestPackage(t *testing.T) { gc.TestingT(t) } charm-2.1.1/src/gopkg.in/juju/environschema.v1/form/cmd/0000775000175000017500000000000012672604577021767 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/environschema.v1/form/cmd/formtest/0000775000175000017500000000000012672604577023632 5ustar marcomarcocharm-2.1.1/src/gopkg.in/juju/environschema.v1/form/cmd/formtest/main.go0000664000175000017500000000312312672604577025104 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package main import ( "encoding/json" "flag" "fmt" "os" "gopkg.in/juju/environschema.v1" "gopkg.in/juju/environschema.v1/form" ) var showDescriptions = flag.Bool("v", false, "show descriptions") func main() { flag.Parse() f := form.IOFiller{ ShowDescriptions: *showDescriptions, } fmt.Println(`formtest: This is a simple interactive test program for environschema forms. Expect the prompts to be as follows: e-mail [user@example.com]: name: password: PIN [****]: The entered values will be displayed at the end. 
`) os.Setenv("PIN", "1234") os.Setenv("EMAIL", "user@example.com") r, err := f.Fill(form.Form{ Title: "Test Form", Fields: environschema.Fields{ "name": environschema.Attr{ Description: "Your full name.", Type: environschema.Tstring, Mandatory: true, }, "email": environschema.Attr{ Description: "Your email address.", Type: environschema.Tstring, EnvVar: "EMAIL", }, "password": environschema.Attr{ Description: "Your very secret password.", Type: environschema.Tstring, Secret: true, Mandatory: true, }, "pin": environschema.Attr{ Description: "Some PIN that you have probably forgotten.", Type: environschema.Tint, EnvVar: "PIN", Secret: true, }, }}) if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } b, err := json.MarshalIndent(r, "", "\t") if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } fmt.Println(string(b)) } charm-2.1.1/src/gopkg.in/juju/environschema.v1/fields.go0000664000175000017500000002022212672604577022054 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. // Package environschema implements a way to specify // configuration attributes for Juju environments. package environschema // import "gopkg.in/juju/environschema.v1" import ( "fmt" "reflect" "strings" "github.com/juju/errors" "github.com/juju/schema" "github.com/juju/utils/keyvalues" ) // What to do about reading content from paths? // Could just have a load of client-side special cases. // Fields holds a map from attribute name to // information about that attribute. type Fields map[string]Attr type Attr struct { // Description holds a human-readable description // of the attribute. Description string `json:"description"` // Type holds the type of the attribute value. Type FieldType `json:"type"` // Group holds the group that the attribute belongs to. // All attributes within a Fields that have the same Group // attribute are considered to be part of the same group. 
Group Group `json:"group"`

	// Immutable specifies whether the attribute cannot
	// be changed once set.
	// NOTE(review): unlike every other field, Immutable carries no
	// json tag, so it marshals as "Immutable" and is never omitted —
	// confirm whether that is intentional before adding a tag, as a
	// tag would change the wire format.
	Immutable bool

	// Mandatory specifies whether the attribute
	// must be provided.
	Mandatory bool `json:"mandatory,omitempty"`

	// Secret specifies whether the attribute should be
	// considered secret.
	Secret bool `json:"is-secret,omitempty"`

	// EnvVar holds the environment variable
	// that will be used to obtain the default value
	// if it isn't specified.
	EnvVar string `json:"env-var,omitempty"`

	// EnvVars holds additional environment
	// variables to be used if the value in EnvVar is
	// not available, from highest to lowest priority.
	EnvVars []string `json:"env-vars,omitempty"`

	// Example holds an example value for the attribute
	// that can be used to produce a plausible-looking
	// entry for the attribute without necessarily using
	// it as a default value.
	//
	// TODO if the example holds some special values, use
	// it as a template to generate initial random values
	// (for example for admin-password) ?
	Example interface{} `json:"example,omitempty"`

	// Values holds the set of all possible values of the attribute.
	Values []interface{} `json:"values,omitempty"`
}

// Checker returns a checker that can be used to coerce values into the
// type of the attribute. Specifically, string is always supported for
// any checker type.
func (attr Attr) Checker() (schema.Checker, error) {
	checker := checkers[attr.Type]
	if checker == nil {
		return nil, fmt.Errorf("invalid type %q", attr.Type)
	}
	if len(attr.Values) == 0 {
		return checker, nil
	}
	// Restrict the checker to the enumerated set of allowed values.
	return oneOfValues(checker, attr.Values)
}

// Group describes the grouping of attributes.
type Group string

// The following constants are the initially defined group values.
const (
	// JujuGroup groups attributes defined by Juju that may
	// not be specified by a user.
	JujuGroup Group = "juju"

	// EnvironGroup groups attributes that are defined across all
	// possible Juju environments.
	EnvironGroup Group = "environ"

	// AccountGroup groups attributes that define a user account
	// used by a provider.
	AccountGroup Group = "account"

	// ProviderGroup groups attributes defined by the provider
	// that are not account credentials. This is also the default
	// group.
	ProviderGroup Group = ""
)

// FieldType describes the type of an attribute value.
type FieldType string

// The following constants are the possible type values.
// The "canonical Go type" is the type that will be
// the result of a successful Coerce call.
const (
	// Tstring represents a string type. Its canonical Go type is string.
	Tstring FieldType = "string"

	// Tbool represents a boolean type. Its canonical Go type is bool.
	Tbool FieldType = "bool"

	// Tint represents an integer type. Its canonical Go type is int.
	Tint FieldType = "int"

	// Tattrs represents an attribute map. Its canonical Go type is map[string]string.
	Tattrs FieldType = "attrs"
)

// checkers maps each field type to the checker used to coerce
// values of that type.
var checkers = map[FieldType]schema.Checker{
	Tstring: schema.String(),
	Tbool:   schema.Bool(),
	Tint:    schema.ForceInt(),
	Tattrs:  attrsChecker{},
}

// Alternative possibilities to ValidationSchema to bear in mind for
// the future:
// func (s Fields) Checker() schema.Checker
// func (s Fields) Validate(value map[string]interface{}) (v map[string] interface{}, extra []string, err error)

// ValidationSchema returns values suitable for passing to
// schema.FieldMap to create a schema.Checker that will validate the given fields.
// It will return an error if the fields are invalid.
//
// The Defaults return value will contain entries for all non-mandatory
// attributes set to schema.Omit. It is the responsibility of the
// client to set any actual default values as required.
func (s Fields) ValidationSchema() (schema.Fields, schema.Defaults, error) { fields := make(schema.Fields) defaults := make(schema.Defaults) for name, attr := range s { path := []string{name} checker, err := attr.Checker() if err != nil { return nil, nil, errors.Annotatef(err, "%s", mkPath(path)) } if !attr.Mandatory { defaults[name] = schema.Omit } fields[name] = checker } return fields, defaults, nil } // oneOfValues returns a checker that coerces its value // using the supplied checker, then checks that the // resulting value is equal to one of the given values. func oneOfValues(checker schema.Checker, values []interface{}) (schema.Checker, error) { cvalues := make([]interface{}, len(values)) for i, v := range values { cv, err := checker.Coerce(v, nil) if err != nil { return nil, fmt.Errorf("invalid enumerated value: %v", err) } cvalues[i] = cv } return oneOfValuesChecker{ vals: cvalues, checker: checker, }, nil } type oneOfValuesChecker struct { vals []interface{} checker schema.Checker } // Coerce implements schema.Checker.Coerce. func (c oneOfValuesChecker) Coerce(v interface{}, path []string) (interface{}, error) { v, err := c.checker.Coerce(v, path) if err != nil { return v, err } for _, allow := range c.vals { if allow == v { return v, nil } } return nil, fmt.Errorf("%sexpected one of %v, got %#v", pathPrefix(path), c.vals, v) } type attrsChecker struct{} var ( attrMapChecker = schema.Map(schema.String(), schema.String()) attrSliceChecker = schema.List(schema.String()) ) func (c attrsChecker) Coerce(v interface{}, path []string) (interface{}, error) { // TODO consider allowing only the map variant. 
switch reflect.TypeOf(v).Kind() { case reflect.String: s, err := schema.String().Coerce(v, path) if err != nil { return nil, errors.Mask(err) } result, err := keyvalues.Parse(strings.Fields(s.(string)), true) if err != nil { return nil, fmt.Errorf("%s%v", pathPrefix(path), err) } return result, nil case reflect.Slice: slice0, err := attrSliceChecker.Coerce(v, path) if err != nil { return nil, errors.Mask(err) } slice := slice0.([]interface{}) fields := make([]string, len(slice)) for i, f := range slice { fields[i] = f.(string) } result, err := keyvalues.Parse(fields, true) if err != nil { return nil, fmt.Errorf("%s%v", pathPrefix(path), err) } return result, nil case reflect.Map: imap0, err := attrMapChecker.Coerce(v, path) if err != nil { return nil, errors.Mask(err) } imap := imap0.(map[interface{}]interface{}) result := make(map[string]string) for k, v := range imap { result[k.(string)] = v.(string) } return result, nil default: return nil, errors.Errorf("%sunexpected type for value, got %T(%v)", pathPrefix(path), v, v) } } // pathPrefix returns an error message prefix holding // the concatenation of the path elements. If path // starts with a ".", the dot is omitted. func pathPrefix(path []string) string { if p := mkPath(path); p != "" { return p + ": " } return "" } // mkPath returns a string holding // the concatenation of the path elements. // If path starts with a ".", the dot is omitted. func mkPath(path []string) string { if len(path) == 0 { return "" } if path[0] == "." { return strings.Join(path[1:], "") } return strings.Join(path, "") } // ExampleYAML returns the fields formatted as a YAML // example, with non-mandatory fields commented out, // like the providers do currently. 
func (s Fields) ExampleYAML() []byte { panic("unimplemented") } charm-2.1.1/src/gopkg.in/juju/environschema.v1/README.md0000664000175000017500000000014612672604577021541 0ustar marcomarcoEnviron schema ============ This package allows the specification of Juju environment config schema. charm-2.1.1/src/gopkg.in/juju/environschema.v1/fields_test.go0000664000175000017500000001434712672604577023126 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package environschema_test import ( "github.com/juju/schema" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/environschema.v1" ) type suite struct{} var _ = gc.Suite(&suite{}) type valueTest struct { about string val interface{} expectError string expectVal interface{} } var validationSchemaTests = []struct { about string fields environschema.Fields expectError string tests []valueTest }{{ about: "regular schema", fields: environschema.Fields{ "stringvalue": { Type: environschema.Tstring, }, "mandatory-stringvalue": { Type: environschema.Tstring, Mandatory: true, }, "intvalue": { Type: environschema.Tint, }, "boolvalue": { Type: environschema.Tbool, }, "attrvalue": { Type: environschema.Tattrs, }, }, tests: []valueTest{{ about: "all fields ok", val: map[string]interface{}{ "stringvalue": "hello", "mandatory-stringvalue": "goodbye", "intvalue": 320.0, "boolvalue": true, "attrvalue": "a=b c=d", }, expectVal: map[string]interface{}{ "stringvalue": "hello", "intvalue": 320, "mandatory-stringvalue": "goodbye", "boolvalue": true, "attrvalue": map[string]string{"a": "b", "c": "d"}, }, }, { about: "non-mandatory fields missing", val: map[string]interface{}{ "mandatory-stringvalue": "goodbye", }, expectVal: map[string]interface{}{ "mandatory-stringvalue": "goodbye", }, }, { about: "wrong type for string", val: map[string]interface{}{ "stringvalue": 123, "mandatory-stringvalue": "goodbye", "intvalue": 0, "boolvalue": false, }, expectError: 
`stringvalue: expected string, got int\(123\)`, }, { about: "int value specified as string", val: map[string]interface{}{ "mandatory-stringvalue": "goodbye", "intvalue": "100", }, expectVal: map[string]interface{}{ "intvalue": 100, "mandatory-stringvalue": "goodbye", }, }, { about: "wrong type for int value", val: map[string]interface{}{ "mandatory-stringvalue": "goodbye", "intvalue": false, }, expectError: `intvalue: expected number, got bool\(false\)`, }, { about: "attr type specified as list", val: map[string]interface{}{ "mandatory-stringvalue": "goodbye", "attrvalue": []interface{}{"a=b", "c=d"}, }, expectVal: map[string]interface{}{ "mandatory-stringvalue": "goodbye", "attrvalue": map[string]string{"a": "b", "c": "d"}, }, }, { about: "attr type specified as map", val: map[string]interface{}{ "mandatory-stringvalue": "goodbye", "attrvalue": map[interface{}]interface{}{"a": "b", "c": "d"}, }, expectVal: map[string]interface{}{ "mandatory-stringvalue": "goodbye", "attrvalue": map[string]string{"a": "b", "c": "d"}, }, }, { about: "invalid attrs string value", val: map[string]interface{}{ "mandatory-stringvalue": "goodbye", "attrvalue": "a=b d f=gh", }, expectError: `attrvalue: expected "key=value", got "d"`, }, { about: "invalid attrs list value", val: map[string]interface{}{ "mandatory-stringvalue": "goodbye", "attrvalue": []interface{}{"a=b d", "f"}, }, expectError: `attrvalue: expected "key=value", got "f"`, }, { about: "attrs list element not coercable", val: map[string]interface{}{ "mandatory-stringvalue": "goodbye", "attrvalue": []interface{}{"a=b d", 123.45}, }, expectError: `attrvalue\[1\]: expected string, got float64\(123\.45\)`, }, { about: "attrs map element not coercable", val: map[string]interface{}{ "mandatory-stringvalue": "goodbye", "attrvalue": map[interface{}]interface{}{"a": 123, "c": "d"}, }, expectError: `attrvalue\.a: expected string, got int\(123\)`, }, { about: "unexpected attrs type", val: map[string]interface{}{ "mandatory-stringvalue": 
"goodbye", "attrvalue": 123.45, }, expectError: `attrvalue: unexpected type for value, got float64\(123\.45\)`, }}, }, { about: "enumerated values", fields: environschema.Fields{ "enumstring": { Type: environschema.Tstring, Values: []interface{}{"a", "b"}, }, "enumint": { Type: environschema.Tint, Values: []interface{}{10, "20"}, }, }, tests: []valueTest{{ about: "all fields ok", val: map[string]interface{}{ "enumstring": "a", "enumint": 20, }, expectVal: map[string]interface{}{ "enumstring": "a", "enumint": 20, }, }, { about: "string value not in values", val: map[string]interface{}{ "enumstring": "wrong", "enumint": 20, }, expectError: `enumstring: expected one of \[a b\], got "wrong"`, }, { about: "int value not in values", val: map[string]interface{}{ "enumstring": "b", "enumint": "5", }, expectError: `enumint: expected one of \[10 20\], got 5`, }, { about: "invalid type for string value", val: map[string]interface{}{ "enumstring": 123, "enumint": 10, }, expectError: `enumstring: expected string, got int\(123\)`, }, { about: "invalid type for int value", val: map[string]interface{}{ "enumstring": "b", "enumint": false, }, expectError: `enumint: expected number, got bool\(false\)`, }}, }, { about: "invalid value type", fields: environschema.Fields{ "stringvalue": { Type: "nontype", }, }, expectError: `stringvalue: invalid type "nontype"`, }} func (*suite) TestValidationSchema(c *gc.C) { for i, test := range validationSchemaTests { c.Logf("test %d: %s", i, test.about) sfields, sdefaults, err := test.fields.ValidationSchema() if test.expectError != "" { c.Assert(err, gc.ErrorMatches, test.expectError) continue } c.Assert(err, gc.IsNil) checker := schema.FieldMap(sfields, sdefaults) for j, vtest := range test.tests { c.Logf("- test %d: %s", j, vtest.about) val, err := checker.Coerce(vtest.val, nil) if vtest.expectError != "" { c.Assert(err, gc.ErrorMatches, vtest.expectError) continue } c.Assert(err, gc.IsNil) c.Assert(val, jc.DeepEquals, vtest.expectVal) } } } 
charm-2.1.1/src/gopkg.in/juju/environschema.v1/LICENCE0000664000175000017500000002150112672604577021245 0ustar marcomarcoAll files in this repository are licensed as follows. If you contribute to this repository, it is assumed that you license your contribution under the same license unless you state otherwise. All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. This software is licensed under the LGPLv3, included below. As a special exception to the GNU Lesser General Public License version 3 ("LGPL3"), the copyright holders of this Library give you permission to convey to a third party a Combined Work that links statically or dynamically to this Library without providing any Minimal Corresponding Source or Minimal Application Code as set out in 4d or providing the installation information set out in section 4e, provided that you comply with the other provisions of LGPL3 and provided that you meet, for the Application the terms and conditions of the license(s) which apply to the Application. Except as stated in this special exception, the provisions of LGPL3 will continue to comply in full to this Library. If you modify this Library, you may apply this exception to your version of this Library, but you are not obliged to do so. If you do not wish to do so, delete this exception statement from your version. This exception does not (and cannot) modify any license terms which apply to the Application, with which you must still comply. GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. 
As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. 
If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. 
c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. 
You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. 
charm-2.1.1/src/gopkg.in/juju/environschema.v1/package_test.go0000664000175000017500000000033512672604577023243 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package environschema_test import ( "testing" gc "gopkg.in/check.v1" ) func TestPackage(t *testing.T) { gc.TestingT(t) } charm-2.1.1/src/gopkg.in/juju/environschema.v1/sample_test.go0000664000175000017500000001524112672604577023133 0ustar marcomarco// Copyright 2015 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. package environschema_test import ( "bytes" "strings" gc "gopkg.in/check.v1" "gopkg.in/juju/environschema.v1" ) type sampleSuite struct{} var _ = gc.Suite(&sampleSuite{}) var sampleYAMLTests = []struct { about string indent int attrs map[string]interface{} fields environschema.Fields expect string }{{ about: "simple values, all attributes specified", attrs: map[string]interface{}{ "foo": "foovalue", "bar": 1243, "baz": false, "attrs": map[string]string{ "arble": "bletch", "hello": "goodbye", }, }, fields: environschema.Fields{ "foo": { Type: environschema.Tstring, Description: "foo is a string.", }, "bar": { Type: environschema.Tint, Description: "bar is a number.\nWith a long description that contains newlines. And quite a bit more text that will be folded because it is longer than 80 characters.", }, "baz": { Type: environschema.Tbool, Description: "baz is a bool.", }, "attrs": { Type: environschema.Tattrs, Description: "attrs is an attribute list", }, }, expect: ` |# attrs is an attribute list |# |attrs: | arble: bletch | hello: goodbye | |# bar is a number. With a long description that contains newlines. And quite a |# bit more text that will be folded because it is longer than 80 characters. |# |bar: 1243 | |# baz is a bool. |# |baz: false | |# foo is a string. 
|# |foo: foovalue `, }, { about: "when a value is not specified, it's commented out", attrs: map[string]interface{}{ "foo": "foovalue", }, fields: environschema.Fields{ "foo": { Type: environschema.Tstring, Description: "foo is a string.", }, "bar": { Type: environschema.Tint, Description: "bar is a number.", Example: 1243, }, }, expect: ` |# bar is a number. |# |# bar: 1243 | |# foo is a string. |# |foo: foovalue `, }, { about: "environment variables are mentioned as defaults", attrs: map[string]interface{}{ "bar": 1324, "baz": true, "foo": "foovalue", }, fields: environschema.Fields{ "bar": { Type: environschema.Tint, Description: "bar is a number.", EnvVars: []string{"BAR_VAL", "ALT_BAR_VAL"}, }, "baz": { Type: environschema.Tbool, Description: "baz is a bool.", EnvVar: "BAZ_VAL", EnvVars: []string{"ALT_BAZ_VAL", "ALT2_BAZ_VAL"}, }, "foo": { Type: environschema.Tstring, Description: "foo is a string.", EnvVar: "FOO_VAL", }, }, expect: ` |# bar is a number. |# |# Default value taken from $BAR_VAL or $ALT_BAR_VAL. |# |bar: 1324 | |# baz is a bool. |# |# Default value taken from $BAZ_VAL, $ALT_BAZ_VAL or $ALT2_BAZ_VAL. |# |baz: true | |# foo is a string. |# |# Default value taken from $FOO_VAL. 
|# |foo: foovalue `, }, { about: "sorted by attribute group (provider, account, environ, other), then alphabetically", fields: environschema.Fields{ "baz": { Type: environschema.Tbool, Description: "baz is a bool.", Group: environschema.ProviderGroup, }, "zaphod": { Type: environschema.Tstring, Group: environschema.ProviderGroup, }, "bar": { Type: environschema.Tint, Description: "bar is a number.", Group: environschema.AccountGroup, }, "foo": { Type: environschema.Tstring, Description: "foo is a string.", Group: environschema.AccountGroup, }, "alpha": { Type: environschema.Tstring, Group: environschema.EnvironGroup, }, "bravo": { Type: environschema.Tstring, Group: environschema.EnvironGroup, }, "charlie": { Type: environschema.Tstring, Group: "unknown", }, "delta": { Type: environschema.Tstring, Group: "unknown", }, }, expect: ` |# baz is a bool. |# |# baz: false | |# zaphod: "" | |# bar is a number. |# |# bar: 0 | |# foo is a string. |# |# foo: "" | |# alpha: "" | |# bravo: "" | |# charlie: "" | |# delta: "" `, }, { about: "example value is used when possible; zero value otherwise", fields: environschema.Fields{ "intval-with-example": { Type: environschema.Tint, Example: 999, }, "intval": { Type: environschema.Tint, }, "boolval": { Type: environschema.Tbool, }, "attrsval": { Type: environschema.Tattrs, }, }, expect: ` |# attrsval: |# example: value | |# boolval: false | |# intval: 0 | |# intval-with-example: 999 `, }, { about: "secret values are marked as secret/immutable", fields: environschema.Fields{ "a": { Type: environschema.Tbool, Description: "With a description", Secret: true, }, "b": { Type: environschema.Tstring, Secret: true, }, "c": { Type: environschema.Tstring, Secret: true, Description: "With a description", EnvVar: "VAR", }, "d": { Type: environschema.Tstring, Immutable: true, }, "e": { Type: environschema.Tstring, Immutable: true, Secret: true, }, }, expect: ` |# With a description |# |# This attribute is considered secret. 
|# |# a: false | |# This attribute is considered secret. |# |# b: "" | |# With a description |# |# Default value taken from $VAR. |# |# This attribute is considered secret. |# |# c: "" | |# This attribute is immutable. |# |# d: "" | |# This attribute is immutable and considered secret. |# |# e: "" `, }} func (*sampleSuite) TestSampleYAML(c *gc.C) { for i, test := range sampleYAMLTests { c.Logf("test %d. %s\n", i, test.about) var buf bytes.Buffer err := environschema.SampleYAML(&buf, 0, test.attrs, test.fields) c.Assert(err, gc.IsNil) diff(c, buf.String(), unbeautify(test.expect[1:])) } } // indentReplacer deletes tabs and | beautifier characters. var indentReplacer = strings.NewReplacer("\t", "", "|", "") // unbeautify strips the leading tabs and | characters that // we use to make the tests look nicer. func unbeautify(s string) string { return indentReplacer.Replace(s) } func diff(c *gc.C, have, want string) { // Final sanity check in case the below logic is flawed. defer c.Check(have, gc.Equals, want) haveLines := strings.Split(have, "\n") wantLines := strings.Split(want, "\n") for i, wantLine := range wantLines { if i >= len(haveLines) { c.Errorf("have too few lines from line %d, %s", i+1, wantLine) return } haveLine := haveLines[i] if !c.Check(haveLine, gc.Equals, wantLine, gc.Commentf("line %d", i+1)) { return } } if len(haveLines) > len(wantLines) { c.Errorf("have too many lines from line %d, %s", len(wantLines), haveLines[len(wantLines)]) return } } charm-2.1.1/src/gopkg.in/yaml.v1/0000775000175000017500000000000012672604524015375 5ustar marcomarcocharm-2.1.1/src/gopkg.in/yaml.v1/decode.go0000664000175000017500000003075312672604524017157 0ustar marcomarcopackage yaml import ( "encoding/base64" "fmt" "reflect" "strconv" "time" ) const ( documentNode = 1 << iota mappingNode sequenceNode scalarNode aliasNode ) type node struct { kind int line, column int tag string value string implicit bool children []*node anchors map[string]*node } // 
---------------------------------------------------------------------------- // Parser, produces a node tree out of a libyaml event stream. type parser struct { parser yaml_parser_t event yaml_event_t doc *node } func newParser(b []byte) *parser { p := parser{} if !yaml_parser_initialize(&p.parser) { panic("Failed to initialize YAML emitter") } if len(b) == 0 { b = []byte{'\n'} } yaml_parser_set_input_string(&p.parser, b) p.skip() if p.event.typ != yaml_STREAM_START_EVENT { panic("Expected stream start event, got " + strconv.Itoa(int(p.event.typ))) } p.skip() return &p } func (p *parser) destroy() { if p.event.typ != yaml_NO_EVENT { yaml_event_delete(&p.event) } yaml_parser_delete(&p.parser) } func (p *parser) skip() { if p.event.typ != yaml_NO_EVENT { if p.event.typ == yaml_STREAM_END_EVENT { fail("Attempted to go past the end of stream. Corrupted value?") } yaml_event_delete(&p.event) } if !yaml_parser_parse(&p.parser, &p.event) { p.fail() } } func (p *parser) fail() { var where string var line int if p.parser.problem_mark.line != 0 { line = p.parser.problem_mark.line } else if p.parser.context_mark.line != 0 { line = p.parser.context_mark.line } if line != 0 { where = "line " + strconv.Itoa(line) + ": " } var msg string if len(p.parser.problem) > 0 { msg = p.parser.problem } else { msg = "Unknown problem parsing YAML content" } fail(where + msg) } func (p *parser) anchor(n *node, anchor []byte) { if anchor != nil { p.doc.anchors[string(anchor)] = n } } func (p *parser) parse() *node { switch p.event.typ { case yaml_SCALAR_EVENT: return p.scalar() case yaml_ALIAS_EVENT: return p.alias() case yaml_MAPPING_START_EVENT: return p.mapping() case yaml_SEQUENCE_START_EVENT: return p.sequence() case yaml_DOCUMENT_START_EVENT: return p.document() case yaml_STREAM_END_EVENT: // Happens when attempting to decode an empty buffer. 
return nil default: panic("Attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) } panic("unreachable") } func (p *parser) node(kind int) *node { return &node{ kind: kind, line: p.event.start_mark.line, column: p.event.start_mark.column, } } func (p *parser) document() *node { n := p.node(documentNode) n.anchors = make(map[string]*node) p.doc = n p.skip() n.children = append(n.children, p.parse()) if p.event.typ != yaml_DOCUMENT_END_EVENT { panic("Expected end of document event but got " + strconv.Itoa(int(p.event.typ))) } p.skip() return n } func (p *parser) alias() *node { n := p.node(aliasNode) n.value = string(p.event.anchor) p.skip() return n } func (p *parser) scalar() *node { n := p.node(scalarNode) n.value = string(p.event.value) n.tag = string(p.event.tag) n.implicit = p.event.implicit p.anchor(n, p.event.anchor) p.skip() return n } func (p *parser) sequence() *node { n := p.node(sequenceNode) p.anchor(n, p.event.anchor) p.skip() for p.event.typ != yaml_SEQUENCE_END_EVENT { n.children = append(n.children, p.parse()) } p.skip() return n } func (p *parser) mapping() *node { n := p.node(mappingNode) p.anchor(n, p.event.anchor) p.skip() for p.event.typ != yaml_MAPPING_END_EVENT { n.children = append(n.children, p.parse(), p.parse()) } p.skip() return n } // ---------------------------------------------------------------------------- // Decoder, unmarshals a node into a provided value. type decoder struct { doc *node aliases map[string]bool } func newDecoder() *decoder { d := &decoder{} d.aliases = make(map[string]bool) return d } // d.setter deals with setters and pointer dereferencing and initialization. // // It's a slightly convoluted case to handle properly: // // - nil pointers should be initialized, unless being set to nil // - we don't know at this point yet what's the value to SetYAML() with. // - we can't separate pointer deref/init and setter checking, because // a setter may be found while going down a pointer chain. 
// // Thus, here is how it takes care of it: // // - out is provided as a pointer, so that it can be replaced. // - when looking at a non-setter ptr, *out=ptr.Elem(), unless tag=!!null // - when a setter is found, *out=interface{}, and a set() function is // returned to call SetYAML() with the value of *out once it's defined. // func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()) { if (*out).Kind() != reflect.Ptr && (*out).CanAddr() { setter, _ := (*out).Addr().Interface().(Setter) if setter != nil { var arg interface{} *out = reflect.ValueOf(&arg).Elem() return func() { *good = setter.SetYAML(shortTag(tag), arg) } } } again := true for again { again = false setter, _ := (*out).Interface().(Setter) if tag != yaml_NULL_TAG || setter != nil { if pv := (*out); pv.Kind() == reflect.Ptr { if pv.IsNil() { *out = reflect.New(pv.Type().Elem()).Elem() pv.Set((*out).Addr()) } else { *out = pv.Elem() } setter, _ = pv.Interface().(Setter) again = true } } if setter != nil { var arg interface{} *out = reflect.ValueOf(&arg).Elem() return func() { *good = setter.SetYAML(shortTag(tag), arg) } } } return nil } func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { switch n.kind { case documentNode: good = d.document(n, out) case scalarNode: good = d.scalar(n, out) case aliasNode: good = d.alias(n, out) case mappingNode: good = d.mapping(n, out) case sequenceNode: good = d.sequence(n, out) default: panic("Internal error: unknown node kind: " + strconv.Itoa(n.kind)) } return } func (d *decoder) document(n *node, out reflect.Value) (good bool) { if len(n.children) == 1 { d.doc = n d.unmarshal(n.children[0], out) return true } return false } func (d *decoder) alias(n *node, out reflect.Value) (good bool) { an, ok := d.doc.anchors[n.value] if !ok { fail("Unknown anchor '" + n.value + "' referenced") } if d.aliases[n.value] { fail("Anchor '" + n.value + "' value contains itself") } d.aliases[n.value] = true good = d.unmarshal(an, out) 
delete(d.aliases, n.value) return good } var zeroValue reflect.Value func resetMap(out reflect.Value) { for _, k := range out.MapKeys() { out.SetMapIndex(k, zeroValue) } } var durationType = reflect.TypeOf(time.Duration(0)) func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { var tag string var resolved interface{} if n.tag == "" && !n.implicit { tag = yaml_STR_TAG resolved = n.value } else { tag, resolved = resolve(n.tag, n.value) if tag == yaml_BINARY_TAG { data, err := base64.StdEncoding.DecodeString(resolved.(string)) if err != nil { fail("!!binary value contains invalid base64 data") } resolved = string(data) } } if set := d.setter(tag, &out, &good); set != nil { defer set() } if resolved == nil { if out.Kind() == reflect.Map && !out.CanAddr() { resetMap(out) } else { out.Set(reflect.Zero(out.Type())) } good = true return } switch out.Kind() { case reflect.String: if tag == yaml_BINARY_TAG { out.SetString(resolved.(string)) good = true } else if resolved != nil { out.SetString(n.value) good = true } case reflect.Interface: if resolved == nil { out.Set(reflect.Zero(out.Type())) } else { out.Set(reflect.ValueOf(resolved)) } good = true case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: switch resolved := resolved.(type) { case int: if !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) good = true } case int64: if !out.OverflowInt(resolved) { out.SetInt(resolved) good = true } case float64: if resolved < 1<<63-1 && !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) good = true } case string: if out.Type() == durationType { d, err := time.ParseDuration(resolved) if err == nil { out.SetInt(int64(d)) good = true } } } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: switch resolved := resolved.(type) { case int: if resolved >= 0 { out.SetUint(uint64(resolved)) good = true } case int64: if resolved >= 0 { out.SetUint(uint64(resolved)) good = true } case 
float64: if resolved < 1<<64-1 && !out.OverflowUint(uint64(resolved)) { out.SetUint(uint64(resolved)) good = true } } case reflect.Bool: switch resolved := resolved.(type) { case bool: out.SetBool(resolved) good = true } case reflect.Float32, reflect.Float64: switch resolved := resolved.(type) { case int: out.SetFloat(float64(resolved)) good = true case int64: out.SetFloat(float64(resolved)) good = true case float64: out.SetFloat(resolved) good = true } case reflect.Ptr: if out.Type().Elem() == reflect.TypeOf(resolved) { elem := reflect.New(out.Type().Elem()) elem.Elem().Set(reflect.ValueOf(resolved)) out.Set(elem) good = true } } return good } func settableValueOf(i interface{}) reflect.Value { v := reflect.ValueOf(i) sv := reflect.New(v.Type()).Elem() sv.Set(v) return sv } func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { if set := d.setter(yaml_SEQ_TAG, &out, &good); set != nil { defer set() } var iface reflect.Value if out.Kind() == reflect.Interface { // No type hints. Will have to use a generic sequence. iface = out out = settableValueOf(make([]interface{}, 0)) } if out.Kind() != reflect.Slice { return false } et := out.Type().Elem() l := len(n.children) for i := 0; i < l; i++ { e := reflect.New(et).Elem() if ok := d.unmarshal(n.children[i], e); ok { out.Set(reflect.Append(out, e)) } } if iface.IsValid() { iface.Set(out) } return true } func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { if set := d.setter(yaml_MAP_TAG, &out, &good); set != nil { defer set() } if out.Kind() == reflect.Struct { return d.mappingStruct(n, out) } if out.Kind() == reflect.Interface { // No type hints. Will have to use a generic map. 
iface := out out = settableValueOf(make(map[interface{}]interface{})) iface.Set(out) } if out.Kind() != reflect.Map { return false } outt := out.Type() kt := outt.Key() et := outt.Elem() if out.IsNil() { out.Set(reflect.MakeMap(outt)) } l := len(n.children) for i := 0; i < l; i += 2 { if isMerge(n.children[i]) { d.merge(n.children[i+1], out) continue } k := reflect.New(kt).Elem() if d.unmarshal(n.children[i], k) { kkind := k.Kind() if kkind == reflect.Interface { kkind = k.Elem().Kind() } if kkind == reflect.Map || kkind == reflect.Slice { fail(fmt.Sprintf("invalid map key: %#v", k.Interface())) } e := reflect.New(et).Elem() if d.unmarshal(n.children[i+1], e) { out.SetMapIndex(k, e) } } } return true } func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { sinfo, err := getStructInfo(out.Type()) if err != nil { panic(err) } name := settableValueOf("") l := len(n.children) for i := 0; i < l; i += 2 { ni := n.children[i] if isMerge(ni) { d.merge(n.children[i+1], out) continue } if !d.unmarshal(ni, name) { continue } if info, ok := sinfo.FieldsMap[name.String()]; ok { var field reflect.Value if info.Inline == nil { field = out.Field(info.Num) } else { field = out.FieldByIndex(info.Inline) } d.unmarshal(n.children[i+1], field) } } return true } func (d *decoder) merge(n *node, out reflect.Value) { const wantMap = "map merge requires map or sequence of maps as the value" switch n.kind { case mappingNode: d.unmarshal(n, out) case aliasNode: an, ok := d.doc.anchors[n.value] if ok && an.kind != mappingNode { fail(wantMap) } d.unmarshal(n, out) case sequenceNode: // Step backwards as earlier nodes take precedence. 
for i := len(n.children) - 1; i >= 0; i-- { ni := n.children[i] if ni.kind == aliasNode { an, ok := d.doc.anchors[ni.value] if ok && an.kind != mappingNode { fail(wantMap) } } else if ni.kind != mappingNode { fail(wantMap) } d.unmarshal(ni, out) } default: fail(wantMap) } } func isMerge(n *node) bool { return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) } charm-2.1.1/src/gopkg.in/yaml.v1/emitterc.go0000664000175000017500000013037412672604524017550 0ustar marcomarcopackage yaml import ( "bytes" ) // Flush the buffer if needed. func flush(emitter *yaml_emitter_t) bool { if emitter.buffer_pos+5 >= len(emitter.buffer) { return yaml_emitter_flush(emitter) } return true } // Put a character to the output buffer. func put(emitter *yaml_emitter_t, value byte) bool { if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { return false } emitter.buffer[emitter.buffer_pos] = value emitter.buffer_pos++ emitter.column++ return true } // Put a line break to the output buffer. func put_break(emitter *yaml_emitter_t) bool { if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { return false } switch emitter.line_break { case yaml_CR_BREAK: emitter.buffer[emitter.buffer_pos] = '\r' emitter.buffer_pos += 1 case yaml_LN_BREAK: emitter.buffer[emitter.buffer_pos] = '\n' emitter.buffer_pos += 1 case yaml_CRLN_BREAK: emitter.buffer[emitter.buffer_pos+0] = '\r' emitter.buffer[emitter.buffer_pos+1] = '\n' emitter.buffer_pos += 2 default: panic("unknown line break setting") } emitter.column = 0 emitter.line++ return true } // Copy a character from a string into buffer. 
func write(emitter *yaml_emitter_t, s []byte, i *int) bool { if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { return false } p := emitter.buffer_pos w := width(s[*i]) switch w { case 4: emitter.buffer[p+3] = s[*i+3] fallthrough case 3: emitter.buffer[p+2] = s[*i+2] fallthrough case 2: emitter.buffer[p+1] = s[*i+1] fallthrough case 1: emitter.buffer[p+0] = s[*i+0] default: panic("unknown character width") } emitter.column++ emitter.buffer_pos += w *i += w return true } // Write a whole string into buffer. func write_all(emitter *yaml_emitter_t, s []byte) bool { for i := 0; i < len(s); { if !write(emitter, s, &i) { return false } } return true } // Copy a line break character from a string into buffer. func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { if s[*i] == '\n' { if !put_break(emitter) { return false } *i++ } else { if !write(emitter, s, i) { return false } emitter.column = 0 emitter.line++ } return true } // Set an emitter error and return false. func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { emitter.error = yaml_EMITTER_ERROR emitter.problem = problem return false } // Emit an event. func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { emitter.events = append(emitter.events, *event) for !yaml_emitter_need_more_events(emitter) { event := &emitter.events[emitter.events_head] if !yaml_emitter_analyze_event(emitter, event) { return false } if !yaml_emitter_state_machine(emitter, event) { return false } yaml_event_delete(event) emitter.events_head++ } return true } // Check if we need to accumulate more events before emitting. 
// // We accumulate extra // - 1 event for DOCUMENT-START // - 2 events for SEQUENCE-START // - 3 events for MAPPING-START // func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { if emitter.events_head == len(emitter.events) { return true } var accumulate int switch emitter.events[emitter.events_head].typ { case yaml_DOCUMENT_START_EVENT: accumulate = 1 break case yaml_SEQUENCE_START_EVENT: accumulate = 2 break case yaml_MAPPING_START_EVENT: accumulate = 3 break default: return false } if len(emitter.events)-emitter.events_head > accumulate { return false } var level int for i := emitter.events_head; i < len(emitter.events); i++ { switch emitter.events[i].typ { case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: level++ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: level-- } if level == 0 { return false } } return true } // Append a directive to the directives stack. func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { for i := 0; i < len(emitter.tag_directives); i++ { if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { if allow_duplicates { return true } return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") } } // [Go] Do we actually need to copy this given garbage collection // and the lack of deallocating destructors? tag_copy := yaml_tag_directive_t{ handle: make([]byte, len(value.handle)), prefix: make([]byte, len(value.prefix)), } copy(tag_copy.handle, value.handle) copy(tag_copy.prefix, value.prefix) emitter.tag_directives = append(emitter.tag_directives, tag_copy) return true } // Increase the indentation level. 
// Pushes the current indent and computes the new one: flow context starts at
// best_indent, block context at column 0; indentless suppresses the step
// (used for block sequences nested directly in mappings).
func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
	emitter.indents = append(emitter.indents, emitter.indent)
	if emitter.indent < 0 {
		if flow {
			emitter.indent = emitter.best_indent
		} else {
			emitter.indent = 0
		}
	} else if !indentless {
		emitter.indent += emitter.best_indent
	}
	return true
}

// State dispatcher.
func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	switch emitter.state {
	default:
	case yaml_EMIT_STREAM_START_STATE:
		return yaml_emitter_emit_stream_start(emitter, event)

	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
		return yaml_emitter_emit_document_start(emitter, event, true)

	case yaml_EMIT_DOCUMENT_START_STATE:
		return yaml_emitter_emit_document_start(emitter, event, false)

	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
		return yaml_emitter_emit_document_content(emitter, event)

	case yaml_EMIT_DOCUMENT_END_STATE:
		return yaml_emitter_emit_document_end(emitter, event)

	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
		return yaml_emitter_emit_flow_sequence_item(emitter, event, true)

	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
		return yaml_emitter_emit_flow_sequence_item(emitter, event, false)

	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
		return yaml_emitter_emit_flow_mapping_key(emitter, event, true)

	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
		return yaml_emitter_emit_flow_mapping_key(emitter, event, false)

	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)

	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)

	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
		return yaml_emitter_emit_block_sequence_item(emitter, event, true)

	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
		return yaml_emitter_emit_block_sequence_item(emitter, event, false)

	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
		return yaml_emitter_emit_block_mapping_key(emitter, event, true)

	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
		return yaml_emitter_emit_block_mapping_key(emitter, event, false)

	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
		return yaml_emitter_emit_block_mapping_value(emitter, event, true)

	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
		return yaml_emitter_emit_block_mapping_value(emitter, event, false)

	case yaml_EMIT_END_STATE:
		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
	}
	// Only reached through the empty default case, i.e. an unknown state.
	panic("invalid emitter state")
}

// Expect STREAM-START.
// Normalizes the emitter configuration (encoding, indent 2..9, width, line
// break style) and transitions to the first DOCUMENT-START state.
func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if event.typ != yaml_STREAM_START_EVENT {
		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
	}
	if emitter.encoding == yaml_ANY_ENCODING {
		emitter.encoding = event.encoding
		if emitter.encoding == yaml_ANY_ENCODING {
			emitter.encoding = yaml_UTF8_ENCODING
		}
	}
	if emitter.best_indent < 2 || emitter.best_indent > 9 {
		emitter.best_indent = 2
	}
	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
		emitter.best_width = 80
	}
	if emitter.best_width < 0 {
		// Negative width means "unlimited".
		emitter.best_width = 1<<31 - 1
	}
	if emitter.line_break == yaml_ANY_BREAK {
		emitter.line_break = yaml_LN_BREAK
	}
	emitter.indent = -1
	emitter.line = 0
	emitter.column = 0
	emitter.whitespace = true
	emitter.indention = true
	if emitter.encoding != yaml_UTF8_ENCODING {
		if !yaml_emitter_write_bom(emitter) {
			return false
		}
	}
	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
	return true
}

// Expect DOCUMENT-START or STREAM-END.
// Validates and writes the %YAML / %TAG directives, decides whether the
// document start marker "---" may be left implicit, and transitions to the
// document content state. On STREAM-END it flushes and terminates.
func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
	if event.typ == yaml_DOCUMENT_START_EVENT {
		if event.version_directive != nil {
			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
				return false
			}
		}
		for i := 0; i < len(event.tag_directives); i++ {
			tag_directive := &event.tag_directives[i]
			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
				return false
			}
			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
				return false
			}
		}
		for i := 0; i < len(default_tag_directives); i++ {
			tag_directive := &default_tag_directives[i]
			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
				return false
			}
		}
		implicit := event.implicit
		if !first || emitter.canonical {
			implicit = false
		}
		// A previous document left "open ended": close it with "..." before
		// emitting directives that would otherwise attach to it.
		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if event.version_directive != nil {
			implicit = false
			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if len(event.tag_directives) > 0 {
			implicit = false
			for i := 0; i < len(event.tag_directives); i++ {
				tag_directive := &event.tag_directives[i]
				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
					return false
				}
				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
					return false
				}
				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
					return false
				}
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
		}
		if yaml_emitter_check_empty_document(emitter) {
			implicit = false
		}
		if !implicit {
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
				return false
			}
			if emitter.canonical {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
		}
		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
		return true
	}
	if event.typ == yaml_STREAM_END_EVENT {
		if emitter.open_ended {
			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_flush(emitter) {
			return false
		}
		emitter.state = yaml_EMIT_END_STATE
		return true
	}
	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
}

// Expect the root node.
func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
}

// Expect DOCUMENT-END.
// Writes an explicit "..." terminator when the event is not implicit,
// flushes the output, and resets the per-document tag directives.
func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if event.typ != yaml_DOCUMENT_END_EVENT {
		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
	}
	if !yaml_emitter_write_indent(emitter) {
		return false
	}
	if !event.implicit {
		// [Go] Allocate the slice elsewhere.
		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
			return false
		}
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
	}
	if !yaml_emitter_flush(emitter) {
		return false
	}
	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
	emitter.tag_directives = emitter.tag_directives[:0]
	return true
}

// Expect a flow item node.
func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { if first { if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { return false } if !yaml_emitter_increase_indent(emitter, true, false) { return false } emitter.flow_level++ } if event.typ == yaml_SEQUENCE_END_EVENT { emitter.flow_level-- emitter.indent = emitter.indents[len(emitter.indents)-1] emitter.indents = emitter.indents[:len(emitter.indents)-1] if emitter.canonical && !first { if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { return false } if !yaml_emitter_write_indent(emitter) { return false } } if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { return false } emitter.state = emitter.states[len(emitter.states)-1] emitter.states = emitter.states[:len(emitter.states)-1] return true } if !first { if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { return false } } if emitter.canonical || emitter.column > emitter.best_width { if !yaml_emitter_write_indent(emitter) { return false } } emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) return yaml_emitter_emit_node(emitter, event, false, true, false, false) } // Expect a flow key node. 
func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { if first { if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { return false } if !yaml_emitter_increase_indent(emitter, true, false) { return false } emitter.flow_level++ } if event.typ == yaml_MAPPING_END_EVENT { emitter.flow_level-- emitter.indent = emitter.indents[len(emitter.indents)-1] emitter.indents = emitter.indents[:len(emitter.indents)-1] if emitter.canonical && !first { if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { return false } if !yaml_emitter_write_indent(emitter) { return false } } if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { return false } emitter.state = emitter.states[len(emitter.states)-1] emitter.states = emitter.states[:len(emitter.states)-1] return true } if !first { if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { return false } } if emitter.canonical || emitter.column > emitter.best_width { if !yaml_emitter_write_indent(emitter) { return false } } if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) return yaml_emitter_emit_node(emitter, event, false, false, true, true) } if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { return false } emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) return yaml_emitter_emit_node(emitter, event, false, false, true, false) } // Expect a flow value node. 
func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { if simple { if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { return false } } else { if emitter.canonical || emitter.column > emitter.best_width { if !yaml_emitter_write_indent(emitter) { return false } } if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { return false } } emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) return yaml_emitter_emit_node(emitter, event, false, false, true, false) } // Expect a block item node. func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { if first { if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { return false } } if event.typ == yaml_SEQUENCE_END_EVENT { emitter.indent = emitter.indents[len(emitter.indents)-1] emitter.indents = emitter.indents[:len(emitter.indents)-1] emitter.state = emitter.states[len(emitter.states)-1] emitter.states = emitter.states[:len(emitter.states)-1] return true } if !yaml_emitter_write_indent(emitter) { return false } if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { return false } emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) return yaml_emitter_emit_node(emitter, event, false, true, false, false) } // Expect a block key node. 
func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { if first { if !yaml_emitter_increase_indent(emitter, false, false) { return false } } if event.typ == yaml_MAPPING_END_EVENT { emitter.indent = emitter.indents[len(emitter.indents)-1] emitter.indents = emitter.indents[:len(emitter.indents)-1] emitter.state = emitter.states[len(emitter.states)-1] emitter.states = emitter.states[:len(emitter.states)-1] return true } if !yaml_emitter_write_indent(emitter) { return false } if yaml_emitter_check_simple_key(emitter) { emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) return yaml_emitter_emit_node(emitter, event, false, false, true, true) } if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { return false } emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) return yaml_emitter_emit_node(emitter, event, false, false, true, false) } // Expect a block value node. func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { if simple { if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { return false } } else { if !yaml_emitter_write_indent(emitter) { return false } if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { return false } } emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) return yaml_emitter_emit_node(emitter, event, false, false, true, false) } // Expect a node. 
func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, root bool, sequence bool, mapping bool, simple_key bool) bool { emitter.root_context = root emitter.sequence_context = sequence emitter.mapping_context = mapping emitter.simple_key_context = simple_key switch event.typ { case yaml_ALIAS_EVENT: return yaml_emitter_emit_alias(emitter, event) case yaml_SCALAR_EVENT: return yaml_emitter_emit_scalar(emitter, event) case yaml_SEQUENCE_START_EVENT: return yaml_emitter_emit_sequence_start(emitter, event) case yaml_MAPPING_START_EVENT: return yaml_emitter_emit_mapping_start(emitter, event) default: return yaml_emitter_set_emitter_error(emitter, "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") } return false } // Expect ALIAS. func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { if !yaml_emitter_process_anchor(emitter) { return false } emitter.state = emitter.states[len(emitter.states)-1] emitter.states = emitter.states[:len(emitter.states)-1] return true } // Expect SCALAR. func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { if !yaml_emitter_select_scalar_style(emitter, event) { return false } if !yaml_emitter_process_anchor(emitter) { return false } if !yaml_emitter_process_tag(emitter) { return false } if !yaml_emitter_increase_indent(emitter, true, false) { return false } if !yaml_emitter_process_scalar(emitter) { return false } emitter.indent = emitter.indents[len(emitter.indents)-1] emitter.indents = emitter.indents[:len(emitter.indents)-1] emitter.state = emitter.states[len(emitter.states)-1] emitter.states = emitter.states[:len(emitter.states)-1] return true } // Expect SEQUENCE-START. 
func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { if !yaml_emitter_process_anchor(emitter) { return false } if !yaml_emitter_process_tag(emitter) { return false } if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || yaml_emitter_check_empty_sequence(emitter) { emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE } else { emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE } return true } // Expect MAPPING-START. func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { if !yaml_emitter_process_anchor(emitter) { return false } if !yaml_emitter_process_tag(emitter) { return false } if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || yaml_emitter_check_empty_mapping(emitter) { emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE } else { emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE } return true } // Check if the document content is an empty scalar. func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { return false // [Go] Huh? } // Check if the next events represent an empty sequence. func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { if len(emitter.events)-emitter.events_head < 2 { return false } return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT } // Check if the next events represent an empty mapping. func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { if len(emitter.events)-emitter.events_head < 2 { return false } return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT } // Check if the next node can be expressed as a simple key. 
// A node qualifies as a simple key if it is single-line and its rendered
// length (anchor + tag + value) does not exceed 128 characters.
func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
	length := 0
	switch emitter.events[emitter.events_head].typ {
	case yaml_ALIAS_EVENT:
		length += len(emitter.anchor_data.anchor)
	case yaml_SCALAR_EVENT:
		if emitter.scalar_data.multiline {
			return false
		}
		length += len(emitter.anchor_data.anchor) + len(emitter.tag_data.handle) + len(emitter.tag_data.suffix) + len(emitter.scalar_data.value)
	case yaml_SEQUENCE_START_EVENT:
		// Only an empty collection can be a simple key.
		if !yaml_emitter_check_empty_sequence(emitter) {
			return false
		}
		length += len(emitter.anchor_data.anchor) + len(emitter.tag_data.handle) + len(emitter.tag_data.suffix)
	case yaml_MAPPING_START_EVENT:
		if !yaml_emitter_check_empty_mapping(emitter) {
			return false
		}
		length += len(emitter.anchor_data.anchor) + len(emitter.tag_data.handle) + len(emitter.tag_data.suffix)
	default:
		return false
	}
	return length <= 128
}

// Determine an acceptable scalar style.
// Starts from the requested style and progressively downgrades to quoted
// styles when the analyzed scalar data rules the current choice out.
func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
	if no_tag && !event.implicit && !event.quoted_implicit {
		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
	}
	style := event.scalar_style()
	if style == yaml_ANY_SCALAR_STYLE {
		style = yaml_PLAIN_SCALAR_STYLE
	}
	if emitter.canonical {
		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}
	if emitter.simple_key_context && emitter.scalar_data.multiline {
		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}
	if style == yaml_PLAIN_SCALAR_STYLE {
		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
		}
		// An empty plain scalar is ambiguous in flow/simple-key contexts.
		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
		}
		if no_tag && !event.implicit {
			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
		}
	}
	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
		if !emitter.scalar_data.single_quoted_allowed {
			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
		}
	}
	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
		// Block scalar styles are not allowed inside flow context or keys.
		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
		}
	}
	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
		// A non-plain untagged scalar needs the "!" non-specific tag.
		emitter.tag_data.handle = []byte{'!'}
	}
	emitter.scalar_data.style = style
	return true
}

// Write an anchor.
// Writes "&anchor" for definitions or "*anchor" for aliases.
func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
	if emitter.anchor_data.anchor == nil {
		return true
	}
	c := []byte{'&'}
	if emitter.anchor_data.alias {
		c[0] = '*'
	}
	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
		return false
	}
	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
}

// Write a tag.
// Shorthand "handle + suffix" form when a handle is known, verbatim "!<...>"
// form otherwise.
func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
		return true
	}
	if len(emitter.tag_data.handle) > 0 {
		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
			return false
		}
		if len(emitter.tag_data.suffix) > 0 {
			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
				return false
			}
		}
	} else {
		// [Go] Allocate these slices elsewhere.
		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
			return false
		}
		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
			return false
		}
		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
			return false
		}
	}
	return true
}

// Write a scalar.
func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { switch emitter.scalar_data.style { case yaml_PLAIN_SCALAR_STYLE: return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) case yaml_SINGLE_QUOTED_SCALAR_STYLE: return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) case yaml_DOUBLE_QUOTED_SCALAR_STYLE: return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) case yaml_LITERAL_SCALAR_STYLE: return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) case yaml_FOLDED_SCALAR_STYLE: return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) } panic("unknown scalar style") } // Check if a %YAML directive is valid. func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { if version_directive.major != 1 || version_directive.minor != 1 { return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") } return true } // Check if a %TAG directive is valid. func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { handle := tag_directive.handle prefix := tag_directive.prefix if len(handle) == 0 { return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") } if handle[0] != '!' { return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") } if handle[len(handle)-1] != '!' { return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") } for i := 1; i < len(handle)-1; i += width(handle[i]) { if !is_alpha(handle, i) { return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") } } if len(prefix) == 0 { return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") } return true } // Check if an anchor is valid. 
func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { if len(anchor) == 0 { problem := "anchor value must not be empty" if alias { problem = "alias value must not be empty" } return yaml_emitter_set_emitter_error(emitter, problem) } for i := 0; i < len(anchor); i += width(anchor[i]) { if !is_alpha(anchor, i) { problem := "anchor value must contain alphanumerical characters only" if alias { problem = "alias value must contain alphanumerical characters only" } return yaml_emitter_set_emitter_error(emitter, problem) } } emitter.anchor_data.anchor = anchor emitter.anchor_data.alias = alias return true } // Check if a tag is valid. func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { if len(tag) == 0 { return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") } for i := 0; i < len(emitter.tag_directives); i++ { tag_directive := &emitter.tag_directives[i] if bytes.HasPrefix(tag, tag_directive.prefix) { emitter.tag_data.handle = tag_directive.handle emitter.tag_data.suffix = tag[len(tag_directive.prefix):] return true } } emitter.tag_data.suffix = tag return true } // Check if a scalar is valid. func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { var ( block_indicators = false flow_indicators = false line_breaks = false special_characters = false leading_space = false leading_break = false trailing_space = false trailing_break = false break_space = false space_break = false preceeded_by_whitespace = false followed_by_whitespace = false previous_space = false previous_break = false ) emitter.scalar_data.value = value if len(value) == 0 { emitter.scalar_data.multiline = false emitter.scalar_data.flow_plain_allowed = false emitter.scalar_data.block_plain_allowed = true emitter.scalar_data.single_quoted_allowed = true emitter.scalar_data.block_allowed = false return true } if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' 
&& value[1] == '.' && value[2] == '.')) { block_indicators = true flow_indicators = true } preceeded_by_whitespace = true for i, w := 0, 0; i < len(value); i += w { w = width(value[0]) followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) if i == 0 { switch value[i] { case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': flow_indicators = true block_indicators = true case '?', ':': flow_indicators = true if followed_by_whitespace { block_indicators = true } case '-': if followed_by_whitespace { flow_indicators = true block_indicators = true } } } else { switch value[i] { case ',', '?', '[', ']', '{', '}': flow_indicators = true case ':': flow_indicators = true if followed_by_whitespace { block_indicators = true } case '#': if preceeded_by_whitespace { flow_indicators = true block_indicators = true } } } if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { special_characters = true } if is_space(value, i) { if i == 0 { leading_space = true } if i+width(value[i]) == len(value) { trailing_space = true } if previous_break { break_space = true } previous_space = true previous_break = false } else if is_break(value, i) { line_breaks = true if i == 0 { leading_break = true } if i+width(value[i]) == len(value) { trailing_break = true } if previous_space { space_break = true } previous_space = false previous_break = true } else { previous_space = false previous_break = false } // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
preceeded_by_whitespace = is_blankz(value, i) } emitter.scalar_data.multiline = line_breaks emitter.scalar_data.flow_plain_allowed = true emitter.scalar_data.block_plain_allowed = true emitter.scalar_data.single_quoted_allowed = true emitter.scalar_data.block_allowed = true if leading_space || leading_break || trailing_space || trailing_break { emitter.scalar_data.flow_plain_allowed = false emitter.scalar_data.block_plain_allowed = false } if trailing_space { emitter.scalar_data.block_allowed = false } if break_space { emitter.scalar_data.flow_plain_allowed = false emitter.scalar_data.block_plain_allowed = false emitter.scalar_data.single_quoted_allowed = false } if space_break || special_characters { emitter.scalar_data.flow_plain_allowed = false emitter.scalar_data.block_plain_allowed = false emitter.scalar_data.single_quoted_allowed = false emitter.scalar_data.block_allowed = false } if line_breaks { emitter.scalar_data.flow_plain_allowed = false emitter.scalar_data.block_plain_allowed = false } if flow_indicators { emitter.scalar_data.flow_plain_allowed = false } if block_indicators { emitter.scalar_data.block_plain_allowed = false } return true } // Check if the event data is valid. 
func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { emitter.anchor_data.anchor = nil emitter.tag_data.handle = nil emitter.tag_data.suffix = nil emitter.scalar_data.value = nil switch event.typ { case yaml_ALIAS_EVENT: if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { return false } case yaml_SCALAR_EVENT: if len(event.anchor) > 0 { if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { return false } } if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { if !yaml_emitter_analyze_tag(emitter, event.tag) { return false } } if !yaml_emitter_analyze_scalar(emitter, event.value) { return false } case yaml_SEQUENCE_START_EVENT: if len(event.anchor) > 0 { if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { return false } } if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { if !yaml_emitter_analyze_tag(emitter, event.tag) { return false } } case yaml_MAPPING_START_EVENT: if len(event.anchor) > 0 { if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { return false } } if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { if !yaml_emitter_analyze_tag(emitter, event.tag) { return false } } } return true } // Write the BOM character. 
func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { if !flush(emitter) { return false } pos := emitter.buffer_pos emitter.buffer[pos+0] = '\xEF' emitter.buffer[pos+1] = '\xBB' emitter.buffer[pos+2] = '\xBF' emitter.buffer_pos += 3 return true } func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { indent := emitter.indent if indent < 0 { indent = 0 } if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { if !put_break(emitter) { return false } } for emitter.column < indent { if !put(emitter, ' ') { return false } } emitter.whitespace = true emitter.indention = true return true } func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { if need_whitespace && !emitter.whitespace { if !put(emitter, ' ') { return false } } if !write_all(emitter, indicator) { return false } emitter.whitespace = is_whitespace emitter.indention = (emitter.indention && is_indention) emitter.open_ended = false return true } func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { if !write_all(emitter, value) { return false } emitter.whitespace = false emitter.indention = false return true } func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { if !emitter.whitespace { if !put(emitter, ' ') { return false } } if !write_all(emitter, value) { return false } emitter.whitespace = false emitter.indention = false return true } func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { if need_whitespace && !emitter.whitespace { if !put(emitter, ' ') { return false } } for i := 0; i < len(value); { var must_write bool switch value[i] { case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': must_write = true default: must_write = is_alpha(value, i) } if must_write { if !write(emitter, value, &i) { return false } } else { w 
:= width(value[i]) for k := 0; k < w; k++ { octet := value[i] i++ if !put(emitter, '%') { return false } c := octet >> 4 if c < 10 { c += '0' } else { c += 'A' - 10 } if !put(emitter, c) { return false } c = octet & 0x0f if c < 10 { c += '0' } else { c += 'A' - 10 } if !put(emitter, c) { return false } } } } emitter.whitespace = false emitter.indention = false return true } func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { if !emitter.whitespace { if !put(emitter, ' ') { return false } } spaces := false breaks := false for i := 0; i < len(value); { if is_space(value, i) { if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { if !yaml_emitter_write_indent(emitter) { return false } i += width(value[i]) } else { if !write(emitter, value, &i) { return false } } spaces = true } else if is_break(value, i) { if !breaks && value[i] == '\n' { if !put_break(emitter) { return false } } if !write_break(emitter, value, &i) { return false } emitter.indention = true breaks = true } else { if breaks { if !yaml_emitter_write_indent(emitter) { return false } } if !write(emitter, value, &i) { return false } emitter.indention = false spaces = false breaks = false } } emitter.whitespace = false emitter.indention = false if emitter.root_context { emitter.open_ended = true } return true } func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { return false } spaces := false breaks := false for i := 0; i < len(value); { if is_space(value, i) { if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { if !yaml_emitter_write_indent(emitter) { return false } i += width(value[i]) } else { if !write(emitter, value, &i) { return false } } spaces = true } else if is_break(value, i) { if !breaks && value[i] == '\n' 
{ if !put_break(emitter) { return false } } if !write_break(emitter, value, &i) { return false } emitter.indention = true breaks = true } else { if breaks { if !yaml_emitter_write_indent(emitter) { return false } } if value[i] == '\'' { if !put(emitter, '\'') { return false } } if !write(emitter, value, &i) { return false } emitter.indention = false spaces = false breaks = false } } if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { return false } emitter.whitespace = false emitter.indention = false return true } func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { spaces := false if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { return false } for i := 0; i < len(value); { if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || is_bom(value, i) || is_break(value, i) || value[i] == '"' || value[i] == '\\' { octet := value[i] var w int var v rune switch { case octet&0x80 == 0x00: w, v = 1, rune(octet&0x7F) case octet&0xE0 == 0xC0: w, v = 2, rune(octet&0x1F) case octet&0xF0 == 0xE0: w, v = 3, rune(octet&0x0F) case octet&0xF8 == 0xF0: w, v = 4, rune(octet&0x07) } for k := 1; k < w; k++ { octet = value[i+k] v = (v << 6) + (rune(octet) & 0x3F) } i += w if !put(emitter, '\\') { return false } var ok bool switch v { case 0x00: ok = put(emitter, '0') case 0x07: ok = put(emitter, 'a') case 0x08: ok = put(emitter, 'b') case 0x09: ok = put(emitter, 't') case 0x0A: ok = put(emitter, 'n') case 0x0b: ok = put(emitter, 'v') case 0x0c: ok = put(emitter, 'f') case 0x0d: ok = put(emitter, 'r') case 0x1b: ok = put(emitter, 'e') case 0x22: ok = put(emitter, '"') case 0x5c: ok = put(emitter, '\\') case 0x85: ok = put(emitter, 'N') case 0xA0: ok = put(emitter, '_') case 0x2028: ok = put(emitter, 'L') case 0x2029: ok = put(emitter, 'P') default: if v <= 0xFF { ok = put(emitter, 'x') w = 2 } else if v <= 0xFFFF { ok = put(emitter, 'u') w = 4 } else { ok = 
put(emitter, 'U') w = 8 } for k := (w - 1) * 4; ok && k >= 0; k -= 4 { digit := byte((v >> uint(k)) & 0x0F) if digit < 10 { ok = put(emitter, digit+'0') } else { ok = put(emitter, digit+'A'-10) } } } if !ok { return false } spaces = false } else if is_space(value, i) { if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { if !yaml_emitter_write_indent(emitter) { return false } if is_space(value, i+1) { if !put(emitter, '\\') { return false } } i += width(value[i]) } else if !write(emitter, value, &i) { return false } spaces = true } else { if !write(emitter, value, &i) { return false } spaces = false } } if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { return false } emitter.whitespace = false emitter.indention = false return true } func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { if is_space(value, 0) || is_break(value, 0) { indent_hint := []byte{'0' + byte(emitter.best_indent)} if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { return false } } emitter.open_ended = false var chomp_hint [1]byte if len(value) == 0 { chomp_hint[0] = '-' } else { i := len(value) - 1 for value[i]&0xC0 == 0x80 { i-- } if !is_break(value, i) { chomp_hint[0] = '-' } else if i == 0 { chomp_hint[0] = '+' emitter.open_ended = true } else { i-- for value[i]&0xC0 == 0x80 { i-- } if is_break(value, i) { chomp_hint[0] = '+' emitter.open_ended = true } } } if chomp_hint[0] != 0 { if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { return false } } return true } func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { return false } if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } if !put_break(emitter) { return false } emitter.indention = true emitter.whitespace = true breaks := true for i := 0; i < 
len(value); { if is_break(value, i) { if !write_break(emitter, value, &i) { return false } emitter.indention = true breaks = true } else { if breaks { if !yaml_emitter_write_indent(emitter) { return false } } if !write(emitter, value, &i) { return false } emitter.indention = false breaks = false } } return true } func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { return false } if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } if !put_break(emitter) { return false } emitter.indention = true emitter.whitespace = true breaks := true leading_spaces := true for i := 0; i < len(value); { if is_break(value, i) { if !breaks && !leading_spaces && value[i] == '\n' { k := 0 for is_break(value, k) { k += width(value[k]) } if !is_blankz(value, k) { if !put_break(emitter) { return false } } } if !write_break(emitter, value, &i) { return false } emitter.indention = true breaks = true } else { if breaks { if !yaml_emitter_write_indent(emitter) { return false } leading_spaces = is_blank(value, i) } if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { if !yaml_emitter_write_indent(emitter) { return false } i += width(value[i]) } else { if !write(emitter, value, &i) { return false } } emitter.indention = false breaks = false } } return true } charm-2.1.1/src/gopkg.in/yaml.v1/writerc.go0000664000175000017500000000460312672604524017406 0ustar marcomarcopackage yaml // Set the writer error and return false. func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { emitter.error = yaml_WRITER_ERROR emitter.problem = problem return false } // Flush the output buffer. func yaml_emitter_flush(emitter *yaml_emitter_t) bool { if emitter.write_handler == nil { panic("write handler not set") } // Check if the buffer is empty. 
if emitter.buffer_pos == 0 { return true } // If the output encoding is UTF-8, we don't need to recode the buffer. if emitter.encoding == yaml_UTF8_ENCODING { if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) } emitter.buffer_pos = 0 return true } // Recode the buffer into the raw buffer. var low, high int if emitter.encoding == yaml_UTF16LE_ENCODING { low, high = 0, 1 } else { high, low = 1, 0 } pos := 0 for pos < emitter.buffer_pos { // See the "reader.c" code for more details on UTF-8 encoding. Note // that we assume that the buffer contains a valid UTF-8 sequence. // Read the next UTF-8 character. octet := emitter.buffer[pos] var w int var value rune switch { case octet&0x80 == 0x00: w, value = 1, rune(octet&0x7F) case octet&0xE0 == 0xC0: w, value = 2, rune(octet&0x1F) case octet&0xF0 == 0xE0: w, value = 3, rune(octet&0x0F) case octet&0xF8 == 0xF0: w, value = 4, rune(octet&0x07) } for k := 1; k < w; k++ { octet = emitter.buffer[pos+k] value = (value << 6) + (rune(octet) & 0x3F) } pos += w // Write the character. if value < 0x10000 { var b [2]byte b[high] = byte(value >> 8) b[low] = byte(value & 0xFF) emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) } else { // Write the character using a surrogate pair (check "reader.c"). var b [4]byte value -= 0x10000 b[high] = byte(0xD8 + (value >> 18)) b[low] = byte((value >> 10) & 0xFF) b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) b[low+2] = byte(value & 0xFF) emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) } } // Write the raw buffer. 
if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) } emitter.buffer_pos = 0 emitter.raw_buffer = emitter.raw_buffer[:0] return true } charm-2.1.1/src/gopkg.in/yaml.v1/yamlh.go0000664000175000017500000006150012672604524017040 0ustar marcomarcopackage yaml import ( "io" ) // The version directive data. type yaml_version_directive_t struct { major int8 // The major version number. minor int8 // The minor version number. } // The tag directive data. type yaml_tag_directive_t struct { handle []byte // The tag handle. prefix []byte // The tag prefix. } type yaml_encoding_t int // The stream encoding. const ( // Let the parser choose the encoding. yaml_ANY_ENCODING yaml_encoding_t = iota yaml_UTF8_ENCODING // The default UTF-8 encoding. yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. ) type yaml_break_t int // Line break types. const ( // Let the parser choose the break type. yaml_ANY_BREAK yaml_break_t = iota yaml_CR_BREAK // Use CR for line breaks (Mac style). yaml_LN_BREAK // Use LN for line breaks (Unix style). yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). ) type yaml_error_type_t int // Many bad things could happen with the parser and emitter. const ( // No error is produced. yaml_NO_ERROR yaml_error_type_t = iota yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. yaml_READER_ERROR // Cannot read or decode the input stream. yaml_SCANNER_ERROR // Cannot scan the input stream. yaml_PARSER_ERROR // Cannot parse the input stream. yaml_COMPOSER_ERROR // Cannot compose a YAML document. yaml_WRITER_ERROR // Cannot write to the output stream. yaml_EMITTER_ERROR // Cannot emit a YAML stream. ) // The pointer position. type yaml_mark_t struct { index int // The position index. line int // The position line. column int // The position column. 
} // Node Styles type yaml_style_t int8 type yaml_scalar_style_t yaml_style_t // Scalar styles. const ( // Let the emitter choose the style. yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota yaml_PLAIN_SCALAR_STYLE // The plain scalar style. yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. yaml_LITERAL_SCALAR_STYLE // The literal scalar style. yaml_FOLDED_SCALAR_STYLE // The folded scalar style. ) type yaml_sequence_style_t yaml_style_t // Sequence styles. const ( // Let the emitter choose the style. yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. ) type yaml_mapping_style_t yaml_style_t // Mapping styles. const ( // Let the emitter choose the style. yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota yaml_BLOCK_MAPPING_STYLE // The block mapping style. yaml_FLOW_MAPPING_STYLE // The flow mapping style. ) // Tokens type yaml_token_type_t int // Token types. const ( // An empty token. yaml_NO_TOKEN yaml_token_type_t = iota yaml_STREAM_START_TOKEN // A STREAM-START token. yaml_STREAM_END_TOKEN // A STREAM-END token. yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. yaml_BLOCK_END_TOKEN // A BLOCK-END token. yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. yaml_KEY_TOKEN // A KEY token. 
yaml_VALUE_TOKEN // A VALUE token. yaml_ALIAS_TOKEN // An ALIAS token. yaml_ANCHOR_TOKEN // An ANCHOR token. yaml_TAG_TOKEN // A TAG token. yaml_SCALAR_TOKEN // A SCALAR token. ) func (tt yaml_token_type_t) String() string { switch tt { case yaml_NO_TOKEN: return "yaml_NO_TOKEN" case yaml_STREAM_START_TOKEN: return "yaml_STREAM_START_TOKEN" case yaml_STREAM_END_TOKEN: return "yaml_STREAM_END_TOKEN" case yaml_VERSION_DIRECTIVE_TOKEN: return "yaml_VERSION_DIRECTIVE_TOKEN" case yaml_TAG_DIRECTIVE_TOKEN: return "yaml_TAG_DIRECTIVE_TOKEN" case yaml_DOCUMENT_START_TOKEN: return "yaml_DOCUMENT_START_TOKEN" case yaml_DOCUMENT_END_TOKEN: return "yaml_DOCUMENT_END_TOKEN" case yaml_BLOCK_SEQUENCE_START_TOKEN: return "yaml_BLOCK_SEQUENCE_START_TOKEN" case yaml_BLOCK_MAPPING_START_TOKEN: return "yaml_BLOCK_MAPPING_START_TOKEN" case yaml_BLOCK_END_TOKEN: return "yaml_BLOCK_END_TOKEN" case yaml_FLOW_SEQUENCE_START_TOKEN: return "yaml_FLOW_SEQUENCE_START_TOKEN" case yaml_FLOW_SEQUENCE_END_TOKEN: return "yaml_FLOW_SEQUENCE_END_TOKEN" case yaml_FLOW_MAPPING_START_TOKEN: return "yaml_FLOW_MAPPING_START_TOKEN" case yaml_FLOW_MAPPING_END_TOKEN: return "yaml_FLOW_MAPPING_END_TOKEN" case yaml_BLOCK_ENTRY_TOKEN: return "yaml_BLOCK_ENTRY_TOKEN" case yaml_FLOW_ENTRY_TOKEN: return "yaml_FLOW_ENTRY_TOKEN" case yaml_KEY_TOKEN: return "yaml_KEY_TOKEN" case yaml_VALUE_TOKEN: return "yaml_VALUE_TOKEN" case yaml_ALIAS_TOKEN: return "yaml_ALIAS_TOKEN" case yaml_ANCHOR_TOKEN: return "yaml_ANCHOR_TOKEN" case yaml_TAG_TOKEN: return "yaml_TAG_TOKEN" case yaml_SCALAR_TOKEN: return "yaml_SCALAR_TOKEN" } return "" } // The token structure. type yaml_token_t struct { // The token type. typ yaml_token_type_t // The start/end of the token. start_mark, end_mark yaml_mark_t // The stream encoding (for yaml_STREAM_START_TOKEN). 
encoding yaml_encoding_t // The alias/anchor/scalar value or tag/tag directive handle // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). value []byte // The tag suffix (for yaml_TAG_TOKEN). suffix []byte // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). prefix []byte // The scalar style (for yaml_SCALAR_TOKEN). style yaml_scalar_style_t // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). major, minor int8 } // Events type yaml_event_type_t int8 // Event types. const ( // An empty event. yaml_NO_EVENT yaml_event_type_t = iota yaml_STREAM_START_EVENT // A STREAM-START event. yaml_STREAM_END_EVENT // A STREAM-END event. yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. yaml_ALIAS_EVENT // An ALIAS event. yaml_SCALAR_EVENT // A SCALAR event. yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. yaml_MAPPING_START_EVENT // A MAPPING-START event. yaml_MAPPING_END_EVENT // A MAPPING-END event. ) // The event structure. type yaml_event_t struct { // The event type. typ yaml_event_type_t // The start and end of the event. start_mark, end_mark yaml_mark_t // The document encoding (for yaml_STREAM_START_EVENT). encoding yaml_encoding_t // The version directive (for yaml_DOCUMENT_START_EVENT). version_directive *yaml_version_directive_t // The list of tag directives (for yaml_DOCUMENT_START_EVENT). tag_directives []yaml_tag_directive_t // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). anchor []byte // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). tag []byte // The scalar value (for yaml_SCALAR_EVENT). value []byte // Is the document start/end indicator implicit, or the tag optional? 
// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). implicit bool // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). quoted_implicit bool // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). style yaml_style_t } func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } // Nodes const ( yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. // Not in original libyaml. yaml_BINARY_TAG = "tag:yaml.org,2002:binary" yaml_MERGE_TAG = "tag:yaml.org,2002:merge" yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. ) type yaml_node_type_t int // Node types. const ( // An empty node. yaml_NO_NODE yaml_node_type_t = iota yaml_SCALAR_NODE // A scalar node. yaml_SEQUENCE_NODE // A sequence node. yaml_MAPPING_NODE // A mapping node. ) // An element of a sequence node. 
type yaml_node_item_t int // An element of a mapping node. type yaml_node_pair_t struct { key int // The key of the element. value int // The value of the element. } // The node structure. type yaml_node_t struct { typ yaml_node_type_t // The node type. tag []byte // The node tag. // The node data. // The scalar parameters (for yaml_SCALAR_NODE). scalar struct { value []byte // The scalar value. length int // The length of the scalar value. style yaml_scalar_style_t // The scalar style. } // The sequence parameters (for YAML_SEQUENCE_NODE). sequence struct { items_data []yaml_node_item_t // The stack of sequence items. style yaml_sequence_style_t // The sequence style. } // The mapping parameters (for yaml_MAPPING_NODE). mapping struct { pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). pairs_start *yaml_node_pair_t // The beginning of the stack. pairs_end *yaml_node_pair_t // The end of the stack. pairs_top *yaml_node_pair_t // The top of the stack. style yaml_mapping_style_t // The mapping style. } start_mark yaml_mark_t // The beginning of the node. end_mark yaml_mark_t // The end of the node. } // The document structure. type yaml_document_t struct { // The document nodes. nodes []yaml_node_t // The version directive. version_directive *yaml_version_directive_t // The list of tag directives. tag_directives_data []yaml_tag_directive_t tag_directives_start int // The beginning of the tag directives list. tag_directives_end int // The end of the tag directives list. start_implicit int // Is the document start indicator implicit? end_implicit int // Is the document end indicator implicit? // The start/end of the document. start_mark, end_mark yaml_mark_t } // The prototype of a read handler. // // The read handler is called when the parser needs to read more bytes from the // source. The handler should write not more than size bytes to the buffer. // The number of written bytes should be set to the size_read variable. 
// // [in,out] data A pointer to an application data specified by // yaml_parser_set_input(). // [out] buffer The buffer to write the data from the source. // [in] size The size of the buffer. // [out] size_read The actual number of bytes read from the source. // // On success, the handler should return 1. If the handler failed, // the returned value should be 0. On EOF, the handler should set the // size_read to 0 and return 1. type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) // This structure holds information about a potential simple key. type yaml_simple_key_t struct { possible bool // Is a simple key possible? required bool // Is a simple key required? token_number int // The number of the token. mark yaml_mark_t // The position mark. } // The states of the parser. type yaml_parser_state_t int const ( yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. 
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. yaml_PARSE_END_STATE // Expect nothing. ) func (ps yaml_parser_state_t) String() string { switch ps { case yaml_PARSE_STREAM_START_STATE: return "yaml_PARSE_STREAM_START_STATE" case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" case yaml_PARSE_DOCUMENT_START_STATE: return "yaml_PARSE_DOCUMENT_START_STATE" case yaml_PARSE_DOCUMENT_CONTENT_STATE: return "yaml_PARSE_DOCUMENT_CONTENT_STATE" case yaml_PARSE_DOCUMENT_END_STATE: return "yaml_PARSE_DOCUMENT_END_STATE" case yaml_PARSE_BLOCK_NODE_STATE: return "yaml_PARSE_BLOCK_NODE_STATE" case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" case yaml_PARSE_FLOW_NODE_STATE: return "yaml_PARSE_FLOW_NODE_STATE" case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" 
case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" case yaml_PARSE_FLOW_MAPPING_KEY_STATE: return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" case yaml_PARSE_END_STATE: return "yaml_PARSE_END_STATE" } return "" } // This structure holds aliases data. type yaml_alias_data_t struct { anchor []byte // The anchor. index int // The node id. mark yaml_mark_t // The anchor mark. } // The parser structure. // // All members are internal. Manage the structure using the // yaml_parser_ family of functions. type yaml_parser_t struct { // Error handling error yaml_error_type_t // Error type. problem string // Error description. // The byte about which the problem occured. problem_offset int problem_value int problem_mark yaml_mark_t // The error context. context string context_mark yaml_mark_t // Reader stuff read_handler yaml_read_handler_t // Read handler. input_file io.Reader // File input data. input []byte // String input data. input_pos int eof bool // EOF flag buffer []byte // The working buffer. buffer_pos int // The current position of the buffer. unread int // The number of unread characters in the buffer. raw_buffer []byte // The raw buffer. raw_buffer_pos int // The current position of the buffer. 
encoding yaml_encoding_t // The input encoding. offset int // The offset of the current position (in bytes). mark yaml_mark_t // The mark of the current position. // Scanner stuff stream_start_produced bool // Have we started to scan the input stream? stream_end_produced bool // Have we reached the end of the input stream? flow_level int // The number of unclosed '[' and '{' indicators. tokens []yaml_token_t // The tokens queue. tokens_head int // The head of the tokens queue. tokens_parsed int // The number of tokens fetched from the queue. token_available bool // Does the tokens queue contain a token ready for dequeueing. indent int // The current indentation level. indents []int // The indentation levels stack. simple_key_allowed bool // May a simple key occur at the current position? simple_keys []yaml_simple_key_t // The stack of simple keys. // Parser stuff state yaml_parser_state_t // The current parser state. states []yaml_parser_state_t // The parser states stack. marks []yaml_mark_t // The stack of marks. tag_directives []yaml_tag_directive_t // The list of TAG directives. // Dumper stuff aliases []yaml_alias_data_t // The alias data. document *yaml_document_t // The currently parsed document. } // Emitter Definitions // The prototype of a write handler. // // The write handler is called when the emitter needs to flush the accumulated // characters to the output. The handler should write @a size bytes of the // @a buffer to the output. // // @param[in,out] data A pointer to an application data specified by // yaml_emitter_set_output(). // @param[in] buffer The buffer with bytes to be written. // @param[in] size The size of the buffer. // // @returns On success, the handler should return @c 1. If the handler failed, // the returned value should be @c 0. // type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error type yaml_emitter_state_t int // The emitter states. const ( // Expect STREAM-START. 
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. yaml_EMIT_END_STATE // Expect nothing. ) // The emitter structure. // // All members are internal. Manage the structure using the @c yaml_emitter_ // family of functions. type yaml_emitter_t struct { // Error handling error yaml_error_type_t // Error type. problem string // Error description. // Writer stuff write_handler yaml_write_handler_t // Write handler. output_buffer *[]byte // String output data. output_file io.Writer // File output data. buffer []byte // The working buffer. buffer_pos int // The current position of the buffer. raw_buffer []byte // The raw buffer. raw_buffer_pos int // The current position of the buffer. encoding yaml_encoding_t // The stream encoding. 
// Emitter stuff canonical bool // If the output is in the canonical style? best_indent int // The number of indentation spaces. best_width int // The preferred width of the output lines. unicode bool // Allow unescaped non-ASCII characters? line_break yaml_break_t // The preferred line break. state yaml_emitter_state_t // The current emitter state. states []yaml_emitter_state_t // The stack of states. events []yaml_event_t // The event queue. events_head int // The head of the event queue. indents []int // The stack of indentation levels. tag_directives []yaml_tag_directive_t // The list of tag directives. indent int // The current indentation level. flow_level int // The current flow level. root_context bool // Is it the document root context? sequence_context bool // Is it a sequence context? mapping_context bool // Is it a mapping context? simple_key_context bool // Is it a simple mapping key context? line int // The current line. column int // The current column. whitespace bool // If the last character was a whitespace? indention bool // If the last character was an indentation character (' ', '-', '?', ':')? open_ended bool // If an explicit document end is required? // Anchor analysis. anchor_data struct { anchor []byte // The anchor value. alias bool // Is it an alias? } // Tag analysis. tag_data struct { handle []byte // The tag handle. suffix []byte // The tag suffix. } // Scalar analysis. scalar_data struct { value []byte // The scalar value. multiline bool // Does the scalar contain line breaks? flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? block_plain_allowed bool // Can the scalar be expressed in the block plain style? single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? block_allowed bool // Can the scalar be expressed in the literal or folded styles? style yaml_scalar_style_t // The output style. } // Dumper stuff opened bool // If the stream was already opened? 
closed bool // If the stream was already closed? // The information associated with the document nodes. anchors *struct { references int // The number of references. anchor int // The anchor id. serialized bool // If the node has been emitted? } last_anchor_id int // The last assigned anchor id. document *yaml_document_t // The currently emitted document. } charm-2.1.1/src/gopkg.in/yaml.v1/LICENSE0000664000175000017500000002112612672604524016404 0ustar marcomarco Copyright (c) 2011-2014 - Canonical Inc. This software is licensed under the LGPLv3, included below. As a special exception to the GNU Lesser General Public License version 3 ("LGPL3"), the copyright holders of this Library give you permission to convey to a third party a Combined Work that links statically or dynamically to this Library without providing any Minimal Corresponding Source or Minimal Application Code as set out in 4d or providing the installation information set out in section 4e, provided that you comply with the other provisions of LGPL3 and provided that you meet, for the Application the terms and conditions of the license(s) which apply to the Application. Except as stated in this special exception, the provisions of LGPL3 will continue to comply in full to this Library. If you modify this Library, you may apply this exception to your version of this Library, but you are not obliged to do so. If you do not wish to do so, delete this exception statement from your version. This exception does not (and cannot) modify any license terms which apply to the Application, with which you must still comply. GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 
This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. 
If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. 
c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. 
You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. 
charm-2.1.1/src/gopkg.in/yaml.v1/scannerc.go0000664000175000017500000022671712672604524017537 0ustar marcomarcopackage yaml import ( "bytes" "fmt" ) // Introduction // ************ // // The following notes assume that you are familiar with the YAML specification // (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in // some cases we are less restrictive that it requires. // // The process of transforming a YAML stream into a sequence of events is // divided on two steps: Scanning and Parsing. // // The Scanner transforms the input stream into a sequence of tokens, while the // parser transform the sequence of tokens produced by the Scanner into a // sequence of parsing events. // // The Scanner is rather clever and complicated. The Parser, on the contrary, // is a straightforward implementation of a recursive-descendant parser (or, // LL(1) parser, as it is usually called). // // Actually there are two issues of Scanning that might be called "clever", the // rest is quite straightforward. The issues are "block collection start" and // "simple keys". Both issues are explained below in details. // // Here the Scanning step is explained and implemented. We start with the list // of all the tokens produced by the Scanner together with short descriptions. // // Now, tokens: // // STREAM-START(encoding) # The stream start. // STREAM-END # The stream end. // VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. // TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. // DOCUMENT-START # '---' // DOCUMENT-END # '...' // BLOCK-SEQUENCE-START # Indentation increase denoting a block // BLOCK-MAPPING-START # sequence or a block mapping. // BLOCK-END # Indentation decrease. // FLOW-SEQUENCE-START # '[' // FLOW-SEQUENCE-END # ']' // BLOCK-SEQUENCE-START # '{' // BLOCK-SEQUENCE-END # '}' // BLOCK-ENTRY # '-' // FLOW-ENTRY # ',' // KEY # '?' or nothing (simple keys). 
// VALUE # ':' // ALIAS(anchor) # '*anchor' // ANCHOR(anchor) # '&anchor' // TAG(handle,suffix) # '!handle!suffix' // SCALAR(value,style) # A scalar. // // The following two tokens are "virtual" tokens denoting the beginning and the // end of the stream: // // STREAM-START(encoding) // STREAM-END // // We pass the information about the input stream encoding with the // STREAM-START token. // // The next two tokens are responsible for tags: // // VERSION-DIRECTIVE(major,minor) // TAG-DIRECTIVE(handle,prefix) // // Example: // // %YAML 1.1 // %TAG ! !foo // %TAG !yaml! tag:yaml.org,2002: // --- // // The correspoding sequence of tokens: // // STREAM-START(utf-8) // VERSION-DIRECTIVE(1,1) // TAG-DIRECTIVE("!","!foo") // TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") // DOCUMENT-START // STREAM-END // // Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole // line. // // The document start and end indicators are represented by: // // DOCUMENT-START // DOCUMENT-END // // Note that if a YAML stream contains an implicit document (without '---' // and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be // produced. // // In the following examples, we present whole documents together with the // produced tokens. // // 1. An implicit document: // // 'a scalar' // // Tokens: // // STREAM-START(utf-8) // SCALAR("a scalar",single-quoted) // STREAM-END // // 2. An explicit document: // // --- // 'a scalar' // ... // // Tokens: // // STREAM-START(utf-8) // DOCUMENT-START // SCALAR("a scalar",single-quoted) // DOCUMENT-END // STREAM-END // // 3. Several documents in a stream: // // 'a scalar' // --- // 'another scalar' // --- // 'yet another scalar' // // Tokens: // // STREAM-START(utf-8) // SCALAR("a scalar",single-quoted) // DOCUMENT-START // SCALAR("another scalar",single-quoted) // DOCUMENT-START // SCALAR("yet another scalar",single-quoted) // STREAM-END // // We have already introduced the SCALAR token above. 
The following tokens are // used to describe aliases, anchors, tag, and scalars: // // ALIAS(anchor) // ANCHOR(anchor) // TAG(handle,suffix) // SCALAR(value,style) // // The following series of examples illustrate the usage of these tokens: // // 1. A recursive sequence: // // &A [ *A ] // // Tokens: // // STREAM-START(utf-8) // ANCHOR("A") // FLOW-SEQUENCE-START // ALIAS("A") // FLOW-SEQUENCE-END // STREAM-END // // 2. A tagged scalar: // // !!float "3.14" # A good approximation. // // Tokens: // // STREAM-START(utf-8) // TAG("!!","float") // SCALAR("3.14",double-quoted) // STREAM-END // // 3. Various scalar styles: // // --- # Implicit empty plain scalars do not produce tokens. // --- a plain scalar // --- 'a single-quoted scalar' // --- "a double-quoted scalar" // --- |- // a literal scalar // --- >- // a folded // scalar // // Tokens: // // STREAM-START(utf-8) // DOCUMENT-START // DOCUMENT-START // SCALAR("a plain scalar",plain) // DOCUMENT-START // SCALAR("a single-quoted scalar",single-quoted) // DOCUMENT-START // SCALAR("a double-quoted scalar",double-quoted) // DOCUMENT-START // SCALAR("a literal scalar",literal) // DOCUMENT-START // SCALAR("a folded scalar",folded) // STREAM-END // // Now it's time to review collection-related tokens. We will start with // flow collections: // // FLOW-SEQUENCE-START // FLOW-SEQUENCE-END // FLOW-MAPPING-START // FLOW-MAPPING-END // FLOW-ENTRY // KEY // VALUE // // The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and // FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' // correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the // indicators '?' and ':', which are used for denoting mapping keys and values, // are represented by the KEY and VALUE tokens. // // The following examples show flow collections: // // 1. 
A flow sequence: // // [item 1, item 2, item 3] // // Tokens: // // STREAM-START(utf-8) // FLOW-SEQUENCE-START // SCALAR("item 1",plain) // FLOW-ENTRY // SCALAR("item 2",plain) // FLOW-ENTRY // SCALAR("item 3",plain) // FLOW-SEQUENCE-END // STREAM-END // // 2. A flow mapping: // // { // a simple key: a value, # Note that the KEY token is produced. // ? a complex key: another value, // } // // Tokens: // // STREAM-START(utf-8) // FLOW-MAPPING-START // KEY // SCALAR("a simple key",plain) // VALUE // SCALAR("a value",plain) // FLOW-ENTRY // KEY // SCALAR("a complex key",plain) // VALUE // SCALAR("another value",plain) // FLOW-ENTRY // FLOW-MAPPING-END // STREAM-END // // A simple key is a key which is not denoted by the '?' indicator. Note that // the Scanner still produce the KEY token whenever it encounters a simple key. // // For scanning block collections, the following tokens are used (note that we // repeat KEY and VALUE here): // // BLOCK-SEQUENCE-START // BLOCK-MAPPING-START // BLOCK-END // BLOCK-ENTRY // KEY // VALUE // // The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation // increase that precedes a block collection (cf. the INDENT token in Python). // The token BLOCK-END denote indentation decrease that ends a block collection // (cf. the DEDENT token in Python). However YAML has some syntax pecularities // that makes detections of these tokens more complex. // // The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators // '-', '?', and ':' correspondingly. // // The following examples show how the tokens BLOCK-SEQUENCE-START, // BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: // // 1. 
Block sequences: // // - item 1 // - item 2 // - // - item 3.1 // - item 3.2 // - // key 1: value 1 // key 2: value 2 // // Tokens: // // STREAM-START(utf-8) // BLOCK-SEQUENCE-START // BLOCK-ENTRY // SCALAR("item 1",plain) // BLOCK-ENTRY // SCALAR("item 2",plain) // BLOCK-ENTRY // BLOCK-SEQUENCE-START // BLOCK-ENTRY // SCALAR("item 3.1",plain) // BLOCK-ENTRY // SCALAR("item 3.2",plain) // BLOCK-END // BLOCK-ENTRY // BLOCK-MAPPING-START // KEY // SCALAR("key 1",plain) // VALUE // SCALAR("value 1",plain) // KEY // SCALAR("key 2",plain) // VALUE // SCALAR("value 2",plain) // BLOCK-END // BLOCK-END // STREAM-END // // 2. Block mappings: // // a simple key: a value # The KEY token is produced here. // ? a complex key // : another value // a mapping: // key 1: value 1 // key 2: value 2 // a sequence: // - item 1 // - item 2 // // Tokens: // // STREAM-START(utf-8) // BLOCK-MAPPING-START // KEY // SCALAR("a simple key",plain) // VALUE // SCALAR("a value",plain) // KEY // SCALAR("a complex key",plain) // VALUE // SCALAR("another value",plain) // KEY // SCALAR("a mapping",plain) // BLOCK-MAPPING-START // KEY // SCALAR("key 1",plain) // VALUE // SCALAR("value 1",plain) // KEY // SCALAR("key 2",plain) // VALUE // SCALAR("value 2",plain) // BLOCK-END // KEY // SCALAR("a sequence",plain) // VALUE // BLOCK-SEQUENCE-START // BLOCK-ENTRY // SCALAR("item 1",plain) // BLOCK-ENTRY // SCALAR("item 2",plain) // BLOCK-END // BLOCK-END // STREAM-END // // YAML does not always require to start a new block collection from a new // line. If the current line contains only '-', '?', and ':' indicators, a new // block collection may start at the current line. The following examples // illustrate this case: // // 1. Collections in a sequence: // // - - item 1 // - item 2 // - key 1: value 1 // key 2: value 2 // - ? 
complex key // : complex value // // Tokens: // // STREAM-START(utf-8) // BLOCK-SEQUENCE-START // BLOCK-ENTRY // BLOCK-SEQUENCE-START // BLOCK-ENTRY // SCALAR("item 1",plain) // BLOCK-ENTRY // SCALAR("item 2",plain) // BLOCK-END // BLOCK-ENTRY // BLOCK-MAPPING-START // KEY // SCALAR("key 1",plain) // VALUE // SCALAR("value 1",plain) // KEY // SCALAR("key 2",plain) // VALUE // SCALAR("value 2",plain) // BLOCK-END // BLOCK-ENTRY // BLOCK-MAPPING-START // KEY // SCALAR("complex key") // VALUE // SCALAR("complex value") // BLOCK-END // BLOCK-END // STREAM-END // // 2. Collections in a mapping: // // ? a sequence // : - item 1 // - item 2 // ? a mapping // : key 1: value 1 // key 2: value 2 // // Tokens: // // STREAM-START(utf-8) // BLOCK-MAPPING-START // KEY // SCALAR("a sequence",plain) // VALUE // BLOCK-SEQUENCE-START // BLOCK-ENTRY // SCALAR("item 1",plain) // BLOCK-ENTRY // SCALAR("item 2",plain) // BLOCK-END // KEY // SCALAR("a mapping",plain) // VALUE // BLOCK-MAPPING-START // KEY // SCALAR("key 1",plain) // VALUE // SCALAR("value 1",plain) // KEY // SCALAR("key 2",plain) // VALUE // SCALAR("value 2",plain) // BLOCK-END // BLOCK-END // STREAM-END // // YAML also permits non-indented sequences if they are included into a block // mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: // // key: // - item 1 # BLOCK-SEQUENCE-START is NOT produced here. // - item 2 // // Tokens: // // STREAM-START(utf-8) // BLOCK-MAPPING-START // KEY // SCALAR("key",plain) // VALUE // BLOCK-ENTRY // SCALAR("item 1",plain) // BLOCK-ENTRY // SCALAR("item 2",plain) // BLOCK-END // // Ensure that the buffer contains the required number of characters. // Return true on success, false on failure (reader error or memory error). func cache(parser *yaml_parser_t, length int) bool { // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) return parser.unread >= length || yaml_parser_update_buffer(parser, length) } // Advance the buffer pointer. 
// skip advances past a single (possibly multi-byte UTF-8) non-break
// character: the mark's index/column move by one character while
// buffer_pos moves by the character's byte width.
func skip(parser *yaml_parser_t) {
	parser.mark.index++
	parser.mark.column++
	parser.unread--
	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
}

// skip_line advances past a line break, resetting the column to 0 and
// bumping the line counter.  CRLF is consumed as a single two-byte break;
// any other break character (per is_break) is consumed by its byte width.
// If the current position is not a break, nothing happens.
func skip_line(parser *yaml_parser_t) {
	if is_crlf(parser.buffer, parser.buffer_pos) {
		// CRLF counts as two characters for the index but one line break.
		parser.mark.index += 2
		parser.mark.column = 0
		parser.mark.line++
		parser.unread -= 2
		parser.buffer_pos += 2
	} else if is_break(parser.buffer, parser.buffer_pos) {
		parser.mark.index++
		parser.mark.column = 0
		parser.mark.line++
		parser.unread--
		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
	}
}

// Copy a character to a string buffer and advance pointers.
//
// read appends the current character (all of its UTF-8 bytes) to s and
// advances the parser position by one character.  The returned slice must
// be used by the caller (append semantics).  Panics if the buffer does not
// hold a valid character start at buffer_pos.
func read(parser *yaml_parser_t, s []byte) []byte {
	w := width(parser.buffer[parser.buffer_pos])
	if w == 0 {
		panic("invalid character sequence")
	}
	if len(s) == 0 {
		// Pre-size the scratch buffer to avoid repeated small growth.
		s = make([]byte, 0, 32)
	}
	if w == 1 && len(s)+w <= cap(s) {
		// Fast path: single byte and spare capacity — extend in place.
		s = s[:len(s)+1]
		s[len(s)-1] = parser.buffer[parser.buffer_pos]
		parser.buffer_pos++
	} else {
		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
		parser.buffer_pos += w
	}
	parser.mark.index++
	parser.mark.column++
	parser.unread--
	return s
}

// Copy a line break character to a string buffer and advance pointers.
//
// read_line consumes one line break at the current position and appends its
// normalized form to s: CR, LF, CRLF and NEL (U+0085) are all normalized to
// a single '\n', while LS/PS (U+2028/U+2029) are copied through verbatim.
// If the current position is not a break, s is returned unchanged and the
// position does not move.
// NOTE(review): the CRLF case bumps mark.index twice in total (once in the
// case arm, once after the switch) — two input characters, one '\n' output.
func read_line(parser *yaml_parser_t, s []byte) []byte {
	buf := parser.buffer
	pos := parser.buffer_pos
	switch {
	case buf[pos] == '\r' && buf[pos+1] == '\n':
		// CR LF . LF
		s = append(s, '\n')
		parser.buffer_pos += 2
		parser.mark.index++
		parser.unread--
	case buf[pos] == '\r' || buf[pos] == '\n':
		// CR|LF . LF
		s = append(s, '\n')
		parser.buffer_pos += 1
	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
		// NEL . LF
		s = append(s, '\n')
		parser.buffer_pos += 2
	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
		// LS|PS . LS|PS
		s = append(s, buf[parser.buffer_pos:pos+3]...)
		parser.buffer_pos += 3
	default:
		return s
	}
	parser.mark.index++
	parser.mark.column = 0
	parser.mark.line++
	parser.unread--
	return s
}

// Get the next token.
func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { // Erase the token object. *token = yaml_token_t{} // [Go] Is this necessary? // No tokens after STREAM-END or error. if parser.stream_end_produced || parser.error != yaml_NO_ERROR { return true } // Ensure that the tokens queue contains enough tokens. if !parser.token_available { if !yaml_parser_fetch_more_tokens(parser) { return false } } // Fetch the next token from the queue. *token = parser.tokens[parser.tokens_head] parser.tokens_head++ parser.tokens_parsed++ parser.token_available = false if token.typ == yaml_STREAM_END_TOKEN { parser.stream_end_produced = true } return true } // Set the scanner error and return false. func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { parser.error = yaml_SCANNER_ERROR parser.context = context parser.context_mark = context_mark parser.problem = problem parser.problem_mark = parser.mark return false } func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { context := "while parsing a tag" if directive { context = "while parsing a %TAG directive" } return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") } func trace(args ...interface{}) func() { pargs := append([]interface{}{"+++"}, args...) fmt.Println(pargs...) pargs = append([]interface{}{"---"}, args...) return func() { fmt.Println(pargs...) } } // Ensure that the tokens queue contains at least one token which can be // returned to the Parser. func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { // While we need more tokens to fetch, do it. for { // Check if we really need to fetch more tokens. need_more_tokens := false if parser.tokens_head == len(parser.tokens) { // Queue is empty. need_more_tokens = true } else { // Check if any potential simple key may occupy the head position. 
if !yaml_parser_stale_simple_keys(parser) { return false } for i := range parser.simple_keys { simple_key := &parser.simple_keys[i] if simple_key.possible && simple_key.token_number == parser.tokens_parsed { need_more_tokens = true break } } } // We are finished. if !need_more_tokens { break } // Fetch the next token. if !yaml_parser_fetch_next_token(parser) { return false } } parser.token_available = true return true } // The dispatcher for token fetchers. func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { // Ensure that the buffer is initialized. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } // Check if we just started scanning. Fetch STREAM-START then. if !parser.stream_start_produced { return yaml_parser_fetch_stream_start(parser) } // Eat whitespaces and comments until we reach the next token. if !yaml_parser_scan_to_next_token(parser) { return false } // Remove obsolete potential simple keys. if !yaml_parser_stale_simple_keys(parser) { return false } // Check the indentation level against the current column. if !yaml_parser_unroll_indent(parser, parser.mark.column) { return false } // Ensure that the buffer contains at least 4 characters. 4 is the length // of the longest indicators ('--- ' and '... '). if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { return false } // Is it the end of the stream? if is_z(parser.buffer, parser.buffer_pos) { return yaml_parser_fetch_stream_end(parser) } // Is it a directive? if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { return yaml_parser_fetch_directive(parser) } buf := parser.buffer pos := parser.buffer_pos // Is it the document start indicator? if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) } // Is it the document end indicator? if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' 
&& buf[pos+2] == '.' && is_blankz(buf, pos+3) { return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) } // Is it the flow sequence start indicator? if buf[pos] == '[' { return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) } // Is it the flow mapping start indicator? if parser.buffer[parser.buffer_pos] == '{' { return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) } // Is it the flow sequence end indicator? if parser.buffer[parser.buffer_pos] == ']' { return yaml_parser_fetch_flow_collection_end(parser, yaml_FLOW_SEQUENCE_END_TOKEN) } // Is it the flow mapping end indicator? if parser.buffer[parser.buffer_pos] == '}' { return yaml_parser_fetch_flow_collection_end(parser, yaml_FLOW_MAPPING_END_TOKEN) } // Is it the flow entry indicator? if parser.buffer[parser.buffer_pos] == ',' { return yaml_parser_fetch_flow_entry(parser) } // Is it the block entry indicator? if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { return yaml_parser_fetch_block_entry(parser) } // Is it the key indicator? if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { return yaml_parser_fetch_key(parser) } // Is it the value indicator? if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { return yaml_parser_fetch_value(parser) } // Is it an alias? if parser.buffer[parser.buffer_pos] == '*' { return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) } // Is it an anchor? if parser.buffer[parser.buffer_pos] == '&' { return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) } // Is it a tag? if parser.buffer[parser.buffer_pos] == '!' { return yaml_parser_fetch_tag(parser) } // Is it a literal scalar? 
if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { return yaml_parser_fetch_block_scalar(parser, true) } // Is it a folded scalar? if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { return yaml_parser_fetch_block_scalar(parser, false) } // Is it a single-quoted scalar? if parser.buffer[parser.buffer_pos] == '\'' { return yaml_parser_fetch_flow_scalar(parser, true) } // Is it a double-quoted scalar? if parser.buffer[parser.buffer_pos] == '"' { return yaml_parser_fetch_flow_scalar(parser, false) } // Is it a plain scalar? // // A plain scalar may start with any non-blank characters except // // '-', '?', ':', ',', '[', ']', '{', '}', // '#', '&', '*', '!', '|', '>', '\'', '\"', // '%', '@', '`'. // // In the block context (and, for the '-' indicator, in the flow context // too), it may also start with the characters // // '-', '?', ':' // // if it is followed by a non-space character. // // The last rule is more restrictive than the specification requires. // [Go] Make this logic more reasonable. //switch parser.buffer[parser.buffer_pos] { //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': //} if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '|' || parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || (parser.flow_level == 0 && (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && !is_blankz(parser.buffer, parser.buffer_pos+1)) { return yaml_parser_fetch_plain_scalar(parser) } // If we don't determine the token type so far, it is an error. return yaml_parser_set_scanner_error(parser, "while scanning for the next token", parser.mark, "found character that cannot start any token") } // Check the list of potential simple keys and remove the positions that // cannot contain simple keys anymore. func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { // Check for a potential simple key for each flow level. for i := range parser.simple_keys { simple_key := &parser.simple_keys[i] // The specification requires that a simple key // // - is limited to a single line, // - is shorter than 1024 characters. if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { // Check if the potential simple key to be removed is required. if simple_key.required { return yaml_parser_set_scanner_error(parser, "while scanning a simple key", simple_key.mark, "could not find expected ':'") } simple_key.possible = false } } return true } // Check if a simple key may start at the current position and add it if // needed. func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { // A simple key is required at the current position if the scanner is in // the block context and the current column coincides with the indentation // level. 
required := parser.flow_level == 0 && parser.indent == parser.mark.column // A simple key is required only when it is the first token in the current // line. Therefore it is always allowed. But we add a check anyway. if required && !parser.simple_key_allowed { panic("should not happen") } // // If the current position may start a simple key, save it. // if parser.simple_key_allowed { simple_key := yaml_simple_key_t{ possible: true, required: required, token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), } simple_key.mark = parser.mark if !yaml_parser_remove_simple_key(parser) { return false } parser.simple_keys[len(parser.simple_keys)-1] = simple_key } return true } // Remove a potential simple key at the current flow level. func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { i := len(parser.simple_keys) - 1 if parser.simple_keys[i].possible { // If the key is required, it is an error. if parser.simple_keys[i].required { return yaml_parser_set_scanner_error(parser, "while scanning a simple key", parser.simple_keys[i].mark, "could not find expected ':'") } } // Remove the key from the stack. parser.simple_keys[i].possible = false return true } // Increase the flow level and resize the simple key list if needed. func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { // Reset the simple key on the next level. parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) // Increase the flow level. parser.flow_level++ return true } // Decrease the flow level. func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { if parser.flow_level > 0 { parser.flow_level-- parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] } return true } // Push the current indentation level to the stack and set the new level // the current column is greater than the indentation level. In this case, // append or insert the specified token into the token queue. 
func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
	// In the flow context, do nothing.
	if parser.flow_level > 0 {
		return true
	}

	if parser.indent < column {
		// Push the current indentation level to the stack and set the new
		// indentation level.
		parser.indents = append(parser.indents, parser.indent)
		parser.indent = column

		// Create a token and insert it into the queue.
		token := yaml_token_t{
			typ:        typ,
			start_mark: mark,
			end_mark:   mark,
		}
		// number == -1 means append; otherwise it is an absolute token
		// number that must be converted to a queue-relative position.
		if number > -1 {
			number -= parser.tokens_parsed
		}
		yaml_insert_token(parser, number, &token)
	}
	return true
}

// Pop indentation levels from the indents stack until the current level
// becomes less or equal to the column. For each indentation level, append
// the BLOCK-END token.
func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
	// In the flow context, do nothing.
	if parser.flow_level > 0 {
		return true
	}

	// Loop through the indentation levels in the stack.
	for parser.indent > column {
		// Create a token and append it to the queue.
		token := yaml_token_t{
			typ:        yaml_BLOCK_END_TOKEN,
			start_mark: parser.mark,
			end_mark:   parser.mark,
		}
		yaml_insert_token(parser, -1, &token)

		// Pop the indentation level.
		parser.indent = parser.indents[len(parser.indents)-1]
		parser.indents = parser.indents[:len(parser.indents)-1]
	}
	return true
}

// Initialize the scanner and produce the STREAM-START token.
func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
	// Set the initial indentation.
	parser.indent = -1

	// Initialize the simple key stack.
	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})

	// A simple key is allowed at the beginning of the stream.
	parser.simple_key_allowed = true

	// We have started.
	parser.stream_start_produced = true

	// Create the STREAM-START token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_STREAM_START_TOKEN,
		start_mark: parser.mark,
		end_mark:   parser.mark,
		encoding:   parser.encoding,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the STREAM-END token and shut down the scanner.
func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {

	// Force new line so that the STREAM-END mark points at column 0 of a
	// fresh line.
	if parser.mark.column != 0 {
		parser.mark.column = 0
		parser.mark.line++
	}

	// Reset the indentation level.
	if !yaml_parser_unroll_indent(parser, -1) {
		return false
	}

	// Reset simple keys.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	parser.simple_key_allowed = false

	// Create the STREAM-END token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_STREAM_END_TOKEN,
		start_mark: parser.mark,
		end_mark:   parser.mark,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
	// Reset the indentation level.
	if !yaml_parser_unroll_indent(parser, -1) {
		return false
	}

	// Reset simple keys.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	parser.simple_key_allowed = false

	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
	token := yaml_token_t{}
	if !yaml_parser_scan_directive(parser, &token) {
		return false
	}
	// Append the token to the queue.
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the DOCUMENT-START or DOCUMENT-END token.
func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
	// Reset the indentation level.
	if !yaml_parser_unroll_indent(parser, -1) {
		return false
	}

	// Reset simple keys.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	parser.simple_key_allowed = false

	// Consume the token: the indicator is exactly three characters
	// ("---" or "...").
	start_mark := parser.mark

	skip(parser)
	skip(parser)
	skip(parser)

	end_mark := parser.mark

	// Create the DOCUMENT-START or DOCUMENT-END token.
	token := yaml_token_t{
		typ:        typ,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	// Append the token to the queue.
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
	// The indicators '[' and '{' may start a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}

	// Increase the flow level.
	if !yaml_parser_increase_flow_level(parser) {
		return false
	}

	// A simple key may follow the indicators '[' and '{'.
	parser.simple_key_allowed = true

	// Consume the token.
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark

	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
	token := yaml_token_t{
		typ:        typ,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	// Append the token to the queue.
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
	// Reset any potential simple key on the current flow level.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	// Decrease the flow level.
	if !yaml_parser_decrease_flow_level(parser) {
		return false
	}

	// No simple keys after the indicators ']' and '}'.
	parser.simple_key_allowed = false

	// Consume the token.
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark

	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
	token := yaml_token_t{
		typ:        typ,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	// Append the token to the queue.
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the FLOW-ENTRY token.
func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
	// Reset any potential simple keys on the current flow level.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	// Simple keys are allowed after ','.
	parser.simple_key_allowed = true

	// Consume the token.
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark

	// Create the FLOW-ENTRY token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_FLOW_ENTRY_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the BLOCK-ENTRY token.
func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
	// Check if the scanner is in the block context.
	if parser.flow_level == 0 {
		// Check if we are allowed to start a new entry.
		if !parser.simple_key_allowed {
			return yaml_parser_set_scanner_error(parser, "", parser.mark,
				"block sequence entries are not allowed in this context")
		}
		// Add the BLOCK-SEQUENCE-START token if needed.
		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
			return false
		}
	} else {
		// It is an error for the '-' indicator to occur in the flow context,
		// but we let the Parser detect and report about it because the Parser
		// is able to point to the context.
	}

	// Reset any potential simple keys on the current flow level.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	// Simple keys are allowed after '-'.
	parser.simple_key_allowed = true

	// Consume the token.
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark

	// Create the BLOCK-ENTRY token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_BLOCK_ENTRY_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the KEY token.
func yaml_parser_fetch_key(parser *yaml_parser_t) bool {

	// In the block context, additional checks are required.
	if parser.flow_level == 0 {
		// Check if we are allowed to start a new key (not necessarily simple).
		if !parser.simple_key_allowed {
			return yaml_parser_set_scanner_error(parser, "", parser.mark,
				"mapping keys are not allowed in this context")
		}
		// Add the BLOCK-MAPPING-START token if needed.
		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
			return false
		}
	}

	// Reset any potential simple keys on the current flow level.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	// Simple keys are allowed after '?' in the block context.
	parser.simple_key_allowed = parser.flow_level == 0

	// Consume the token.
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark

	// Create the KEY token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_KEY_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the VALUE token.
func yaml_parser_fetch_value(parser *yaml_parser_t) bool {

	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]

	// Have we found a simple key?
	if simple_key.possible {
		// Create the KEY token and insert it into the queue *before* the
		// already-scanned key scalar, at the position recorded when the
		// candidate key was saved.
		token := yaml_token_t{
			typ:        yaml_KEY_TOKEN,
			start_mark: simple_key.mark,
			end_mark:   simple_key.mark,
		}
		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)

		// In the block context, we may need to add the BLOCK-MAPPING-START token.
		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
			simple_key.token_number,
			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
			return false
		}

		// Remove the simple key.
		simple_key.possible = false

		// A simple key cannot follow another simple key.
		parser.simple_key_allowed = false

	} else {
		// The ':' indicator follows a complex key.

		// In the block context, extra checks are required.
		if parser.flow_level == 0 {

			// Check if we are allowed to start a complex value.
			if !parser.simple_key_allowed {
				return yaml_parser_set_scanner_error(parser, "", parser.mark,
					"mapping values are not allowed in this context")
			}

			// Add the BLOCK-MAPPING-START token if needed.
			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
				return false
			}
		}

		// Simple keys after ':' are allowed in the block context.
		parser.simple_key_allowed = parser.flow_level == 0
	}

	// Consume the token.
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark

	// Create the VALUE token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_VALUE_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the ALIAS or ANCHOR token.
func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
	// An anchor or an alias could be a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}

	// A simple key cannot follow an anchor or an alias.
	parser.simple_key_allowed = false

	// Create the ALIAS or ANCHOR token and append it to the queue.
	var token yaml_token_t
	if !yaml_parser_scan_anchor(parser, &token, typ) {
		return false
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the TAG token.
func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
	// A tag could be a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}

	// A simple key cannot follow a tag.
	parser.simple_key_allowed = false

	// Create the TAG token and append it to the queue.
	var token yaml_token_t
	if !yaml_parser_scan_tag(parser, &token) {
		return false
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
	// Remove any potential simple keys.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	// A simple key may follow a block scalar.
	parser.simple_key_allowed = true

	// Create the SCALAR token and append it to the queue.
	var token yaml_token_t
	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
		return false
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
	// A plain scalar could be a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}

	// A simple key cannot follow a flow scalar.
	parser.simple_key_allowed = false

	// Create the SCALAR token and append it to the queue.
	var token yaml_token_t
	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
		return false
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the SCALAR(...,plain) token.
func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
	// A plain scalar could be a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}

	// A simple key cannot follow a flow scalar.
	parser.simple_key_allowed = false

	// Create the SCALAR token and append it to the queue.
	var token yaml_token_t
	if !yaml_parser_scan_plain_scalar(parser, &token) {
		return false
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Eat whitespaces and comments until the next token is found.
func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {

	// Until the next token is not found.
	for {
		// Allow the BOM mark to start a line.
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
			skip(parser)
		}

		// Eat whitespaces.
		// Tabs are allowed:
		//  - in the flow context
		//  - in the block context, but not at the beginning of the line or
		//    after '-', '?', or ':' (complex value).
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}

		// Tabs count as whitespace here only in flow context or where a
		// simple key is not allowed (see the comment above).
		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
			skip(parser)
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}

		// Eat a comment until a line break.
		if parser.buffer[parser.buffer_pos] == '#' {
			for !is_breakz(parser.buffer, parser.buffer_pos) {
				skip(parser)
				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
					return false
				}
			}
		}

		// If it is a line break, eat it.
		if is_break(parser.buffer, parser.buffer_pos) {
			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
				return false
			}
			skip_line(parser)

			// In the block context, a new line may start a simple key.
			if parser.flow_level == 0 {
				parser.simple_key_allowed = true
			}
		} else {
			break // We have found a token.
		}
	}

	return true
}

// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
//
// Scope:
//      %YAML    1.1    # a comment \n
//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
//      %TAG    !yaml!  tag:yaml.org,2002:  \n
//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
//
func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
	// Eat '%'.
	start_mark := parser.mark
	skip(parser)

	// Scan the directive name.
	var name []byte
	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
		return false
	}

	// Is it a YAML directive?
	if bytes.Equal(name, []byte("YAML")) {
		// Scan the VERSION directive value.
		var major, minor int8
		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
			return false
		}
		end_mark := parser.mark

		// Create a VERSION-DIRECTIVE token.
		*token = yaml_token_t{
			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
			start_mark: start_mark,
			end_mark:   end_mark,
			major:      major,
			minor:      minor,
		}

		// Is it a TAG directive?
	} else if bytes.Equal(name, []byte("TAG")) {
		// Scan the TAG directive value.
var handle, prefix []byte if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { return false } end_mark := parser.mark // Create a TAG-DIRECTIVE token. *token = yaml_token_t{ typ: yaml_TAG_DIRECTIVE_TOKEN, start_mark: start_mark, end_mark: end_mark, value: handle, prefix: prefix, } // Unknown directive. } else { yaml_parser_set_scanner_error(parser, "while scanning a directive", start_mark, "found uknown directive name") return false } // Eat the rest of the line including any comments. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } for is_blank(parser.buffer, parser.buffer_pos) { skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } if parser.buffer[parser.buffer_pos] == '#' { for !is_breakz(parser.buffer, parser.buffer_pos) { skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } } // Check if we are at the end of the line. if !is_breakz(parser.buffer, parser.buffer_pos) { yaml_parser_set_scanner_error(parser, "while scanning a directive", start_mark, "did not find expected comment or line break") return false } // Eat a line break. if is_break(parser.buffer, parser.buffer_pos) { if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { return false } skip_line(parser) } return true } // Scan the directive name. // // Scope: // %YAML 1.1 # a comment \n // ^^^^ // %TAG !yaml! tag:yaml.org,2002: \n // ^^^ // func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { // Consume the directive name. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } var s []byte for is_alpha(parser.buffer, parser.buffer_pos) { s = read(parser, s) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } // Check if the name is empty. 
	if len(s) == 0 {
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "could not find expected directive name")
		return false
	}

	// Check for a blank character after the name.
	if !is_blankz(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "found unexpected non-alphabetical character")
		return false
	}
	*name = s
	return true
}

// Scan the value of VERSION-DIRECTIVE.
//
// Scope:
//      %YAML   1.1     # a comment \n
//           ^^^^^^
func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
	// Eat whitespaces.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	for is_blank(parser.buffer, parser.buffer_pos) {
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Consume the major version number.
	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
		return false
	}

	// Eat '.'.
	if parser.buffer[parser.buffer_pos] != '.' {
		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
			start_mark, "did not find expected digit or '.' character")
	}

	skip(parser)

	// Consume the minor version number.
	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
		return false
	}
	return true
}

// Version components may be at most two digits long.
const max_number_length = 2

// Scan the version number of VERSION-DIRECTIVE.
//
// Scope:
//      %YAML   1.1     # a comment \n
//              ^
//      %YAML   1.1     # a comment \n
//                ^
func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {

	// Repeat while the next character is digit.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	var value, length int8
	for is_digit(parser.buffer, parser.buffer_pos) {
		// Check if the number is too long.
		length++
		if length > max_number_length {
			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
				start_mark, "found extremely long version number")
		}
		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Check if the number was present.
	if length == 0 {
		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
			start_mark, "did not find expected version number")
	}
	*number = value
	return true
}

// Scan the value of a TAG-DIRECTIVE token.
//
// Scope:
//      %TAG    !yaml!  tag:yaml.org,2002:  \n
//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
//
func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
	var handle_value, prefix_value []byte

	// Eat whitespaces.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	for is_blank(parser.buffer, parser.buffer_pos) {
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Scan a handle.
	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
		return false
	}

	// Expect a whitespace.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	if !is_blank(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
			start_mark, "did not find expected whitespace")
		return false
	}

	// Eat whitespaces.
	for is_blank(parser.buffer, parser.buffer_pos) {
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Scan a prefix.
	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
		return false
	}

	// Expect a whitespace or line break.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	if !is_blankz(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
			start_mark, "did not find expected whitespace or line break")
		return false
	}

	*handle = handle_value
	*prefix = prefix_value
	return true
}

// Scan an ALIAS or ANCHOR token: eat the '*' or '&' indicator, then consume
// the alphanumeric anchor name into the token value.
func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
	var s []byte

	// Eat the indicator character.
	start_mark := parser.mark
	skip(parser)

	// Consume the value.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	for is_alpha(parser.buffer, parser.buffer_pos) {
		s = read(parser, s)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	end_mark := parser.mark

	/*
	 * Check if length of the anchor is greater than 0 and it is followed by
	 * a whitespace character or one of the indicators:
	 *
	 *      '?', ':', ',', ']', '}', '%', '@', '`'.
	 */

	if len(s) == 0 ||
		!(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
			parser.buffer[parser.buffer_pos] == '`') {
		context := "while scanning an alias"
		if typ == yaml_ANCHOR_TOKEN {
			context = "while scanning an anchor"
		}
		yaml_parser_set_scanner_error(parser, context, start_mark,
			"did not find expected alphabetic or numeric character")
		return false
	}

	// Create a token.
	*token = yaml_token_t{
		typ:        typ,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      s,
	}

	return true
}

/*
 * Scan a TAG token.
 */

func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
	var handle, suffix []byte

	start_mark := parser.mark

	// Check if the tag is in the canonical form.
	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
		return false
	}

	if parser.buffer[parser.buffer_pos+1] == '<' {
		// Verbatim tag of the form '!<uri>'.  Keep the handle as ''.

		// Eat '!<'
		skip(parser)
		skip(parser)

		// Consume the tag value.
		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
			return false
		}

		// Check for '>' and eat it.
		if parser.buffer[parser.buffer_pos] != '>' {
			yaml_parser_set_scanner_error(parser, "while scanning a tag",
				start_mark, "did not find the expected '>'")
			return false
		}

		skip(parser)
	} else {
		// The tag has either the '!suffix' or the '!handle!suffix' form.

		// First, try to scan a handle.
		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
			return false
		}

		// Check if it is, indeed, handle: a handle starts and ends with '!'.
		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
			// Scan the suffix now.
			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
				return false
			}
		} else {
			// It wasn't a handle after all.  Scan the rest of the tag.
			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
				return false
			}

			// Set the handle to '!'.
			handle = []byte{'!'}

			// A special case: the '!' tag.  Set the handle to '' and the
			// suffix to '!'.
			if len(suffix) == 0 {
				handle, suffix = suffix, handle
			}
		}
	}

	// Check the character which ends the tag.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	if !is_blankz(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a tag",
			start_mark, "did not find expected whitespace or line break")
		return false
	}

	end_mark := parser.mark

	// Create a token.
	*token = yaml_token_t{
		typ:        yaml_TAG_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      handle,
		suffix:     suffix,
	}
	return true
}

// Scan a tag handle.
func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
	// Check the initial '!' character.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	if parser.buffer[parser.buffer_pos] != '!' {
		yaml_parser_set_scanner_tag_error(parser, directive,
			start_mark, "did not find expected '!'")
		return false
	}

	var s []byte

	// Copy the '!' character.
	s = read(parser, s)

	// Copy all subsequent alphabetical and numerical characters.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	for is_alpha(parser.buffer, parser.buffer_pos) {
		s = read(parser, s)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Check if the trailing character is '!' and copy it.
	if parser.buffer[parser.buffer_pos] == '!' {
		s = read(parser, s)
	} else {
		// It's either the '!' tag or not really a tag handle.  If it's a %TAG
		// directive, it's an error.  If it's a tag token, it must be a part of URI.
		if directive && !(s[0] == '!' && s[1] == 0) {
			yaml_parser_set_scanner_tag_error(parser, directive,
				start_mark, "did not find expected '!'")
			return false
		}
	}

	*handle = s
	return true
}

// Scan a tag URI (the suffix of a tag, or the prefix of a %TAG directive),
// optionally prepending the already-scanned head (minus its leading '!').
func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
	//size_t length = head ? strlen((char *)head) : 0
	var s []byte

	// Copy the head if needed.
	//
	// Note that we don't copy the leading '!' character.
	if len(head) > 1 {
		s = append(s, head[1:]...)
	}

	// Scan the tag.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	// The set of characters that may appear in URI is as follows:
	//
	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
	//      '%'.
	// [Go] Convert this into more reasonable logic.
	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
		parser.buffer[parser.buffer_pos] == '%' {
		// Check if it is a URI-escape sequence.
		if parser.buffer[parser.buffer_pos] == '%' {
			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
				return false
			}
		} else {
			s = read(parser, s)
		}

		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Check if the tag is non-empty.
	if len(s) == 0 {
		yaml_parser_set_scanner_tag_error(parser, directive,
			start_mark, "did not find expected tag URI")
		return false
	}
	*uri = s
	return true
}

// Decode an URI-escape sequence corresponding to a single UTF-8 character.
func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {

	// Decode the required number of characters.
	// w starts at the sentinel 1024 ("width unknown"); the first octet sets
	// it to the real UTF-8 sequence width via width().
	w := 1024
	for w > 0 {
		// Check for a URI-escaped octet.
		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
			return false
		}

		if !(parser.buffer[parser.buffer_pos] == '%' &&
			is_hex(parser.buffer, parser.buffer_pos+1) &&
			is_hex(parser.buffer, parser.buffer_pos+2)) {
			return yaml_parser_set_scanner_tag_error(parser, directive,
				start_mark, "did not find URI escaped octet")
		}

		// Get the octet.
octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) // If it is the leading octet, determine the length of the UTF-8 sequence. if w == 1024 { w = width(octet) if w == 0 { return yaml_parser_set_scanner_tag_error(parser, directive, start_mark, "found an incorrect leading UTF-8 octet") } } else { // Check if the trailing octet is correct. if octet&0xC0 != 0x80 { return yaml_parser_set_scanner_tag_error(parser, directive, start_mark, "found an incorrect trailing UTF-8 octet") } } // Copy the octet and move the pointers. *s = append(*s, octet) skip(parser) skip(parser) skip(parser) w-- } return true } // Scan a block scalar. func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { // Eat the indicator '|' or '>'. start_mark := parser.mark skip(parser) // Scan the additional block scalar indicators. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } // Check for a chomping indicator. var chomping, increment int if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { // Set the chomping method and eat the indicator. if parser.buffer[parser.buffer_pos] == '+' { chomping = +1 } else { chomping = -1 } skip(parser) // Check for an indentation indicator. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } if is_digit(parser.buffer, parser.buffer_pos) { // Check that the intendation is greater than 0. if parser.buffer[parser.buffer_pos] == '0' { yaml_parser_set_scanner_error(parser, "while scanning a block scalar", start_mark, "found an intendation indicator equal to 0") return false } // Get the intendation level and eat the indicator. increment = as_digit(parser.buffer, parser.buffer_pos) skip(parser) } } else if is_digit(parser.buffer, parser.buffer_pos) { // Do the same as above, but in the opposite order. 
if parser.buffer[parser.buffer_pos] == '0' { yaml_parser_set_scanner_error(parser, "while scanning a block scalar", start_mark, "found an intendation indicator equal to 0") return false } increment = as_digit(parser.buffer, parser.buffer_pos) skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { if parser.buffer[parser.buffer_pos] == '+' { chomping = +1 } else { chomping = -1 } skip(parser) } } // Eat whitespaces and comments to the end of the line. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } for is_blank(parser.buffer, parser.buffer_pos) { skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } if parser.buffer[parser.buffer_pos] == '#' { for !is_breakz(parser.buffer, parser.buffer_pos) { skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } } // Check if we are at the end of the line. if !is_breakz(parser.buffer, parser.buffer_pos) { yaml_parser_set_scanner_error(parser, "while scanning a block scalar", start_mark, "did not find expected comment or line break") return false } // Eat a line break. if is_break(parser.buffer, parser.buffer_pos) { if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { return false } skip_line(parser) } end_mark := parser.mark // Set the intendation level if it was specified. var indent int if increment > 0 { if parser.indent >= 0 { indent = parser.indent + increment } else { indent = increment } } // Scan the leading line breaks and determine the indentation level if needed. var s, leading_break, trailing_breaks []byte if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { return false } // Scan the block scalar content. 
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	var leading_blank, trailing_blank bool
	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
		// We are at the beginning of a non-empty line.

		// Is it a trailing whitespace?
		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)

		// Check if we need to fold the leading line break (folded scalars
		// only, and only between two non-blank lines).
		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
			// Do we need to join the lines by space?
			if len(trailing_breaks) == 0 {
				s = append(s, ' ')
			}
		} else {
			s = append(s, leading_break...)
		}
		leading_break = leading_break[:0]

		// Append the remaining line breaks.
		s = append(s, trailing_breaks...)
		trailing_breaks = trailing_breaks[:0]

		// Is it a leading whitespace?
		leading_blank = is_blank(parser.buffer, parser.buffer_pos)

		// Consume the current line.
		for !is_breakz(parser.buffer, parser.buffer_pos) {
			s = read(parser, s)
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}

		// Consume the line break.
		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
			return false
		}

		leading_break = read_line(parser, leading_break)

		// Eat the following indentation spaces and line breaks.
		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
			return false
		}
	}

	// Chomp the tail: -1 strips the final break, +1 keeps all trailing
	// breaks, 0 (clip, the default) keeps just the final break.
	if chomping != -1 {
		s = append(s, leading_break...)
	}
	if chomping == 1 {
		s = append(s, trailing_breaks...)
	}

	// Create a token.
	*token = yaml_token_t{
		typ:        yaml_SCALAR_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      s,
		style:      yaml_LITERAL_SCALAR_STYLE,
	}
	if !literal {
		token.style = yaml_FOLDED_SCALAR_STYLE
	}
	return true
}

// Scan indentation spaces and line breaks for a block scalar.  Determine the
// indentation level if needed.
func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
	*end_mark = parser.mark

	// Eat the indentation spaces and line breaks, tracking the deepest
	// column seen so the caller can auto-detect the indentation level.
	// NOTE: the error strings below keep libyaml's historical
	// "intendation" misspelling so messages stay identical to C libyaml.
	max_indent := 0
	for {
		// Eat the indentation spaces.
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
			skip(parser)
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}
		if parser.mark.column > max_indent {
			max_indent = parser.mark.column
		}

		// Check for a tab character messing the indentation.
		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
				start_mark, "found a tab character where an intendation space is expected")
		}

		// Have we found a non-empty line?
		if !is_break(parser.buffer, parser.buffer_pos) {
			break
		}

		// Consume the line break.
		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
			return false
		}
		// [Go] Should really be returning breaks instead.
		*breaks = read_line(parser, *breaks)
		*end_mark = parser.mark
	}

	// Determine the indentation level if needed: at least one deeper than
	// the parent node, and never less than 1.
	if *indent == 0 {
		*indent = max_indent
		if *indent < parser.indent+1 {
			*indent = parser.indent + 1
		}
		if *indent < 1 {
			*indent = 1
		}
	}
	return true
}

// Scan a quoted scalar.  'single' selects single-quoted style; otherwise
// double-quoted (with backslash escapes) is scanned.
func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
	// Eat the left quote.
	start_mark := parser.mark
	skip(parser)

	// Consume the content of the quoted scalar.
	var s, leading_break, trailing_breaks, whitespaces []byte
	for {
		// Check that there are no document indicators at the beginning of the line.
if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
	return false
}
// "---" or "..." at column 0 followed by blank/EOF terminates the document,
// which is illegal inside a quoted scalar.
if parser.mark.column == 0 &&
	((parser.buffer[parser.buffer_pos+0] == '-' &&
		parser.buffer[parser.buffer_pos+1] == '-' &&
		parser.buffer[parser.buffer_pos+2] == '-') ||
		(parser.buffer[parser.buffer_pos+0] == '.' &&
			parser.buffer[parser.buffer_pos+1] == '.' &&
			parser.buffer[parser.buffer_pos+2] == '.')) &&
	is_blankz(parser.buffer, parser.buffer_pos+3) {
	yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
		start_mark, "found unexpected document indicator")
	return false
}

// Check for EOF.
if is_z(parser.buffer, parser.buffer_pos) {
	yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
		start_mark, "found unexpected end of stream")
	return false
}

// Consume non-blank characters.
leading_blanks := false
for !is_blankz(parser.buffer, parser.buffer_pos) {
	if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
		// It is an escaped single quote ('' -> ').
		s = append(s, '\'')
		skip(parser)
		skip(parser)
	} else if single && parser.buffer[parser.buffer_pos] == '\'' {
		// It is a right single quote.
		break
	} else if !single && parser.buffer[parser.buffer_pos] == '"' {
		// It is a right double quote.
		break
	} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
		// It is an escaped line break.
		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
			return false
		}
		skip(parser)
		skip_line(parser)
		leading_blanks = true
		break
	} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
		// It is an escape sequence.
		code_length := 0

		// Check the escape character.
		switch parser.buffer[parser.buffer_pos+1] {
		case '0':
			s = append(s, 0)
		case 'a':
			s = append(s, '\x07')
		case 'b':
			s = append(s, '\x08')
		case 't', '\t':
			s = append(s, '\x09')
		case 'n':
			s = append(s, '\x0A')
		case 'v':
			s = append(s, '\x0B')
		case 'f':
			s = append(s, '\x0C')
		case 'r':
			s = append(s, '\x0D')
		case 'e':
			s = append(s, '\x1B')
		case ' ':
			s = append(s, '\x20')
		case '"':
			s = append(s, '"')
		case '\'':
			s = append(s, '\'')
		case '\\':
			s = append(s, '\\')
		case 'N': // NEL (#x85), emitted as its UTF-8 encoding.
			s = append(s, '\xC2')
			s = append(s, '\x85')
		case '_': // #xA0
			s = append(s, '\xC2')
			s = append(s, '\xA0')
		case 'L': // LS (#x2028)
			s = append(s, '\xE2')
			s = append(s, '\x80')
			s = append(s, '\xA8')
		case 'P': // PS (#x2029)
			s = append(s, '\xE2')
			s = append(s, '\x80')
			s = append(s, '\xA9')
		case 'x': // \xHH
			code_length = 2
		case 'u': // \uHHHH
			code_length = 4
		case 'U': // \UHHHHHHHH
			code_length = 8
		default:
			yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
				start_mark, "found unknown escape character")
			return false
		}

		skip(parser)
		skip(parser)

		// Consume an arbitrary escape code (\x, \u or \U hex digits).
		if code_length > 0 {
			var value int

			// Scan the character value.
			if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
				return false
			}
			for k := 0; k < code_length; k++ {
				if !is_hex(parser.buffer, parser.buffer_pos+k) {
					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
						start_mark, "did not find expected hexdecimal number")
					return false
				}
				value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
			}

			// Check the value and write the character.
			// Surrogate halves and anything above U+10FFFF are not valid
			// Unicode scalar values.
			if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
				yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
					start_mark, "found invalid Unicode character escape code")
				return false
			}

			// Encode the code point as UTF-8 (1 to 4 bytes).
			if value <= 0x7F {
				s = append(s, byte(value))
			} else if value <= 0x7FF {
				s = append(s, byte(0xC0+(value>>6)))
				s = append(s, byte(0x80+(value&0x3F)))
			} else if value <= 0xFFFF {
				s = append(s, byte(0xE0+(value>>12)))
				s = append(s, byte(0x80+((value>>6)&0x3F)))
				s = append(s, byte(0x80+(value&0x3F)))
			} else {
				s = append(s, byte(0xF0+(value>>18)))
				s = append(s, byte(0x80+((value>>12)&0x3F)))
				s = append(s, byte(0x80+((value>>6)&0x3F)))
				s = append(s, byte(0x80+(value&0x3F)))
			}

			// Advance the pointer.
			for k := 0; k < code_length; k++ {
				skip(parser)
			}
		}
	} else {
		// It is a non-escaped non-blank character.
		s = read(parser, s)
	}
	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
		return false
	}
}

// Check if we are at the end of the scalar.
if single {
	if parser.buffer[parser.buffer_pos] == '\'' {
		break
	}
} else {
	if parser.buffer[parser.buffer_pos] == '"' {
		break
	}
}

// Consume blank characters.
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
	return false
}

for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
	if is_blank(parser.buffer, parser.buffer_pos) {
		// Consume a space or a tab character.
		if !leading_blanks {
			whitespaces = read(parser, whitespaces)
		} else {
			skip(parser)
		}
	} else {
		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
			return false
		}

		// Check if it is a first line break.
		if !leading_blanks {
			whitespaces = whitespaces[:0]
			leading_break = read_line(parser, leading_break)
			leading_blanks = true
		} else {
			trailing_breaks = read_line(parser, trailing_breaks)
		}
	}
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
}

// Join the whitespaces or fold line breaks.
if leading_blanks {
	// Do we need to fold line breaks?
	if len(leading_break) > 0 && leading_break[0] == '\n' {
		// A single '\n' folds to a space; further breaks are kept as-is.
		if len(trailing_breaks) == 0 {
			s = append(s, ' ')
		} else {
			s = append(s, trailing_breaks...)
		}
	} else {
		s = append(s, leading_break...)
		s = append(s, trailing_breaks...)
	}
	trailing_breaks = trailing_breaks[:0]
	leading_break = leading_break[:0]
} else {
	s = append(s, whitespaces...)
	whitespaces = whitespaces[:0]
}
}

// Eat the right quote.
skip(parser)

end_mark := parser.mark

// Create a token.
*token = yaml_token_t{
	typ:        yaml_SCALAR_TOKEN,
	start_mark: start_mark,
	end_mark:   end_mark,
	value:      s,
	style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
}
if !single {
	token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
}
return true
}

// Scan a plain scalar.
func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {

var s, leading_break, trailing_breaks, whitespaces []byte
var leading_blanks bool
var indent = parser.indent + 1

start_mark := parser.mark
end_mark := parser.mark

// Consume the content of the plain scalar.
for {
	// Check for a document indicator ("---" or "..." at column 0).
	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
		return false
	}
	if parser.mark.column == 0 &&
		((parser.buffer[parser.buffer_pos+0] == '-' &&
			parser.buffer[parser.buffer_pos+1] == '-' &&
			parser.buffer[parser.buffer_pos+2] == '-') ||
			(parser.buffer[parser.buffer_pos+0] == '.' &&
				parser.buffer[parser.buffer_pos+1] == '.' &&
				parser.buffer[parser.buffer_pos+2] == '.')) &&
		is_blankz(parser.buffer, parser.buffer_pos+3) {
		break
	}

	// Check for a comment.
	if parser.buffer[parser.buffer_pos] == '#' {
		break
	}

	// Consume non-blank characters.
	for !is_blankz(parser.buffer, parser.buffer_pos) {
		// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
		if parser.flow_level > 0 && parser.buffer[parser.buffer_pos] == ':' && !is_blankz(parser.buffer, parser.buffer_pos+1) {
			yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
				start_mark, "found unexpected ':'")
			return false
		}

		// Check for indicators that may end a plain scalar:
		// ": " always; ",", ":", "?", "[", "]", "{", "}" in flow context.
		if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
			(parser.flow_level > 0 &&
				(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
					parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
					parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
					parser.buffer[parser.buffer_pos] == '}')) {
			break
		}

		// Check if we need to join whitespaces and breaks.
		if leading_blanks || len(whitespaces) > 0 {
			if leading_blanks {
				// Do we need to fold line breaks?
				// NOTE(review): leading_break is indexed without a length
				// check; this looks safe because leading_blanks is only set
				// right after read_line appended to leading_break — confirm.
				if leading_break[0] == '\n' {
					if len(trailing_breaks) == 0 {
						s = append(s, ' ')
					} else {
						s = append(s, trailing_breaks...)
					}
				} else {
					s = append(s, leading_break...)
					s = append(s, trailing_breaks...)
				}
				trailing_breaks = trailing_breaks[:0]
				leading_break = leading_break[:0]
				leading_blanks = false
			} else {
				s = append(s, whitespaces...)
				whitespaces = whitespaces[:0]
			}
		}

		// Copy the character.
		s = read(parser, s)
		end_mark = parser.mark
		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
			return false
		}
	}

	// Is it the end?
	if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
		break
	}

	// Consume blank characters.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
		if is_blank(parser.buffer, parser.buffer_pos) {
			// Check for a tab character that abuses indentation.
			if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
					start_mark, "found a tab character that violate intendation")
				return false
			}

			// Consume a space or a tab character.
if !leading_blanks { whitespaces = read(parser, whitespaces) } else { skip(parser) } } else { if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { return false } // Check if it is a first line break. if !leading_blanks { whitespaces = whitespaces[:0] leading_break = read_line(parser, leading_break) leading_blanks = true } else { trailing_breaks = read_line(parser, trailing_breaks) } } if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } // Check intendation level. if parser.flow_level == 0 && parser.mark.column < indent { break } } // Create a token. *token = yaml_token_t{ typ: yaml_SCALAR_TOKEN, start_mark: start_mark, end_mark: end_mark, value: s, style: yaml_PLAIN_SCALAR_STYLE, } // Note that we change the 'simple_key_allowed' flag. if leading_blanks { parser.simple_key_allowed = true } return true } charm-2.1.1/src/gopkg.in/yaml.v1/decode_test.go0000664000175000017500000003607712672604524020223 0ustar marcomarcopackage yaml_test import ( . "gopkg.in/check.v1" "gopkg.in/yaml.v1" "math" "reflect" "strings" "time" ) var unmarshalIntTest = 123 var unmarshalTests = []struct { data string value interface{} }{ { "", &struct{}{}, }, { "{}", &struct{}{}, }, { "v: hi", map[string]string{"v": "hi"}, }, { "v: hi", map[string]interface{}{"v": "hi"}, }, { "v: true", map[string]string{"v": "true"}, }, { "v: true", map[string]interface{}{"v": true}, }, { "v: 10", map[string]interface{}{"v": 10}, }, { "v: 0b10", map[string]interface{}{"v": 2}, }, { "v: 0xA", map[string]interface{}{"v": 10}, }, { "v: 4294967296", map[string]int64{"v": 4294967296}, }, { "v: 0.1", map[string]interface{}{"v": 0.1}, }, { "v: .1", map[string]interface{}{"v": 0.1}, }, { "v: .Inf", map[string]interface{}{"v": math.Inf(+1)}, }, { "v: -.Inf", map[string]interface{}{"v": math.Inf(-1)}, }, { "v: -10", map[string]interface{}{"v": -10}, }, { "v: -.1", map[string]interface{}{"v": -0.1}, }, // Simple values. 
{ "123", &unmarshalIntTest, }, // Floats from spec { "canonical: 6.8523e+5", map[string]interface{}{"canonical": 6.8523e+5}, }, { "expo: 685.230_15e+03", map[string]interface{}{"expo": 685.23015e+03}, }, { "fixed: 685_230.15", map[string]interface{}{"fixed": 685230.15}, }, { "neginf: -.inf", map[string]interface{}{"neginf": math.Inf(-1)}, }, { "fixed: 685_230.15", map[string]float64{"fixed": 685230.15}, }, //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails. // Bools from spec { "canonical: y", map[string]interface{}{"canonical": true}, }, { "answer: NO", map[string]interface{}{"answer": false}, }, { "logical: True", map[string]interface{}{"logical": true}, }, { "option: on", map[string]interface{}{"option": true}, }, { "option: on", map[string]bool{"option": true}, }, // Ints from spec { "canonical: 685230", map[string]interface{}{"canonical": 685230}, }, { "decimal: +685_230", map[string]interface{}{"decimal": 685230}, }, { "octal: 02472256", map[string]interface{}{"octal": 685230}, }, { "hexa: 0x_0A_74_AE", map[string]interface{}{"hexa": 685230}, }, { "bin: 0b1010_0111_0100_1010_1110", map[string]interface{}{"bin": 685230}, }, { "bin: -0b101010", map[string]interface{}{"bin": -42}, }, { "decimal: +685_230", map[string]int{"decimal": 685230}, }, //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported // Nulls from spec { "empty:", map[string]interface{}{"empty": nil}, }, { "canonical: ~", map[string]interface{}{"canonical": nil}, }, { "english: null", map[string]interface{}{"english": nil}, }, { "~: null key", map[interface{}]string{nil: "null key"}, }, { "empty:", map[string]*bool{"empty": nil}, }, // Flow sequence { "seq: [A,B]", map[string]interface{}{"seq": []interface{}{"A", "B"}}, }, { "seq: [A,B,C,]", map[string][]string{"seq": []string{"A", "B", "C"}}, }, { "seq: [A,1,C]", map[string][]string{"seq": []string{"A", "1", 
"C"}}, }, { "seq: [A,1,C]", map[string][]int{"seq": []int{1}}, }, { "seq: [A,1,C]", map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, }, // Block sequence { "seq:\n - A\n - B", map[string]interface{}{"seq": []interface{}{"A", "B"}}, }, { "seq:\n - A\n - B\n - C", map[string][]string{"seq": []string{"A", "B", "C"}}, }, { "seq:\n - A\n - 1\n - C", map[string][]string{"seq": []string{"A", "1", "C"}}, }, { "seq:\n - A\n - 1\n - C", map[string][]int{"seq": []int{1}}, }, { "seq:\n - A\n - 1\n - C", map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, }, // Literal block scalar { "scalar: | # Comment\n\n literal\n\n \ttext\n\n", map[string]string{"scalar": "\nliteral\n\n\ttext\n"}, }, // Folded block scalar { "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n", map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"}, }, // Map inside interface with no type hints. { "a: {b: c}", map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, }, // Structs and type conversions. 
{ "hello: world", &struct{ Hello string }{"world"}, }, { "a: {b: c}", &struct{ A struct{ B string } }{struct{ B string }{"c"}}, }, { "a: {b: c}", &struct{ A *struct{ B string } }{&struct{ B string }{"c"}}, }, { "a: {b: c}", &struct{ A map[string]string }{map[string]string{"b": "c"}}, }, { "a: {b: c}", &struct{ A *map[string]string }{&map[string]string{"b": "c"}}, }, { "a:", &struct{ A map[string]string }{}, }, { "a: 1", &struct{ A int }{1}, }, { "a: 1", &struct{ A float64 }{1}, }, { "a: 1.0", &struct{ A int }{1}, }, { "a: 1.0", &struct{ A uint }{1}, }, { "a: [1, 2]", &struct{ A []int }{[]int{1, 2}}, }, { "a: 1", &struct{ B int }{0}, }, { "a: 1", &struct { B int "a" }{1}, }, { "a: y", &struct{ A bool }{true}, }, // Some cross type conversions { "v: 42", map[string]uint{"v": 42}, }, { "v: -42", map[string]uint{}, }, { "v: 4294967296", map[string]uint64{"v": 4294967296}, }, { "v: -4294967296", map[string]uint64{}, }, // Overflow cases. { "v: 4294967297", map[string]int32{}, }, { "v: 128", map[string]int8{}, }, // Quoted values. { "'1': '\"2\"'", map[interface{}]interface{}{"1": "\"2\""}, }, { "v:\n- A\n- 'B\n\n C'\n", map[string][]string{"v": []string{"A", "B\nC"}}, }, // Explicit tags. { "v: !!float '1.1'", map[string]interface{}{"v": 1.1}, }, { "v: !!null ''", map[string]interface{}{"v": nil}, }, { "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", map[string]interface{}{"v": 1}, }, // Anchors and aliases. 
{ "a: &x 1\nb: &y 2\nc: *x\nd: *y\n", &struct{ A, B, C, D int }{1, 2, 1, 2}, }, { "a: &a {c: 1}\nb: *a", &struct { A, B struct { C int } }{struct{ C int }{1}, struct{ C int }{1}}, }, { "a: &a [1, 2]\nb: *a", &struct{ B []int }{[]int{1, 2}}, }, // Bug #1133337 { "foo: ''", map[string]*string{"foo": new(string)}, }, { "foo: null", map[string]string{"foo": ""}, }, { "foo: null", map[string]interface{}{"foo": nil}, }, // Ignored field { "a: 1\nb: 2\n", &struct { A int B int "-" }{1, 0}, }, // Bug #1191981 { "" + "%YAML 1.1\n" + "--- !!str\n" + `"Generic line break (no glyph)\n\` + "\n" + ` Generic line break (glyphed)\n\` + "\n" + ` Line separator\u2028\` + "\n" + ` Paragraph separator\u2029"` + "\n", "" + "Generic line break (no glyph)\n" + "Generic line break (glyphed)\n" + "Line separator\u2028Paragraph separator\u2029", }, // Struct inlining { "a: 1\nb: 2\nc: 3\n", &struct { A int C inlineB `yaml:",inline"` }{1, inlineB{2, inlineC{3}}}, }, // bug 1243827 { "a: -b_c", map[string]interface{}{"a": "-b_c"}, }, { "a: +b_c", map[string]interface{}{"a": "+b_c"}, }, { "a: 50cent_of_dollar", map[string]interface{}{"a": "50cent_of_dollar"}, }, // Duration { "a: 3s", map[string]time.Duration{"a": 3 * time.Second}, }, // Issue #24. { "a: ", map[string]string{"a": ""}, }, // Base 60 floats are obsolete and unsupported. { "a: 1:1\n", map[string]string{"a": "1:1"}, }, // Binary data. 
{ "a: !!binary gIGC\n", map[string]string{"a": "\x80\x81\x82"}, }, { "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", map[string]string{"a": strings.Repeat("\x90", 54)}, }, { "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n", map[string]string{"a": strings.Repeat("\x00", 52)}, }, } type inlineB struct { B int inlineC `yaml:",inline"` } type inlineC struct { C int } func (s *S) TestUnmarshal(c *C) { for i, item := range unmarshalTests { t := reflect.ValueOf(item.value).Type() var value interface{} switch t.Kind() { case reflect.Map: value = reflect.MakeMap(t).Interface() case reflect.String: t := reflect.ValueOf(item.value).Type() v := reflect.New(t) value = v.Interface() default: pt := reflect.ValueOf(item.value).Type() pv := reflect.New(pt.Elem()) value = pv.Interface() } err := yaml.Unmarshal([]byte(item.data), value) c.Assert(err, IsNil, Commentf("Item #%d", i)) if t.Kind() == reflect.String { c.Assert(*value.(*string), Equals, item.value, Commentf("Item #%d", i)) } else { c.Assert(value, DeepEquals, item.value, Commentf("Item #%d", i)) } } } func (s *S) TestUnmarshalNaN(c *C) { value := map[string]interface{}{} err := yaml.Unmarshal([]byte("notanum: .NaN"), &value) c.Assert(err, IsNil) c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true) } var unmarshalErrorTests = []struct { data, error string }{ {"v: !!float 'error'", "YAML error: cannot decode !!str `error` as a !!float"}, {"v: [A,", "YAML error: line 1: did not find expected node content"}, {"v:\n- [A,", "YAML error: line 2: did not find expected node content"}, {"a: *b\n", "YAML error: Unknown anchor 'b' referenced"}, {"a: &a\n b: *a\n", "YAML error: Anchor 'a' value contains itself"}, {"value: -", "YAML error: block sequence entries are not allowed in this context"}, {"a: !!binary ==", "YAML error: !!binary value contains invalid base64 data"}, {"{[.]}", `YAML error: invalid map key: \[\]interface \{\}\{"\."\}`}, {"{{.}}", `YAML error: invalid map key: map\[interface\ 
\{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`}, } func (s *S) TestUnmarshalErrors(c *C) { for _, item := range unmarshalErrorTests { var value interface{} err := yaml.Unmarshal([]byte(item.data), &value) c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) } } var setterTests = []struct { data, tag string value interface{} }{ {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}}, {"_: [1,A]", "!!seq", []interface{}{1, "A"}}, {"_: 10", "!!int", 10}, {"_: null", "!!null", nil}, {`_: BAR!`, "!!str", "BAR!"}, {`_: "BAR!"`, "!!str", "BAR!"}, {"_: !!foo 'BAR!'", "!!foo", "BAR!"}, } var setterResult = map[int]bool{} type typeWithSetter struct { tag string value interface{} } func (o *typeWithSetter) SetYAML(tag string, value interface{}) (ok bool) { o.tag = tag o.value = value if i, ok := value.(int); ok { if result, ok := setterResult[i]; ok { return result } } return true } type setterPointerType struct { Field *typeWithSetter "_" } type setterValueType struct { Field typeWithSetter "_" } func (s *S) TestUnmarshalWithPointerSetter(c *C) { for _, item := range setterTests { obj := &setterPointerType{} err := yaml.Unmarshal([]byte(item.data), obj) c.Assert(err, IsNil) c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) c.Assert(obj.Field.tag, Equals, item.tag) c.Assert(obj.Field.value, DeepEquals, item.value) } } func (s *S) TestUnmarshalWithValueSetter(c *C) { for _, item := range setterTests { obj := &setterValueType{} err := yaml.Unmarshal([]byte(item.data), obj) c.Assert(err, IsNil) c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) c.Assert(obj.Field.tag, Equals, item.tag) c.Assert(obj.Field.value, DeepEquals, item.value) } } func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) { obj := &typeWithSetter{} err := yaml.Unmarshal([]byte(setterTests[0].data), obj) c.Assert(err, IsNil) c.Assert(obj.tag, Equals, setterTests[0].tag) value, ok := 
obj.value.(map[interface{}]interface{}) c.Assert(ok, Equals, true) c.Assert(value["_"], DeepEquals, setterTests[0].value) } func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) { setterResult[2] = false setterResult[4] = false defer func() { delete(setterResult, 2) delete(setterResult, 4) }() m := map[string]*typeWithSetter{} data := `{abc: 1, def: 2, ghi: 3, jkl: 4}` err := yaml.Unmarshal([]byte(data), m) c.Assert(err, IsNil) c.Assert(m["abc"], NotNil) c.Assert(m["def"], IsNil) c.Assert(m["ghi"], NotNil) c.Assert(m["jkl"], IsNil) c.Assert(m["abc"].value, Equals, 1) c.Assert(m["ghi"].value, Equals, 3) } // From http://yaml.org/type/merge.html var mergeTests = ` anchors: - &CENTER { "x": 1, "y": 2 } - &LEFT { "x": 0, "y": 2 } - &BIG { "r": 10 } - &SMALL { "r": 1 } # All the following maps are equal: plain: # Explicit keys "x": 1 "y": 2 "r": 10 label: center/big mergeOne: # Merge one map << : *CENTER "r": 10 label: center/big mergeMultiple: # Merge multiple maps << : [ *CENTER, *BIG ] label: center/big override: # Override << : [ *BIG, *LEFT, *SMALL ] "x": 1 label: center/big shortTag: # Explicit short merge tag !!merge "<<" : [ *CENTER, *BIG ] label: center/big longTag: # Explicit merge long tag ! 
"<<" : [ *CENTER, *BIG ] label: center/big inlineMap: # Inlined map << : {"x": 1, "y": 2, "r": 10} label: center/big inlineSequenceMap: # Inlined map in sequence << : [ *CENTER, {"r": 10} ] label: center/big ` func (s *S) TestMerge(c *C) { var want = map[interface{}]interface{}{ "x": 1, "y": 2, "r": 10, "label": "center/big", } var m map[string]interface{} err := yaml.Unmarshal([]byte(mergeTests), &m) c.Assert(err, IsNil) for name, test := range m { if name == "anchors" { continue } c.Assert(test, DeepEquals, want, Commentf("test %q failed", name)) } } func (s *S) TestMergeStruct(c *C) { type Data struct { X, Y, R int Label string } want := Data{1, 2, 10, "center/big"} var m map[string]Data err := yaml.Unmarshal([]byte(mergeTests), &m) c.Assert(err, IsNil) for name, test := range m { if name == "anchors" { continue } c.Assert(test, Equals, want, Commentf("test %q failed", name)) } } var unmarshalNullTests = []func() interface{}{ func() interface{} { var v interface{}; v = "v"; return &v }, func() interface{} { var s = "s"; return &s }, func() interface{} { var s = "s"; sptr := &s; return &sptr }, func() interface{} { var i = 1; return &i }, func() interface{} { var i = 1; iptr := &i; return &iptr }, func() interface{} { m := map[string]int{"s": 1}; return &m }, func() interface{} { m := map[string]int{"s": 1}; return m }, } func (s *S) TestUnmarshalNull(c *C) { for _, test := range unmarshalNullTests { item := test() zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface() err := yaml.Unmarshal([]byte("null"), item) c.Assert(err, IsNil) if reflect.TypeOf(item).Kind() == reflect.Map { c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface()) } else { c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero) } } } //var data []byte //func init() { // var err error // data, err = ioutil.ReadFile("/tmp/file.yaml") // if err != nil { // panic(err) // } //} // //func (s *S) BenchmarkUnmarshal(c *C) { // var 
err error
//	for i := 0; i < c.N; i++ {
//		var v map[string]interface{}
//		err = yaml.Unmarshal(data, &v)
//	}
//	if err != nil {
//		panic(err)
//	}
//}
//
//func (s *S) BenchmarkMarshal(c *C) {
//	var v map[string]interface{}
//	yaml.Unmarshal(data, &v)
//	c.ResetTimer()
//	for i := 0; i < c.N; i++ {
//		yaml.Marshal(&v)
//	}
//}
charm-2.1.1/src/gopkg.in/yaml.v1/readerc.go0000664000175000017500000002716512672604524017334 0ustar  marcomarco
package yaml

import (
	"io"
)

// Set the reader error and return false.
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
	parser.error = yaml_READER_ERROR
	parser.problem = problem
	parser.problem_offset = offset
	parser.problem_value = value
	return false
}

// Byte order marks.
const (
	bom_UTF8    = "\xef\xbb\xbf"
	bom_UTF16LE = "\xff\xfe"
	bom_UTF16BE = "\xfe\xff"
)

// Determine the input stream encoding by checking the BOM symbol. If no BOM is
// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
	// Ensure that we had enough bytes in the raw buffer (a UTF-8 BOM is 3 bytes).
	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
		if !yaml_parser_update_raw_buffer(parser) {
			return false
		}
	}

	// Determine the encoding; a recognized BOM is consumed (pos/offset advance).
	buf := parser.raw_buffer
	pos := parser.raw_buffer_pos
	avail := len(buf) - pos
	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
		parser.encoding = yaml_UTF16LE_ENCODING
		parser.raw_buffer_pos += 2
		parser.offset += 2
	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
		parser.encoding = yaml_UTF16BE_ENCODING
		parser.raw_buffer_pos += 2
		parser.offset += 2
	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
		parser.encoding = yaml_UTF8_ENCODING
		parser.raw_buffer_pos += 3
		parser.offset += 3
	} else {
		parser.encoding = yaml_UTF8_ENCODING
	}
	return true
}

// Update the raw buffer.
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
	size_read := 0

	// Return if the raw buffer is full.
	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
		return true
	}

	// Return on EOF.
	if parser.eof {
		return true
	}

	// Move the remaining bytes in the raw buffer to the beginning.
	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
	}
	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
	parser.raw_buffer_pos = 0

	// Call the read handler to fill the buffer.
	// NOTE: := shadows the outer size_read deliberately kept by the
	// original; the shadowed value is what gets used below.
	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
	if err == io.EOF {
		parser.eof = true
	} else if err != nil {
		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
	}
	return true
}

// Ensure that the buffer contains at least `length` characters.
// Return true on success, false on failure.
//
// The length is supposed to be significantly less that the buffer size.
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
	if parser.read_handler == nil {
		panic("read handler must be set")
	}

	// If the EOF flag is set and the raw buffer is empty, do nothing.
	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
		return true
	}

	// Return if the buffer contains enough characters.
	if parser.unread >= length {
		return true
	}

	// Determine the input encoding if it is not known yet.
	if parser.encoding == yaml_ANY_ENCODING {
		if !yaml_parser_determine_encoding(parser) {
			return false
		}
	}

	// Move the unread characters to the beginning of the buffer.
buffer_len := len(parser.buffer) if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { copy(parser.buffer, parser.buffer[parser.buffer_pos:]) buffer_len -= parser.buffer_pos parser.buffer_pos = 0 } else if parser.buffer_pos == buffer_len { buffer_len = 0 parser.buffer_pos = 0 } // Open the whole buffer for writing, and cut it before returning. parser.buffer = parser.buffer[:cap(parser.buffer)] // Fill the buffer until it has enough characters. first := true for parser.unread < length { // Fill the raw buffer if necessary. if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { if !yaml_parser_update_raw_buffer(parser) { parser.buffer = parser.buffer[:buffer_len] return false } } first = false // Decode the raw buffer. inner: for parser.raw_buffer_pos != len(parser.raw_buffer) { var value rune var width int raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos // Decode the next character. switch parser.encoding { case yaml_UTF8_ENCODING: // Decode a UTF-8 character. Check RFC 3629 // (http://www.ietf.org/rfc/rfc3629.txt) for more details. // // The following table (taken from the RFC) is used for // decoding. // // Char. number range | UTF-8 octet sequence // (hexadecimal) | (binary) // --------------------+------------------------------------ // 0000 0000-0000 007F | 0xxxxxxx // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx // // Additionally, the characters in the range 0xD800-0xDFFF // are prohibited as they are reserved for use with UTF-16 // surrogate pairs. // Determine the length of the UTF-8 sequence. octet := parser.raw_buffer[parser.raw_buffer_pos] switch { case octet&0x80 == 0x00: width = 1 case octet&0xE0 == 0xC0: width = 2 case octet&0xF0 == 0xE0: width = 3 case octet&0xF8 == 0xF0: width = 4 default: // The leading octet is invalid. 
return yaml_parser_set_reader_error(parser, "invalid leading UTF-8 octet", parser.offset, int(octet)) } // Check if the raw buffer contains an incomplete character. if width > raw_unread { if parser.eof { return yaml_parser_set_reader_error(parser, "incomplete UTF-8 octet sequence", parser.offset, -1) } break inner } // Decode the leading octet. switch { case octet&0x80 == 0x00: value = rune(octet & 0x7F) case octet&0xE0 == 0xC0: value = rune(octet & 0x1F) case octet&0xF0 == 0xE0: value = rune(octet & 0x0F) case octet&0xF8 == 0xF0: value = rune(octet & 0x07) default: value = 0 } // Check and decode the trailing octets. for k := 1; k < width; k++ { octet = parser.raw_buffer[parser.raw_buffer_pos+k] // Check if the octet is valid. if (octet & 0xC0) != 0x80 { return yaml_parser_set_reader_error(parser, "invalid trailing UTF-8 octet", parser.offset+k, int(octet)) } // Decode the octet. value = (value << 6) + rune(octet&0x3F) } // Check the length of the sequence against the value. switch { case width == 1: case width == 2 && value >= 0x80: case width == 3 && value >= 0x800: case width == 4 && value >= 0x10000: default: return yaml_parser_set_reader_error(parser, "invalid length of a UTF-8 sequence", parser.offset, -1) } // Check the range of the value. if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { return yaml_parser_set_reader_error(parser, "invalid Unicode character", parser.offset, int(value)) } case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: var low, high int if parser.encoding == yaml_UTF16LE_ENCODING { low, high = 0, 1 } else { high, low = 1, 0 } // The UTF-16 encoding is not as simple as one might // naively think. Check RFC 2781 // (http://www.ietf.org/rfc/rfc2781.txt). // // Normally, two subsequent bytes describe a Unicode // character. However a special technique (called a // surrogate pair) is used for specifying character // values larger than 0xFFFF. 
// // A surrogate pair consists of two pseudo-characters: // high surrogate area (0xD800-0xDBFF) // low surrogate area (0xDC00-0xDFFF) // // The following formulas are used for decoding // and encoding characters using surrogate pairs: // // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) // W1 = 110110yyyyyyyyyy // W2 = 110111xxxxxxxxxx // // where U is the character value, W1 is the high surrogate // area, W2 is the low surrogate area. // Check for incomplete UTF-16 character. if raw_unread < 2 { if parser.eof { return yaml_parser_set_reader_error(parser, "incomplete UTF-16 character", parser.offset, -1) } break inner } // Get the character. value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) // Check for unexpected low surrogate area. if value&0xFC00 == 0xDC00 { return yaml_parser_set_reader_error(parser, "unexpected low surrogate area", parser.offset, int(value)) } // Check for a high surrogate area. if value&0xFC00 == 0xD800 { width = 4 // Check for incomplete surrogate pair. if raw_unread < 4 { if parser.eof { return yaml_parser_set_reader_error(parser, "incomplete UTF-16 surrogate pair", parser.offset, -1) } break inner } // Get the next character. value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) // Check for a low surrogate area. if value2&0xFC00 != 0xDC00 { return yaml_parser_set_reader_error(parser, "expected low surrogate area", parser.offset+2, int(value2)) } // Generate the value of the surrogate pair. 
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) } else { width = 2 } default: panic("impossible") } // Check if the character is in the allowed range: // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) // | [#x10000-#x10FFFF] (32 bit) switch { case value == 0x09: case value == 0x0A: case value == 0x0D: case value >= 0x20 && value <= 0x7E: case value == 0x85: case value >= 0xA0 && value <= 0xD7FF: case value >= 0xE000 && value <= 0xFFFD: case value >= 0x10000 && value <= 0x10FFFF: default: return yaml_parser_set_reader_error(parser, "control characters are not allowed", parser.offset, int(value)) } // Move the raw pointers. parser.raw_buffer_pos += width parser.offset += width // Finally put the character into the buffer. if value <= 0x7F { // 0000 0000-0000 007F . 0xxxxxxx parser.buffer[buffer_len+0] = byte(value) } else if value <= 0x7FF { // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) } else if value <= 0xFFFF { // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) } else { // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) } buffer_len += width parser.unread++ } // On EOF, put NUL into the buffer and return. 
if parser.eof { parser.buffer[buffer_len] = 0 buffer_len++ parser.unread++ break } } parser.buffer = parser.buffer[:buffer_len] return true } charm-2.1.1/src/gopkg.in/yaml.v1/yamlprivateh.go0000664000175000017500000001154112672604524020433 0ustar marcomarcopackage yaml const ( // The size of the input raw buffer. input_raw_buffer_size = 512 // The size of the input buffer. // It should be possible to decode the whole raw buffer. input_buffer_size = input_raw_buffer_size * 3 // The size of the output buffer. output_buffer_size = 128 // The size of the output raw buffer. // It should be possible to encode the whole output buffer. output_raw_buffer_size = (output_buffer_size*2 + 2) // The size of other stacks and queues. initial_stack_size = 16 initial_queue_size = 16 initial_string_size = 16 ) // Check if the character at the specified position is an alphabetical // character, a digit, '_', or '-'. func is_alpha(b []byte, i int) bool { return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' } // Check if the character at the specified position is a digit. func is_digit(b []byte, i int) bool { return b[i] >= '0' && b[i] <= '9' } // Get the value of a digit. func as_digit(b []byte, i int) int { return int(b[i]) - '0' } // Check if the character at the specified position is a hex-digit. func is_hex(b []byte, i int) bool { return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' } // Get the value of a hex-digit. func as_hex(b []byte, i int) int { bi := b[i] if bi >= 'A' && bi <= 'F' { return int(bi) - 'A' + 10 } if bi >= 'a' && bi <= 'f' { return int(bi) - 'a' + 10 } return int(bi) - '0' } // Check if the character is ASCII. func is_ascii(b []byte, i int) bool { return b[i] <= 0x7F } // Check if the character at the start of the buffer can be printed unescaped. func is_printable(b []byte, i int) bool { return ((b[i] == 0x0A) || // . 
== #x0A (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF (b[i] > 0xC2 && b[i] < 0xED) || (b[i] == 0xED && b[i+1] < 0xA0) || (b[i] == 0xEE) || (b[i] == 0xEF && // #xE000 <= . <= #xFFFD !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) } // Check if the character at the specified position is NUL. func is_z(b []byte, i int) bool { return b[i] == 0x00 } // Check if the beginning of the buffer is a BOM. func is_bom(b []byte, i int) bool { return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF } // Check if the character at the specified position is space. func is_space(b []byte, i int) bool { return b[i] == ' ' } // Check if the character at the specified position is tab. func is_tab(b []byte, i int) bool { return b[i] == '\t' } // Check if the character at the specified position is blank (space or tab). func is_blank(b []byte, i int) bool { //return is_space(b, i) || is_tab(b, i) return b[i] == ' ' || b[i] == '\t' } // Check if the character at the specified position is a line break. func is_break(b []byte, i int) bool { return (b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) } func is_crlf(b []byte, i int) bool { return b[i] == '\r' && b[i+1] == '\n' } // Check if the character is a line break or NUL. func is_breakz(b []byte, i int) bool { //return is_break(b, i) || is_z(b, i) return ( // is_break: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) // is_z: b[i] == 0) } // Check if the character is a line break, space, or NUL. 
func is_spacez(b []byte, i int) bool { //return is_space(b, i) || is_breakz(b, i) return ( // is_space: b[i] == ' ' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) b[i] == 0) } // Check if the character is a line break, space, tab, or NUL. func is_blankz(b []byte, i int) bool { //return is_blank(b, i) || is_breakz(b, i) return ( // is_blank: b[i] == ' ' || b[i] == '\t' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) b[i] == 0) } // Determine the width of the character. func width(b byte) int { // Don't replace these by a switch without first // confirming that it is being inlined. if b&0x80 == 0x00 { return 1 } if b&0xE0 == 0xC0 { return 2 } if b&0xF0 == 0xE0 { return 3 } if b&0xF8 == 0xF0 { return 4 } return 0 } charm-2.1.1/src/gopkg.in/yaml.v1/README.md0000664000175000017500000000500612672604524016655 0ustar marcomarco# YAML support for the Go language Introduction ------------ The yaml package enables Go programs to comfortably encode and decode YAML values. It was developed within [Canonical](https://www.canonical.com) as part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to parse and generate YAML data quickly and reliably. Compatibility ------------- The yaml package supports most of YAML 1.1 and 1.2, including support for anchors, tags, map merging, etc. Multi-document unmarshalling is not yet implemented, and base-60 floats from YAML 1.1 are purposefully not supported since they're a poor design and are gone in YAML 1.2. 
Installation and usage ---------------------- The import path for the package is *gopkg.in/yaml.v1*. To install it, run: go get gopkg.in/yaml.v1 API documentation ----------------- If opened in a browser, the import path itself leads to the API documentation: * [https://gopkg.in/yaml.v1](https://gopkg.in/yaml.v1) API stability ------------- The package API for yaml v1 will remain stable as described in [gopkg.in](https://gopkg.in). License ------- The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. Example ------- ```Go package main import ( "fmt" "log" "gopkg.in/yaml.v1" ) var data = ` a: Easy! b: c: 2 d: [3, 4] ` type T struct { A string B struct{C int; D []int ",flow"} } func main() { t := T{} err := yaml.Unmarshal([]byte(data), &t) if err != nil { log.Fatalf("error: %v", err) } fmt.Printf("--- t:\n%v\n\n", t) d, err := yaml.Marshal(&t) if err != nil { log.Fatalf("error: %v", err) } fmt.Printf("--- t dump:\n%s\n\n", string(d)) m := make(map[interface{}]interface{}) err = yaml.Unmarshal([]byte(data), &m) if err != nil { log.Fatalf("error: %v", err) } fmt.Printf("--- m:\n%v\n\n", m) d, err = yaml.Marshal(&m) if err != nil { log.Fatalf("error: %v", err) } fmt.Printf("--- m dump:\n%s\n\n", string(d)) } ``` This example will generate the following output: ``` --- t: {Easy! {2 [3 4]}} --- t dump: a: Easy! b: c: 2 d: [3, 4] --- m: map[a:Easy! b:map[c:2 d:[3 4]]] --- m dump: a: Easy! 
b: c: 2 d: - 3 - 4 ``` charm-2.1.1/src/gopkg.in/yaml.v1/LICENSE.libyaml0000664000175000017500000000244112672604524020033 0ustar marcomarcoThe following files were ported to Go from C files of libyaml, and thus are still covered by their original copyright and license: apic.go emitterc.go parserc.go readerc.go scannerc.go writerc.go yamlh.go yamlprivateh.go Copyright (c) 2006 Kirill Simonov Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. charm-2.1.1/src/gopkg.in/yaml.v1/encode_test.go0000664000175000017500000001757612672604524020240 0ustar marcomarcopackage yaml_test import ( "fmt" "math" "strconv" "strings" "time" . 
"gopkg.in/check.v1" "gopkg.in/yaml.v1" ) var marshalIntTest = 123 var marshalTests = []struct { value interface{} data string }{ { nil, "null\n", }, { &struct{}{}, "{}\n", }, { map[string]string{"v": "hi"}, "v: hi\n", }, { map[string]interface{}{"v": "hi"}, "v: hi\n", }, { map[string]string{"v": "true"}, "v: \"true\"\n", }, { map[string]string{"v": "false"}, "v: \"false\"\n", }, { map[string]interface{}{"v": true}, "v: true\n", }, { map[string]interface{}{"v": false}, "v: false\n", }, { map[string]interface{}{"v": 10}, "v: 10\n", }, { map[string]interface{}{"v": -10}, "v: -10\n", }, { map[string]uint{"v": 42}, "v: 42\n", }, { map[string]interface{}{"v": int64(4294967296)}, "v: 4294967296\n", }, { map[string]int64{"v": int64(4294967296)}, "v: 4294967296\n", }, { map[string]uint64{"v": 4294967296}, "v: 4294967296\n", }, { map[string]interface{}{"v": "10"}, "v: \"10\"\n", }, { map[string]interface{}{"v": 0.1}, "v: 0.1\n", }, { map[string]interface{}{"v": float64(0.1)}, "v: 0.1\n", }, { map[string]interface{}{"v": -0.1}, "v: -0.1\n", }, { map[string]interface{}{"v": math.Inf(+1)}, "v: .inf\n", }, { map[string]interface{}{"v": math.Inf(-1)}, "v: -.inf\n", }, { map[string]interface{}{"v": math.NaN()}, "v: .nan\n", }, { map[string]interface{}{"v": nil}, "v: null\n", }, { map[string]interface{}{"v": ""}, "v: \"\"\n", }, { map[string][]string{"v": []string{"A", "B"}}, "v:\n- A\n- B\n", }, { map[string][]string{"v": []string{"A", "B\nC"}}, "v:\n- A\n- |-\n B\n C\n", }, { map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}}, "v:\n- A\n- 1\n- B:\n - 2\n - 3\n", }, { map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, "a:\n b: c\n", }, { map[string]interface{}{"a": "-"}, "a: '-'\n", }, // Simple values. 
{ &marshalIntTest, "123\n", }, // Structures { &struct{ Hello string }{"world"}, "hello: world\n", }, { &struct { A struct { B string } }{struct{ B string }{"c"}}, "a:\n b: c\n", }, { &struct { A *struct { B string } }{&struct{ B string }{"c"}}, "a:\n b: c\n", }, { &struct { A *struct { B string } }{}, "a: null\n", }, { &struct{ A int }{1}, "a: 1\n", }, { &struct{ A []int }{[]int{1, 2}}, "a:\n- 1\n- 2\n", }, { &struct { B int "a" }{1}, "a: 1\n", }, { &struct{ A bool }{true}, "a: true\n", }, // Conditional flag { &struct { A int "a,omitempty" B int "b,omitempty" }{1, 0}, "a: 1\n", }, { &struct { A int "a,omitempty" B int "b,omitempty" }{0, 0}, "{}\n", }, { &struct { A *struct{ X int } "a,omitempty" B int "b,omitempty" }{nil, 0}, "{}\n", }, // Flow flag { &struct { A []int "a,flow" }{[]int{1, 2}}, "a: [1, 2]\n", }, { &struct { A map[string]string "a,flow" }{map[string]string{"b": "c", "d": "e"}}, "a: {b: c, d: e}\n", }, { &struct { A struct { B, D string } "a,flow" }{struct{ B, D string }{"c", "e"}}, "a: {b: c, d: e}\n", }, // Unexported field { &struct { u int A int }{0, 1}, "a: 1\n", }, // Ignored field { &struct { A int B int "-" }{1, 2}, "a: 1\n", }, // Struct inlining { &struct { A int C inlineB `yaml:",inline"` }{1, inlineB{2, inlineC{3}}}, "a: 1\nb: 2\nc: 3\n", }, // Duration { map[string]time.Duration{"a": 3 * time.Second}, "a: 3s\n", }, // Issue #24: bug in map merging logic. { map[string]string{"a": ""}, "a: \n", }, // Issue #34: marshal unsupported base 60 floats quoted for compatibility // with old YAML 1.1 parsers. { map[string]string{"a": "1:1"}, "a: \"1:1\"\n", }, // Binary data. { map[string]string{"a": "\x00"}, "a: \"\\0\"\n", }, { map[string]string{"a": "\x80\x81\x82"}, "a: !!binary gIGC\n", }, { map[string]string{"a": strings.Repeat("\x90", 54)}, "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", }, { map[string]interface{}{"a": typeWithGetter{"!!str", "\x80\x81\x82"}}, "a: !!binary gIGC\n", }, // Escaping of tags. 
{ map[string]interface{}{"a": typeWithGetter{"foo!bar", 1}}, "a: ! 1\n", }, } func (s *S) TestMarshal(c *C) { for _, item := range marshalTests { data, err := yaml.Marshal(item.value) c.Assert(err, IsNil) c.Assert(string(data), Equals, item.data) } } var marshalErrorTests = []struct { value interface{} error string panic string }{{ value: &struct { B int inlineB ",inline" }{1, inlineB{2, inlineC{3}}}, panic: `Duplicated key 'b' in struct struct \{ B int; .*`, }, { value: typeWithGetter{"!!binary", "\x80"}, error: "YAML error: explicitly tagged !!binary data must be base64-encoded", }, { value: typeWithGetter{"!!float", "\x80"}, error: `YAML error: cannot marshal invalid UTF-8 data as !!float`, }} func (s *S) TestMarshalErrors(c *C) { for _, item := range marshalErrorTests { if item.panic != "" { c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic) } else { _, err := yaml.Marshal(item.value) c.Assert(err, ErrorMatches, item.error) } } } var marshalTaggedIfaceTest interface{} = &struct{ A string }{"B"} var getterTests = []struct { data, tag string value interface{} }{ {"_:\n hi: there\n", "", map[interface{}]interface{}{"hi": "there"}}, {"_:\n- 1\n- A\n", "", []interface{}{1, "A"}}, {"_: 10\n", "", 10}, {"_: null\n", "", nil}, {"_: !foo BAR!\n", "!foo", "BAR!"}, {"_: !foo 1\n", "!foo", "1"}, {"_: !foo '\"1\"'\n", "!foo", "\"1\""}, {"_: !foo 1.1\n", "!foo", 1.1}, {"_: !foo 1\n", "!foo", 1}, {"_: !foo 1\n", "!foo", uint(1)}, {"_: !foo true\n", "!foo", true}, {"_: !foo\n- A\n- B\n", "!foo", []string{"A", "B"}}, {"_: !foo\n A: B\n", "!foo", map[string]string{"A": "B"}}, {"_: !foo\n a: B\n", "!foo", &marshalTaggedIfaceTest}, } func (s *S) TestMarshalTypeCache(c *C) { var data []byte var err error func() { type T struct{ A int } data, err = yaml.Marshal(&T{}) c.Assert(err, IsNil) }() func() { type T struct{ B int } data, err = yaml.Marshal(&T{}) c.Assert(err, IsNil) }() c.Assert(string(data), Equals, "b: 0\n") } type typeWithGetter struct { tag string 
value interface{} } func (o typeWithGetter) GetYAML() (tag string, value interface{}) { return o.tag, o.value } type typeWithGetterField struct { Field typeWithGetter "_" } func (s *S) TestMashalWithGetter(c *C) { for _, item := range getterTests { obj := &typeWithGetterField{} obj.Field.tag = item.tag obj.Field.value = item.value data, err := yaml.Marshal(obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, string(item.data)) } } func (s *S) TestUnmarshalWholeDocumentWithGetter(c *C) { obj := &typeWithGetter{} obj.tag = "" obj.value = map[string]string{"hello": "world!"} data, err := yaml.Marshal(obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, "hello: world!\n") } func (s *S) TestSortedOutput(c *C) { order := []interface{}{ false, true, 1, uint(1), 1.0, 1.1, 1.2, 2, uint(2), 2.0, 2.1, "", ".1", ".2", ".a", "1", "2", "a!10", "a/2", "a/10", "a~10", "ab/1", "b/1", "b/01", "b/2", "b/02", "b/3", "b/03", "b1", "b01", "b3", "c2.10", "c10.2", "d1", "d12", "d12a", } m := make(map[interface{}]int) for _, k := range order { m[k] = 1 } data, err := yaml.Marshal(m) c.Assert(err, IsNil) out := "\n" + string(data) last := 0 for i, k := range order { repr := fmt.Sprint(k) if s, ok := k.(string); ok { if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil { repr = `"` + repr + `"` } } index := strings.Index(out, "\n"+repr+":") if index == -1 { c.Fatalf("%#v is not in the output: %#v", k, out) } if index < last { c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out) } last = index } } charm-2.1.1/src/gopkg.in/yaml.v1/resolve.go0000664000175000017500000001124212672604524017403 0ustar marcomarcopackage yaml import ( "encoding/base64" "fmt" "math" "strconv" "strings" "unicode/utf8" ) // TODO: merge, timestamps, base 60 floats, omap. 
type resolveMapItem struct { value interface{} tag string } var resolveTable = make([]byte, 256) var resolveMap = make(map[string]resolveMapItem) func init() { t := resolveTable t[int('+')] = 'S' // Sign t[int('-')] = 'S' for _, c := range "0123456789" { t[int(c)] = 'D' // Digit } for _, c := range "yYnNtTfFoO~" { t[int(c)] = 'M' // In map } t[int('.')] = '.' // Float (potentially in map) var resolveMapList = []struct { v interface{} tag string l []string }{ {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, {"<<", yaml_MERGE_TAG, []string{"<<"}}, } m := resolveMap for _, item := range resolveMapList { for _, s := range item.l { m[s] = resolveMapItem{item.v, item.tag} } } } const longTagPrefix = "tag:yaml.org,2002:" func shortTag(tag string) string { // TODO This can easily be made faster and produce less garbage. if strings.HasPrefix(tag, longTagPrefix) { return "!!" 
+ tag[len(longTagPrefix):] } return tag } func longTag(tag string) string { if strings.HasPrefix(tag, "!!") { return longTagPrefix + tag[2:] } return tag } func resolvableTag(tag string) bool { switch tag { case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: return true } return false } func resolve(tag string, in string) (rtag string, out interface{}) { if !resolvableTag(tag) { return tag, in } defer func() { switch tag { case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: return } fail(fmt.Sprintf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))) }() // Any data is accepted as a !!str or !!binary. // Otherwise, the prefix is enough of a hint about what it might be. hint := byte('N') if in != "" { hint = resolveTable[in[0]] } if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { // Handle things we can lookup in a map. if item, ok := resolveMap[in]; ok { return item.tag, item.value } // Base 60 floats are a bad idea, were dropped in YAML 1.2, and // are purposefully unsupported here. They're still quoted on // the way out for compatibility with other parser, though. switch hint { case 'M': // We've already checked the map above. case '.': // Not in the map, so maybe a normal float. floatv, err := strconv.ParseFloat(in, 64) if err == nil { return yaml_FLOAT_TAG, floatv } case 'D', 'S': // Int, float, or timestamp. plain := strings.Replace(in, "_", "", -1) intv, err := strconv.ParseInt(plain, 0, 64) if err == nil { if intv == int64(int(intv)) { return yaml_INT_TAG, int(intv) } else { return yaml_INT_TAG, intv } } floatv, err := strconv.ParseFloat(plain, 64) if err == nil { return yaml_FLOAT_TAG, floatv } if strings.HasPrefix(plain, "0b") { intv, err := strconv.ParseInt(plain[2:], 2, 64) if err == nil { return yaml_INT_TAG, int(intv) } } else if strings.HasPrefix(plain, "-0b") { intv, err := strconv.ParseInt(plain[3:], 2, 64) if err == nil { return yaml_INT_TAG, -int(intv) } } // XXX Handle timestamps here. 
default: panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") } } if tag == yaml_BINARY_TAG { return yaml_BINARY_TAG, in } if utf8.ValidString(in) { return yaml_STR_TAG, in } return yaml_BINARY_TAG, encodeBase64(in) } // encodeBase64 encodes s as base64 that is broken up into multiple lines // as appropriate for the resulting length. func encodeBase64(s string) string { const lineLen = 70 encLen := base64.StdEncoding.EncodedLen(len(s)) lines := encLen/lineLen + 1 buf := make([]byte, encLen*2+lines) in := buf[0:encLen] out := buf[encLen:] base64.StdEncoding.Encode(in, []byte(s)) k := 0 for i := 0; i < len(in); i += lineLen { j := i + lineLen if j > len(in) { j = len(in) } k += copy(out[k:], in[i:j]) if lines > 1 { out[k] = '\n' k++ } } return string(out[:k]) } charm-2.1.1/src/gopkg.in/yaml.v1/suite_test.go0000664000175000017500000000021712672604524020114 0ustar marcomarcopackage yaml_test import ( . "gopkg.in/check.v1" "testing" ) func Test(t *testing.T) { TestingT(t) } type S struct{} var _ = Suite(&S{}) charm-2.1.1/src/gopkg.in/yaml.v1/parserc.go0000664000175000017500000010372712672604524017375 0ustar marcomarcopackage yaml import ( "bytes" ) // The parser implements the following grammar: // // stream ::= STREAM-START implicit_document? explicit_document* STREAM-END // implicit_document ::= block_node DOCUMENT-END* // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* // block_node_or_indentless_sequence ::= // ALIAS // | properties (block_content | indentless_block_sequence)? // | block_content // | indentless_block_sequence // block_node ::= ALIAS // | properties block_content? // | block_content // flow_node ::= ALIAS // | properties flow_content? // | flow_content // properties ::= TAG ANCHOR? | ANCHOR TAG? 
// block_content ::= block_collection | flow_collection | SCALAR // flow_content ::= flow_collection | SCALAR // block_collection ::= block_sequence | block_mapping // flow_collection ::= flow_sequence | flow_mapping // block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END // indentless_sequence ::= (BLOCK-ENTRY block_node?)+ // block_mapping ::= BLOCK-MAPPING_START // ((KEY block_node_or_indentless_sequence?)? // (VALUE block_node_or_indentless_sequence?)?)* // BLOCK-END // flow_sequence ::= FLOW-SEQUENCE-START // (flow_sequence_entry FLOW-ENTRY)* // flow_sequence_entry? // FLOW-SEQUENCE-END // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? // flow_mapping ::= FLOW-MAPPING-START // (flow_mapping_entry FLOW-ENTRY)* // flow_mapping_entry? // FLOW-MAPPING-END // flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? // Peek the next token in the token queue. func peek_token(parser *yaml_parser_t) *yaml_token_t { if parser.token_available || yaml_parser_fetch_more_tokens(parser) { return &parser.tokens[parser.tokens_head] } return nil } // Remove the next token from the queue (must be called after peek_token). func skip_token(parser *yaml_parser_t) { parser.token_available = false parser.tokens_parsed++ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN parser.tokens_head++ } // Get the next event. func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { // Erase the event object. *event = yaml_event_t{} // No events after the end of the stream or error. if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { return true } // Generate the next event. return yaml_parser_state_machine(parser, event) } // Set parser error. 
func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { parser.error = yaml_PARSER_ERROR parser.problem = problem parser.problem_mark = problem_mark return false } func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { parser.error = yaml_PARSER_ERROR parser.context = context parser.context_mark = context_mark parser.problem = problem parser.problem_mark = problem_mark return false } // State dispatcher. func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { //trace("yaml_parser_state_machine", "state:", parser.state.String()) switch parser.state { case yaml_PARSE_STREAM_START_STATE: return yaml_parser_parse_stream_start(parser, event) case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: return yaml_parser_parse_document_start(parser, event, true) case yaml_PARSE_DOCUMENT_START_STATE: return yaml_parser_parse_document_start(parser, event, false) case yaml_PARSE_DOCUMENT_CONTENT_STATE: return yaml_parser_parse_document_content(parser, event) case yaml_PARSE_DOCUMENT_END_STATE: return yaml_parser_parse_document_end(parser, event) case yaml_PARSE_BLOCK_NODE_STATE: return yaml_parser_parse_node(parser, event, true, false) case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: return yaml_parser_parse_node(parser, event, true, true) case yaml_PARSE_FLOW_NODE_STATE: return yaml_parser_parse_node(parser, event, false, false) case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: return yaml_parser_parse_block_sequence_entry(parser, event, true) case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: return yaml_parser_parse_block_sequence_entry(parser, event, false) case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: return yaml_parser_parse_indentless_sequence_entry(parser, event) case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: return yaml_parser_parse_block_mapping_key(parser, event, true) case 
yaml_PARSE_BLOCK_MAPPING_KEY_STATE: return yaml_parser_parse_block_mapping_key(parser, event, false) case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: return yaml_parser_parse_block_mapping_value(parser, event) case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: return yaml_parser_parse_flow_sequence_entry(parser, event, true) case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: return yaml_parser_parse_flow_sequence_entry(parser, event, false) case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: return yaml_parser_parse_flow_mapping_key(parser, event, true) case yaml_PARSE_FLOW_MAPPING_KEY_STATE: return yaml_parser_parse_flow_mapping_key(parser, event, false) case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: return yaml_parser_parse_flow_mapping_value(parser, event, false) case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: return yaml_parser_parse_flow_mapping_value(parser, event, true) default: panic("invalid parser state") } return false } // Parse the production: // stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END // ************ func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { return false } if token.typ != yaml_STREAM_START_TOKEN { return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) } parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE *event = yaml_event_t{ typ: yaml_STREAM_START_EVENT, start_mark: token.start_mark, end_mark: token.end_mark, encoding: token.encoding, } skip_token(parser) return true } // Parse the productions: // implicit_document ::= block_node DOCUMENT-END* // * // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* // ************************* func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { token := peek_token(parser) if token == nil { return false } // Parse extra document end indicators. if !implicit { for token.typ == yaml_DOCUMENT_END_TOKEN { skip_token(parser) token = peek_token(parser) if token == nil { return false } } } if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && token.typ != yaml_TAG_DIRECTIVE_TOKEN && token.typ != yaml_DOCUMENT_START_TOKEN && token.typ != yaml_STREAM_END_TOKEN { // Parse an implicit document. if !yaml_parser_process_directives(parser, nil, nil) { return false } parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) parser.state = yaml_PARSE_BLOCK_NODE_STATE *event = yaml_event_t{ typ: yaml_DOCUMENT_START_EVENT, start_mark: token.start_mark, end_mark: token.end_mark, } } else if token.typ != yaml_STREAM_END_TOKEN { // Parse an explicit document. 
var version_directive *yaml_version_directive_t var tag_directives []yaml_tag_directive_t start_mark := token.start_mark if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { return false } token = peek_token(parser) if token == nil { return false } if token.typ != yaml_DOCUMENT_START_TOKEN { yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) return false } parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE end_mark := token.end_mark *event = yaml_event_t{ typ: yaml_DOCUMENT_START_EVENT, start_mark: start_mark, end_mark: end_mark, version_directive: version_directive, tag_directives: tag_directives, implicit: false, } skip_token(parser) } else { // Parse the stream end. parser.state = yaml_PARSE_END_STATE *event = yaml_event_t{ typ: yaml_STREAM_END_EVENT, start_mark: token.start_mark, end_mark: token.end_mark, } skip_token(parser) } return true } // Parse the productions: // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* // *********** // func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { return false } if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN || token.typ == yaml_DOCUMENT_START_TOKEN || token.typ == yaml_DOCUMENT_END_TOKEN || token.typ == yaml_STREAM_END_TOKEN { parser.state = parser.states[len(parser.states)-1] parser.states = parser.states[:len(parser.states)-1] return yaml_parser_process_empty_scalar(parser, event, token.start_mark) } return yaml_parser_parse_node(parser, event, true, false) } // Parse the productions: // implicit_document ::= block_node DOCUMENT-END* // ************* // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* // func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { return false } start_mark := token.start_mark end_mark := token.start_mark implicit := true if token.typ == yaml_DOCUMENT_END_TOKEN { end_mark = token.end_mark skip_token(parser) implicit = false } parser.tag_directives = parser.tag_directives[:0] parser.state = yaml_PARSE_DOCUMENT_START_STATE *event = yaml_event_t{ typ: yaml_DOCUMENT_END_EVENT, start_mark: start_mark, end_mark: end_mark, implicit: implicit, } return true } // Parse the productions: // block_node_or_indentless_sequence ::= // ALIAS // ***** // | properties (block_content | indentless_block_sequence)? // ********** * // | block_content | indentless_block_sequence // * // block_node ::= ALIAS // ***** // | properties block_content? // ********** * // | block_content // * // flow_node ::= ALIAS // ***** // | properties flow_content? // ********** * // | flow_content // * // properties ::= TAG ANCHOR? | ANCHOR TAG? 
// ************************* // block_content ::= block_collection | flow_collection | SCALAR // ****** // flow_content ::= flow_collection | SCALAR // ****** func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() token := peek_token(parser) if token == nil { return false } if token.typ == yaml_ALIAS_TOKEN { parser.state = parser.states[len(parser.states)-1] parser.states = parser.states[:len(parser.states)-1] *event = yaml_event_t{ typ: yaml_ALIAS_EVENT, start_mark: token.start_mark, end_mark: token.end_mark, anchor: token.value, } skip_token(parser) return true } start_mark := token.start_mark end_mark := token.start_mark var tag_token bool var tag_handle, tag_suffix, anchor []byte var tag_mark yaml_mark_t if token.typ == yaml_ANCHOR_TOKEN { anchor = token.value start_mark = token.start_mark end_mark = token.end_mark skip_token(parser) token = peek_token(parser) if token == nil { return false } if token.typ == yaml_TAG_TOKEN { tag_token = true tag_handle = token.value tag_suffix = token.suffix tag_mark = token.start_mark end_mark = token.end_mark skip_token(parser) token = peek_token(parser) if token == nil { return false } } } else if token.typ == yaml_TAG_TOKEN { tag_token = true tag_handle = token.value tag_suffix = token.suffix start_mark = token.start_mark tag_mark = token.start_mark end_mark = token.end_mark skip_token(parser) token = peek_token(parser) if token == nil { return false } if token.typ == yaml_ANCHOR_TOKEN { anchor = token.value end_mark = token.end_mark skip_token(parser) token = peek_token(parser) if token == nil { return false } } } var tag []byte if tag_token { if len(tag_handle) == 0 { tag = tag_suffix tag_suffix = nil } else { for i := range parser.tag_directives { if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { tag = append([]byte(nil), 
parser.tag_directives[i].prefix...) tag = append(tag, tag_suffix...) break } } if len(tag) == 0 { yaml_parser_set_parser_error_context(parser, "while parsing a node", start_mark, "found undefined tag handle", tag_mark) return false } } } implicit := len(tag) == 0 if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { end_mark = token.end_mark parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE *event = yaml_event_t{ typ: yaml_SEQUENCE_START_EVENT, start_mark: start_mark, end_mark: end_mark, anchor: anchor, tag: tag, implicit: implicit, style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), } return true } if token.typ == yaml_SCALAR_TOKEN { var plain_implicit, quoted_implicit bool end_mark = token.end_mark if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { plain_implicit = true } else if len(tag) == 0 { quoted_implicit = true } parser.state = parser.states[len(parser.states)-1] parser.states = parser.states[:len(parser.states)-1] *event = yaml_event_t{ typ: yaml_SCALAR_EVENT, start_mark: start_mark, end_mark: end_mark, anchor: anchor, tag: tag, value: token.value, implicit: plain_implicit, quoted_implicit: quoted_implicit, style: yaml_style_t(token.style), } skip_token(parser) return true } if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { // [Go] Some of the events below can be merged as they differ only on style. 
end_mark = token.end_mark parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE *event = yaml_event_t{ typ: yaml_SEQUENCE_START_EVENT, start_mark: start_mark, end_mark: end_mark, anchor: anchor, tag: tag, implicit: implicit, style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), } return true } if token.typ == yaml_FLOW_MAPPING_START_TOKEN { end_mark = token.end_mark parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE *event = yaml_event_t{ typ: yaml_MAPPING_START_EVENT, start_mark: start_mark, end_mark: end_mark, anchor: anchor, tag: tag, implicit: implicit, style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), } return true } if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { end_mark = token.end_mark parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE *event = yaml_event_t{ typ: yaml_SEQUENCE_START_EVENT, start_mark: start_mark, end_mark: end_mark, anchor: anchor, tag: tag, implicit: implicit, style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), } return true } if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { end_mark = token.end_mark parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE *event = yaml_event_t{ typ: yaml_MAPPING_START_EVENT, start_mark: start_mark, end_mark: end_mark, anchor: anchor, tag: tag, implicit: implicit, style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), } return true } if len(anchor) > 0 || len(tag) > 0 { parser.state = parser.states[len(parser.states)-1] parser.states = parser.states[:len(parser.states)-1] *event = yaml_event_t{ typ: yaml_SCALAR_EVENT, start_mark: start_mark, end_mark: end_mark, anchor: anchor, tag: tag, implicit: implicit, quoted_implicit: false, style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), } return true } context := "while parsing a flow node" if block { context = "while parsing a block node" } yaml_parser_set_parser_error_context(parser, context, start_mark, "did not find expected node content", token.start_mark) return false } // Parse the productions: // block_sequence ::= BLOCK-SEQUENCE-START 
(BLOCK-ENTRY block_node?)* BLOCK-END // ******************** *********** * ********* // func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } token := peek_token(parser) if token == nil { return false } if token.typ == yaml_BLOCK_ENTRY_TOKEN { mark := token.end_mark skip_token(parser) token = peek_token(parser) if token == nil { return false } if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) return yaml_parser_parse_node(parser, event, true, false) } else { parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE return yaml_parser_process_empty_scalar(parser, event, mark) } } if token.typ == yaml_BLOCK_END_TOKEN { parser.state = parser.states[len(parser.states)-1] parser.states = parser.states[:len(parser.states)-1] parser.marks = parser.marks[:len(parser.marks)-1] *event = yaml_event_t{ typ: yaml_SEQUENCE_END_EVENT, start_mark: token.start_mark, end_mark: token.end_mark, } skip_token(parser) return true } context_mark := parser.marks[len(parser.marks)-1] parser.marks = parser.marks[:len(parser.marks)-1] return yaml_parser_set_parser_error_context(parser, "while parsing a block collection", context_mark, "did not find expected '-' indicator", token.start_mark) } // Parse the productions: // indentless_sequence ::= (BLOCK-ENTRY block_node?)+ // *********** * func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { return false } if token.typ == yaml_BLOCK_ENTRY_TOKEN { mark := token.end_mark skip_token(parser) token = peek_token(parser) if token == nil { return false } if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_KEY_TOKEN && token.typ != yaml_VALUE_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { 
parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) return yaml_parser_parse_node(parser, event, true, false) } parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE return yaml_parser_process_empty_scalar(parser, event, mark) } parser.state = parser.states[len(parser.states)-1] parser.states = parser.states[:len(parser.states)-1] *event = yaml_event_t{ typ: yaml_SEQUENCE_END_EVENT, start_mark: token.start_mark, end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? } return true } // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START // ******************* // ((KEY block_node_or_indentless_sequence?)? // *** * // (VALUE block_node_or_indentless_sequence?)?)* // // BLOCK-END // ********* // func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } token := peek_token(parser) if token == nil { return false } if token.typ == yaml_KEY_TOKEN { mark := token.end_mark skip_token(parser) token = peek_token(parser) if token == nil { return false } if token.typ != yaml_KEY_TOKEN && token.typ != yaml_VALUE_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) return yaml_parser_parse_node(parser, event, true, true) } else { parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE return yaml_parser_process_empty_scalar(parser, event, mark) } } else if token.typ == yaml_BLOCK_END_TOKEN { parser.state = parser.states[len(parser.states)-1] parser.states = parser.states[:len(parser.states)-1] parser.marks = parser.marks[:len(parser.marks)-1] *event = yaml_event_t{ typ: yaml_MAPPING_END_EVENT, start_mark: token.start_mark, end_mark: token.end_mark, } skip_token(parser) return true } context_mark := parser.marks[len(parser.marks)-1] parser.marks = parser.marks[:len(parser.marks)-1] return 
yaml_parser_set_parser_error_context(parser, "while parsing a block mapping", context_mark, "did not find expected key", token.start_mark) } // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START // // ((KEY block_node_or_indentless_sequence?)? // // (VALUE block_node_or_indentless_sequence?)?)* // ***** * // BLOCK-END // // func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { return false } if token.typ == yaml_VALUE_TOKEN { mark := token.end_mark skip_token(parser) token = peek_token(parser) if token == nil { return false } if token.typ != yaml_KEY_TOKEN && token.typ != yaml_VALUE_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) return yaml_parser_parse_node(parser, event, true, true) } parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE return yaml_parser_process_empty_scalar(parser, event, mark) } parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE return yaml_parser_process_empty_scalar(parser, event, token.start_mark) } // Parse the productions: // flow_sequence ::= FLOW-SEQUENCE-START // ******************* // (flow_sequence_entry FLOW-ENTRY)* // * ********** // flow_sequence_entry? // * // FLOW-SEQUENCE-END // ***************** // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
// * // func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } token := peek_token(parser) if token == nil { return false } if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { if !first { if token.typ == yaml_FLOW_ENTRY_TOKEN { skip_token(parser) token = peek_token(parser) if token == nil { return false } } else { context_mark := parser.marks[len(parser.marks)-1] parser.marks = parser.marks[:len(parser.marks)-1] return yaml_parser_set_parser_error_context(parser, "while parsing a flow sequence", context_mark, "did not find expected ',' or ']'", token.start_mark) } } if token.typ == yaml_KEY_TOKEN { parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE *event = yaml_event_t{ typ: yaml_MAPPING_START_EVENT, start_mark: token.start_mark, end_mark: token.end_mark, implicit: true, style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), } skip_token(parser) return true } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) return yaml_parser_parse_node(parser, event, false, false) } } parser.state = parser.states[len(parser.states)-1] parser.states = parser.states[:len(parser.states)-1] parser.marks = parser.marks[:len(parser.marks)-1] *event = yaml_event_t{ typ: yaml_SEQUENCE_END_EVENT, start_mark: token.start_mark, end_mark: token.end_mark, } skip_token(parser) return true } // // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
// *** * // func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { return false } if token.typ != yaml_VALUE_TOKEN && token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) return yaml_parser_parse_node(parser, event, false, false) } mark := token.end_mark skip_token(parser) parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE return yaml_parser_process_empty_scalar(parser, event, mark) } // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? // ***** * // func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { return false } if token.typ == yaml_VALUE_TOKEN { skip_token(parser) token := peek_token(parser) if token == nil { return false } if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) return yaml_parser_parse_node(parser, event, false, false) } } parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE return yaml_parser_process_empty_scalar(parser, event, token.start_mark) } // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? // * // func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { return false } parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE *event = yaml_event_t{ typ: yaml_MAPPING_END_EVENT, start_mark: token.start_mark, end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? 
} return true } // Parse the productions: // flow_mapping ::= FLOW-MAPPING-START // ****************** // (flow_mapping_entry FLOW-ENTRY)* // * ********** // flow_mapping_entry? // ****************** // FLOW-MAPPING-END // **************** // flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? // * *** * // func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } token := peek_token(parser) if token == nil { return false } if token.typ != yaml_FLOW_MAPPING_END_TOKEN { if !first { if token.typ == yaml_FLOW_ENTRY_TOKEN { skip_token(parser) token = peek_token(parser) if token == nil { return false } } else { context_mark := parser.marks[len(parser.marks)-1] parser.marks = parser.marks[:len(parser.marks)-1] return yaml_parser_set_parser_error_context(parser, "while parsing a flow mapping", context_mark, "did not find expected ',' or '}'", token.start_mark) } } if token.typ == yaml_KEY_TOKEN { skip_token(parser) token = peek_token(parser) if token == nil { return false } if token.typ != yaml_VALUE_TOKEN && token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) return yaml_parser_parse_node(parser, event, false, false) } else { parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE return yaml_parser_process_empty_scalar(parser, event, token.start_mark) } } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) return yaml_parser_parse_node(parser, event, false, false) } } parser.state = parser.states[len(parser.states)-1] parser.states = parser.states[:len(parser.states)-1] parser.marks = parser.marks[:len(parser.marks)-1] *event = yaml_event_t{ typ: yaml_MAPPING_END_EVENT, start_mark: token.start_mark, end_mark: 
token.end_mark, } skip_token(parser) return true } // Parse the productions: // flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? // * ***** * // func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { token := peek_token(parser) if token == nil { return false } if empty { parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE return yaml_parser_process_empty_scalar(parser, event, token.start_mark) } if token.typ == yaml_VALUE_TOKEN { skip_token(parser) token = peek_token(parser) if token == nil { return false } if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) return yaml_parser_parse_node(parser, event, false, false) } } parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE return yaml_parser_process_empty_scalar(parser, event, token.start_mark) } // Generate an empty scalar event. func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { *event = yaml_event_t{ typ: yaml_SCALAR_EVENT, start_mark: mark, end_mark: mark, value: nil, // Empty implicit: true, style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), } return true } var default_tag_directives = []yaml_tag_directive_t{ {[]byte("!"), []byte("!")}, {[]byte("!!"), []byte("tag:yaml.org,2002:")}, } // Parse directives. 
func yaml_parser_process_directives(parser *yaml_parser_t, version_directive_ref **yaml_version_directive_t, tag_directives_ref *[]yaml_tag_directive_t) bool { var version_directive *yaml_version_directive_t var tag_directives []yaml_tag_directive_t token := peek_token(parser) if token == nil { return false } for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { if version_directive != nil { yaml_parser_set_parser_error(parser, "found duplicate %YAML directive", token.start_mark) return false } if token.major != 1 || token.minor != 1 { yaml_parser_set_parser_error(parser, "found incompatible YAML document", token.start_mark) return false } version_directive = &yaml_version_directive_t{ major: token.major, minor: token.minor, } } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { value := yaml_tag_directive_t{ handle: token.value, prefix: token.prefix, } if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { return false } tag_directives = append(tag_directives, value) } skip_token(parser) token = peek_token(parser) if token == nil { return false } } for i := range default_tag_directives { if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { return false } } if version_directive_ref != nil { *version_directive_ref = version_directive } if tag_directives_ref != nil { *tag_directives_ref = tag_directives } return true } // Append a tag directive to the directives stack. func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { for i := range parser.tag_directives { if bytes.Equal(value.handle, parser.tag_directives[i].handle) { if allow_duplicates { return true } return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) } } // [Go] I suspect the copy is unnecessary. 
This was likely done // because there was no way to track ownership of the data. value_copy := yaml_tag_directive_t{ handle: make([]byte, len(value.handle)), prefix: make([]byte, len(value.prefix)), } copy(value_copy.handle, value.handle) copy(value_copy.prefix, value.prefix) parser.tag_directives = append(parser.tag_directives, value_copy) return true } charm-2.1.1/src/gopkg.in/yaml.v1/encode.go0000664000175000017500000001444212672604524017166 0ustar marcomarcopackage yaml import ( "reflect" "regexp" "sort" "strconv" "strings" "time" ) type encoder struct { emitter yaml_emitter_t event yaml_event_t out []byte flow bool } func newEncoder() (e *encoder) { e = &encoder{} e.must(yaml_emitter_initialize(&e.emitter)) yaml_emitter_set_output_string(&e.emitter, &e.out) e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) e.emit() e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) e.emit() return e } func (e *encoder) finish() { e.must(yaml_document_end_event_initialize(&e.event, true)) e.emit() e.emitter.open_ended = false e.must(yaml_stream_end_event_initialize(&e.event)) e.emit() } func (e *encoder) destroy() { yaml_emitter_delete(&e.emitter) } func (e *encoder) emit() { // This will internally delete the e.event value. 
if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { e.must(false) } } func (e *encoder) must(ok bool) { if !ok { msg := e.emitter.problem if msg == "" { msg = "Unknown problem generating YAML content" } fail(msg) } } func (e *encoder) marshal(tag string, in reflect.Value) { if !in.IsValid() { e.nilv() return } var value interface{} if getter, ok := in.Interface().(Getter); ok { tag, value = getter.GetYAML() tag = longTag(tag) if value == nil { e.nilv() return } in = reflect.ValueOf(value) } switch in.Kind() { case reflect.Interface: if in.IsNil() { e.nilv() } else { e.marshal(tag, in.Elem()) } case reflect.Map: e.mapv(tag, in) case reflect.Ptr: if in.IsNil() { e.nilv() } else { e.marshal(tag, in.Elem()) } case reflect.Struct: e.structv(tag, in) case reflect.Slice: e.slicev(tag, in) case reflect.String: e.stringv(tag, in) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: if in.Type() == durationType { e.stringv(tag, reflect.ValueOf(in.Interface().(time.Duration).String())) } else { e.intv(tag, in) } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: e.uintv(tag, in) case reflect.Float32, reflect.Float64: e.floatv(tag, in) case reflect.Bool: e.boolv(tag, in) default: panic("Can't marshal type: " + in.Type().String()) } } func (e *encoder) mapv(tag string, in reflect.Value) { e.mappingv(tag, func() { keys := keyList(in.MapKeys()) sort.Sort(keys) for _, k := range keys { e.marshal("", k) e.marshal("", in.MapIndex(k)) } }) } func (e *encoder) structv(tag string, in reflect.Value) { sinfo, err := getStructInfo(in.Type()) if err != nil { panic(err) } e.mappingv(tag, func() { for _, info := range sinfo.FieldsList { var value reflect.Value if info.Inline == nil { value = in.Field(info.Num) } else { value = in.FieldByIndex(info.Inline) } if info.OmitEmpty && isZero(value) { continue } e.marshal("", reflect.ValueOf(info.Key)) 
e.flow = info.Flow e.marshal("", value) } }) } func (e *encoder) mappingv(tag string, f func()) { implicit := tag == "" style := yaml_BLOCK_MAPPING_STYLE if e.flow { e.flow = false style = yaml_FLOW_MAPPING_STYLE } e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) e.emit() f() e.must(yaml_mapping_end_event_initialize(&e.event)) e.emit() } func (e *encoder) slicev(tag string, in reflect.Value) { implicit := tag == "" style := yaml_BLOCK_SEQUENCE_STYLE if e.flow { e.flow = false style = yaml_FLOW_SEQUENCE_STYLE } e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) e.emit() n := in.Len() for i := 0; i < n; i++ { e.marshal("", in.Index(i)) } e.must(yaml_sequence_end_event_initialize(&e.event)) e.emit() } // isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. // // The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported // in YAML 1.2 and by this package, but these should be marshalled quoted for // the time being for compatibility with other parsers. func isBase60Float(s string) (result bool) { // Fast path. if s == "" { return false } c := s[0] if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { return false } // Do the full match. return base60float.MatchString(s) } // From http://yaml.org/type/float.html, except the regular expression there // is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. 
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) func (e *encoder) stringv(tag string, in reflect.Value) { var style yaml_scalar_style_t s := in.String() rtag, rs := resolve("", s) if rtag == yaml_BINARY_TAG { if tag == "" || tag == yaml_STR_TAG { tag = rtag s = rs.(string) } else if tag == yaml_BINARY_TAG { fail("explicitly tagged !!binary data must be base64-encoded") } else { fail("cannot marshal invalid UTF-8 data as " + shortTag(tag)) } } if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { style = yaml_DOUBLE_QUOTED_SCALAR_STYLE } else if strings.Contains(s, "\n") { style = yaml_LITERAL_SCALAR_STYLE } else { style = yaml_PLAIN_SCALAR_STYLE } e.emitScalar(s, "", tag, style) } func (e *encoder) boolv(tag string, in reflect.Value) { var s string if in.Bool() { s = "true" } else { s = "false" } e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) } func (e *encoder) intv(tag string, in reflect.Value) { s := strconv.FormatInt(in.Int(), 10) e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) } func (e *encoder) uintv(tag string, in reflect.Value) { s := strconv.FormatUint(in.Uint(), 10) e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) } func (e *encoder) floatv(tag string, in reflect.Value) { // FIXME: Handle 64 bits here. 
s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) switch s { case "+Inf": s = ".inf" case "-Inf": s = "-.inf" case "NaN": s = ".nan" } e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) } func (e *encoder) nilv() { e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) } func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { implicit := tag == "" e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) e.emit() } charm-2.1.1/src/gopkg.in/yaml.v1/sorter.go0000664000175000017500000000465412672604524017253 0ustar marcomarcopackage yaml import ( "reflect" "unicode" ) type keyList []reflect.Value func (l keyList) Len() int { return len(l) } func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } func (l keyList) Less(i, j int) bool { a := l[i] b := l[j] ak := a.Kind() bk := b.Kind() for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { a = a.Elem() ak = a.Kind() } for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { b = b.Elem() bk = b.Kind() } af, aok := keyFloat(a) bf, bok := keyFloat(b) if aok && bok { if af != bf { return af < bf } if ak != bk { return ak < bk } return numLess(a, b) } if ak != reflect.String || bk != reflect.String { return ak < bk } ar, br := []rune(a.String()), []rune(b.String()) for i := 0; i < len(ar) && i < len(br); i++ { if ar[i] == br[i] { continue } al := unicode.IsLetter(ar[i]) bl := unicode.IsLetter(br[i]) if al && bl { return ar[i] < br[i] } if al || bl { return bl } var ai, bi int var an, bn int64 for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { an = an*10 + int64(ar[ai]-'0') } for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { bn = bn*10 + int64(br[bi]-'0') } if an != bn { return an < bn } if ai != bi { return ai < bi } return ar[i] < br[i] } return len(ar) < len(br) } // keyFloat returns a float value for v if it is a number/bool // and whether it is a number/bool or not. 
func keyFloat(v reflect.Value) (f float64, ok bool) { switch v.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return float64(v.Int()), true case reflect.Float32, reflect.Float64: return v.Float(), true case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return float64(v.Uint()), true case reflect.Bool: if v.Bool() { return 1, true } return 0, true } return 0, false } // numLess returns whether a < b. // a and b must necessarily have the same kind. func numLess(a, b reflect.Value) bool { switch a.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return a.Int() < b.Int() case reflect.Float32, reflect.Float64: return a.Float() < b.Float() case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return a.Uint() < b.Uint() case reflect.Bool: return !a.Bool() && b.Bool() } panic("not a number") } charm-2.1.1/src/gopkg.in/yaml.v1/apic.go0000664000175000017500000005137612672604524016654 0ustar marcomarcopackage yaml import ( "io" "os" ) func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) // Check if we can move the queue at the beginning of the buffer. if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { if parser.tokens_head != len(parser.tokens) { copy(parser.tokens, parser.tokens[parser.tokens_head:]) } parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] parser.tokens_head = 0 } parser.tokens = append(parser.tokens, *token) if pos < 0 { return } copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) parser.tokens[parser.tokens_head+pos] = *token } // Create a new parser object. 
func yaml_parser_initialize(parser *yaml_parser_t) bool { *parser = yaml_parser_t{ raw_buffer: make([]byte, 0, input_raw_buffer_size), buffer: make([]byte, 0, input_buffer_size), } return true } // Destroy a parser object. func yaml_parser_delete(parser *yaml_parser_t) { *parser = yaml_parser_t{} } // String read handler. func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { if parser.input_pos == len(parser.input) { return 0, io.EOF } n = copy(buffer, parser.input[parser.input_pos:]) parser.input_pos += n return n, nil } // File read handler. func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { return parser.input_file.Read(buffer) } // Set a string input. func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { if parser.read_handler != nil { panic("must set the input source only once") } parser.read_handler = yaml_string_read_handler parser.input = input parser.input_pos = 0 } // Set a file input. func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { if parser.read_handler != nil { panic("must set the input source only once") } parser.read_handler = yaml_file_read_handler parser.input_file = file } // Set the source encoding. func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { if parser.encoding != yaml_ANY_ENCODING { panic("must set the encoding only once") } parser.encoding = encoding } // Create a new emitter object. func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { *emitter = yaml_emitter_t{ buffer: make([]byte, output_buffer_size), raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), } return true } // Destroy an emitter object. func yaml_emitter_delete(emitter *yaml_emitter_t) { *emitter = yaml_emitter_t{} } // String write handler. 
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { *emitter.output_buffer = append(*emitter.output_buffer, buffer...) return nil } // File write handler. func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { _, err := emitter.output_file.Write(buffer) return err } // Set a string output. func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { if emitter.write_handler != nil { panic("must set the output target only once") } emitter.write_handler = yaml_string_write_handler emitter.output_buffer = output_buffer } // Set a file output. func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { if emitter.write_handler != nil { panic("must set the output target only once") } emitter.write_handler = yaml_file_write_handler emitter.output_file = file } // Set the output encoding. func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { if emitter.encoding != yaml_ANY_ENCODING { panic("must set the output encoding only once") } emitter.encoding = encoding } // Set the canonical output style. func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { emitter.canonical = canonical } //// Set the indentation increment. func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { if indent < 2 || indent > 9 { indent = 2 } emitter.best_indent = indent } // Set the preferred line width. func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { if width < 0 { width = -1 } emitter.best_width = width } // Set if unescaped non-ASCII characters are allowed. func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { emitter.unicode = unicode } // Set the preferred line break character. func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { emitter.line_break = line_break } ///* // * Destroy a token object. 
// */ // //YAML_DECLARE(void) //yaml_token_delete(yaml_token_t *token) //{ // assert(token); // Non-NULL token object expected. // // switch (token.type) // { // case YAML_TAG_DIRECTIVE_TOKEN: // yaml_free(token.data.tag_directive.handle); // yaml_free(token.data.tag_directive.prefix); // break; // // case YAML_ALIAS_TOKEN: // yaml_free(token.data.alias.value); // break; // // case YAML_ANCHOR_TOKEN: // yaml_free(token.data.anchor.value); // break; // // case YAML_TAG_TOKEN: // yaml_free(token.data.tag.handle); // yaml_free(token.data.tag.suffix); // break; // // case YAML_SCALAR_TOKEN: // yaml_free(token.data.scalar.value); // break; // // default: // break; // } // // memset(token, 0, sizeof(yaml_token_t)); //} // ///* // * Check if a string is a valid UTF-8 sequence. // * // * Check 'reader.c' for more details on UTF-8 encoding. // */ // //static int //yaml_check_utf8(yaml_char_t *start, size_t length) //{ // yaml_char_t *end = start+length; // yaml_char_t *pointer = start; // // while (pointer < end) { // unsigned char octet; // unsigned int width; // unsigned int value; // size_t k; // // octet = pointer[0]; // width = (octet & 0x80) == 0x00 ? 1 : // (octet & 0xE0) == 0xC0 ? 2 : // (octet & 0xF0) == 0xE0 ? 3 : // (octet & 0xF8) == 0xF0 ? 4 : 0; // value = (octet & 0x80) == 0x00 ? octet & 0x7F : // (octet & 0xE0) == 0xC0 ? octet & 0x1F : // (octet & 0xF0) == 0xE0 ? octet & 0x0F : // (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; // if (!width) return 0; // if (pointer+width > end) return 0; // for (k = 1; k < width; k ++) { // octet = pointer[k]; // if ((octet & 0xC0) != 0x80) return 0; // value = (value << 6) + (octet & 0x3F); // } // if (!((width == 1) || // (width == 2 && value >= 0x80) || // (width == 3 && value >= 0x800) || // (width == 4 && value >= 0x10000))) return 0; // // pointer += width; // } // // return 1; //} // // Create STREAM-START. 
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { *event = yaml_event_t{ typ: yaml_STREAM_START_EVENT, encoding: encoding, } return true } // Create STREAM-END. func yaml_stream_end_event_initialize(event *yaml_event_t) bool { *event = yaml_event_t{ typ: yaml_STREAM_END_EVENT, } return true } // Create DOCUMENT-START. func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, tag_directives []yaml_tag_directive_t, implicit bool) bool { *event = yaml_event_t{ typ: yaml_DOCUMENT_START_EVENT, version_directive: version_directive, tag_directives: tag_directives, implicit: implicit, } return true } // Create DOCUMENT-END. func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { *event = yaml_event_t{ typ: yaml_DOCUMENT_END_EVENT, implicit: implicit, } return true } ///* // * Create ALIAS. // */ // //YAML_DECLARE(int) //yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) //{ // mark yaml_mark_t = { 0, 0, 0 } // anchor_copy *yaml_char_t = NULL // // assert(event) // Non-NULL event object is expected. // assert(anchor) // Non-NULL anchor is expected. // // if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 // // anchor_copy = yaml_strdup(anchor) // if (!anchor_copy) // return 0 // // ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) // // return 1 //} // Create SCALAR. func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { *event = yaml_event_t{ typ: yaml_SCALAR_EVENT, anchor: anchor, tag: tag, value: value, implicit: plain_implicit, quoted_implicit: quoted_implicit, style: yaml_style_t(style), } return true } // Create SEQUENCE-START. 
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { *event = yaml_event_t{ typ: yaml_SEQUENCE_START_EVENT, anchor: anchor, tag: tag, implicit: implicit, style: yaml_style_t(style), } return true } // Create SEQUENCE-END. func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { *event = yaml_event_t{ typ: yaml_SEQUENCE_END_EVENT, } return true } // Create MAPPING-START. func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { *event = yaml_event_t{ typ: yaml_MAPPING_START_EVENT, anchor: anchor, tag: tag, implicit: implicit, style: yaml_style_t(style), } return true } // Create MAPPING-END. func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { *event = yaml_event_t{ typ: yaml_MAPPING_END_EVENT, } return true } // Destroy an event object. func yaml_event_delete(event *yaml_event_t) { *event = yaml_event_t{} } ///* // * Create a document object. // */ // //YAML_DECLARE(int) //yaml_document_initialize(document *yaml_document_t, // version_directive *yaml_version_directive_t, // tag_directives_start *yaml_tag_directive_t, // tag_directives_end *yaml_tag_directive_t, // start_implicit int, end_implicit int) //{ // struct { // error yaml_error_type_t // } context // struct { // start *yaml_node_t // end *yaml_node_t // top *yaml_node_t // } nodes = { NULL, NULL, NULL } // version_directive_copy *yaml_version_directive_t = NULL // struct { // start *yaml_tag_directive_t // end *yaml_tag_directive_t // top *yaml_tag_directive_t // } tag_directives_copy = { NULL, NULL, NULL } // value yaml_tag_directive_t = { NULL, NULL } // mark yaml_mark_t = { 0, 0, 0 } // // assert(document) // Non-NULL document object is expected. // assert((tag_directives_start && tag_directives_end) || // (tag_directives_start == tag_directives_end)) // // Valid tag directives are expected. 
// // if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error // // if (version_directive) { // version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) // if (!version_directive_copy) goto error // version_directive_copy.major = version_directive.major // version_directive_copy.minor = version_directive.minor // } // // if (tag_directives_start != tag_directives_end) { // tag_directive *yaml_tag_directive_t // if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) // goto error // for (tag_directive = tag_directives_start // tag_directive != tag_directives_end; tag_directive ++) { // assert(tag_directive.handle) // assert(tag_directive.prefix) // if (!yaml_check_utf8(tag_directive.handle, // strlen((char *)tag_directive.handle))) // goto error // if (!yaml_check_utf8(tag_directive.prefix, // strlen((char *)tag_directive.prefix))) // goto error // value.handle = yaml_strdup(tag_directive.handle) // value.prefix = yaml_strdup(tag_directive.prefix) // if (!value.handle || !value.prefix) goto error // if (!PUSH(&context, tag_directives_copy, value)) // goto error // value.handle = NULL // value.prefix = NULL // } // } // // DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, // tag_directives_copy.start, tag_directives_copy.top, // start_implicit, end_implicit, mark, mark) // // return 1 // //error: // STACK_DEL(&context, nodes) // yaml_free(version_directive_copy) // while (!STACK_EMPTY(&context, tag_directives_copy)) { // value yaml_tag_directive_t = POP(&context, tag_directives_copy) // yaml_free(value.handle) // yaml_free(value.prefix) // } // STACK_DEL(&context, tag_directives_copy) // yaml_free(value.handle) // yaml_free(value.prefix) // // return 0 //} // ///* // * Destroy a document object. 
// */ // //YAML_DECLARE(void) //yaml_document_delete(document *yaml_document_t) //{ // struct { // error yaml_error_type_t // } context // tag_directive *yaml_tag_directive_t // // context.error = YAML_NO_ERROR // Eliminate a compliler warning. // // assert(document) // Non-NULL document object is expected. // // while (!STACK_EMPTY(&context, document.nodes)) { // node yaml_node_t = POP(&context, document.nodes) // yaml_free(node.tag) // switch (node.type) { // case YAML_SCALAR_NODE: // yaml_free(node.data.scalar.value) // break // case YAML_SEQUENCE_NODE: // STACK_DEL(&context, node.data.sequence.items) // break // case YAML_MAPPING_NODE: // STACK_DEL(&context, node.data.mapping.pairs) // break // default: // assert(0) // Should not happen. // } // } // STACK_DEL(&context, document.nodes) // // yaml_free(document.version_directive) // for (tag_directive = document.tag_directives.start // tag_directive != document.tag_directives.end // tag_directive++) { // yaml_free(tag_directive.handle) // yaml_free(tag_directive.prefix) // } // yaml_free(document.tag_directives.start) // // memset(document, 0, sizeof(yaml_document_t)) //} // ///** // * Get a document node. // */ // //YAML_DECLARE(yaml_node_t *) //yaml_document_get_node(document *yaml_document_t, index int) //{ // assert(document) // Non-NULL document object is expected. // // if (index > 0 && document.nodes.start + index <= document.nodes.top) { // return document.nodes.start + index - 1 // } // return NULL //} // ///** // * Get the root object. // */ // //YAML_DECLARE(yaml_node_t *) //yaml_document_get_root_node(document *yaml_document_t) //{ // assert(document) // Non-NULL document object is expected. // // if (document.nodes.top != document.nodes.start) { // return document.nodes.start // } // return NULL //} // ///* // * Add a scalar node to a document. 
// */ // //YAML_DECLARE(int) //yaml_document_add_scalar(document *yaml_document_t, // tag *yaml_char_t, value *yaml_char_t, length int, // style yaml_scalar_style_t) //{ // struct { // error yaml_error_type_t // } context // mark yaml_mark_t = { 0, 0, 0 } // tag_copy *yaml_char_t = NULL // value_copy *yaml_char_t = NULL // node yaml_node_t // // assert(document) // Non-NULL document object is expected. // assert(value) // Non-NULL value is expected. // // if (!tag) { // tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG // } // // if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error // tag_copy = yaml_strdup(tag) // if (!tag_copy) goto error // // if (length < 0) { // length = strlen((char *)value) // } // // if (!yaml_check_utf8(value, length)) goto error // value_copy = yaml_malloc(length+1) // if (!value_copy) goto error // memcpy(value_copy, value, length) // value_copy[length] = '\0' // // SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) // if (!PUSH(&context, document.nodes, node)) goto error // // return document.nodes.top - document.nodes.start // //error: // yaml_free(tag_copy) // yaml_free(value_copy) // // return 0 //} // ///* // * Add a sequence node to a document. // */ // //YAML_DECLARE(int) //yaml_document_add_sequence(document *yaml_document_t, // tag *yaml_char_t, style yaml_sequence_style_t) //{ // struct { // error yaml_error_type_t // } context // mark yaml_mark_t = { 0, 0, 0 } // tag_copy *yaml_char_t = NULL // struct { // start *yaml_node_item_t // end *yaml_node_item_t // top *yaml_node_item_t // } items = { NULL, NULL, NULL } // node yaml_node_t // // assert(document) // Non-NULL document object is expected. 
// // if (!tag) { // tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG // } // // if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error // tag_copy = yaml_strdup(tag) // if (!tag_copy) goto error // // if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error // // SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, // style, mark, mark) // if (!PUSH(&context, document.nodes, node)) goto error // // return document.nodes.top - document.nodes.start // //error: // STACK_DEL(&context, items) // yaml_free(tag_copy) // // return 0 //} // ///* // * Add a mapping node to a document. // */ // //YAML_DECLARE(int) //yaml_document_add_mapping(document *yaml_document_t, // tag *yaml_char_t, style yaml_mapping_style_t) //{ // struct { // error yaml_error_type_t // } context // mark yaml_mark_t = { 0, 0, 0 } // tag_copy *yaml_char_t = NULL // struct { // start *yaml_node_pair_t // end *yaml_node_pair_t // top *yaml_node_pair_t // } pairs = { NULL, NULL, NULL } // node yaml_node_t // // assert(document) // Non-NULL document object is expected. // // if (!tag) { // tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG // } // // if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error // tag_copy = yaml_strdup(tag) // if (!tag_copy) goto error // // if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error // // MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, // style, mark, mark) // if (!PUSH(&context, document.nodes, node)) goto error // // return document.nodes.top - document.nodes.start // //error: // STACK_DEL(&context, pairs) // yaml_free(tag_copy) // // return 0 //} // ///* // * Append an item to a sequence node. // */ // //YAML_DECLARE(int) //yaml_document_append_sequence_item(document *yaml_document_t, // sequence int, item int) //{ // struct { // error yaml_error_type_t // } context // // assert(document) // Non-NULL document is required. 
// assert(sequence > 0 // && document.nodes.start + sequence <= document.nodes.top) // // Valid sequence id is required. // assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) // // A sequence node is required. // assert(item > 0 && document.nodes.start + item <= document.nodes.top) // // Valid item id is required. // // if (!PUSH(&context, // document.nodes.start[sequence-1].data.sequence.items, item)) // return 0 // // return 1 //} // ///* // * Append a pair of a key and a value to a mapping node. // */ // //YAML_DECLARE(int) //yaml_document_append_mapping_pair(document *yaml_document_t, // mapping int, key int, value int) //{ // struct { // error yaml_error_type_t // } context // // pair yaml_node_pair_t // // assert(document) // Non-NULL document is required. // assert(mapping > 0 // && document.nodes.start + mapping <= document.nodes.top) // // Valid mapping id is required. // assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) // // A mapping node is required. // assert(key > 0 && document.nodes.start + key <= document.nodes.top) // // Valid key id is required. // assert(value > 0 && document.nodes.start + value <= document.nodes.top) // // Valid value id is required. // // pair.key = key // pair.value = value // // if (!PUSH(&context, // document.nodes.start[mapping-1].data.mapping.pairs, pair)) // return 0 // // return 1 //} // // charm-2.1.1/src/gopkg.in/yaml.v1/yaml.go0000664000175000017500000002072612672604524016675 0ustar marcomarco// Package yaml implements YAML support for the Go language. 
// // Source code and other details for the project are available at GitHub: // // https://github.com/go-yaml/yaml // package yaml import ( "errors" "fmt" "reflect" "strings" "sync" ) type yamlError string func fail(msg string) { panic(yamlError(msg)) } func handleErr(err *error) { if r := recover(); r != nil { if e, ok := r.(yamlError); ok { *err = errors.New("YAML error: " + string(e)) } else { panic(r) } } } // The Setter interface may be implemented by types to do their own custom // unmarshalling of YAML values, rather than being implicitly assigned by // the yaml package machinery. If setting the value works, the method should // return true. If it returns false, the value is considered unsupported // and is omitted from maps and slices. type Setter interface { SetYAML(tag string, value interface{}) bool } // The Getter interface is implemented by types to do their own custom // marshalling into a YAML tag and value. type Getter interface { GetYAML() (tag string, value interface{}) } // Unmarshal decodes the first document found within the in byte slice // and assigns decoded values into the out value. // // Maps and pointers (to a struct, string, int, etc) are accepted as out // values. If an internal pointer within a struct is not initialized, // the yaml package will initialize it if necessary for unmarshalling // the provided data. The out parameter must not be nil. // // The type of the decoded values and the type of out will be considered, // and Unmarshal will do the best possible job to unmarshal values // appropriately. It is NOT considered an error, though, to skip values // because they are not available in the decoded YAML, or if they are not // compatible with the out value. To ensure something was properly // unmarshaled use a map or compare against the previous value for the // field (usually the zero value). 
// // Struct fields are only unmarshalled if they are exported (have an // upper case first letter), and are unmarshalled using the field name // lowercased as the default key. Custom keys may be defined via the // "yaml" name in the field tag: the content preceding the first comma // is used as the key, and the following comma-separated options are // used to tweak the marshalling process (see Marshal). // Conflicting names result in a runtime error. // // For example: // // type T struct { // F int `yaml:"a,omitempty"` // B int // } // var t T // yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) // // See the documentation of Marshal for the format of tags and a list of // supported tag options. // func Unmarshal(in []byte, out interface{}) (err error) { defer handleErr(&err) d := newDecoder() p := newParser(in) defer p.destroy() node := p.parse() if node != nil { v := reflect.ValueOf(out) if v.Kind() == reflect.Ptr && !v.IsNil() { v = v.Elem() } d.unmarshal(node, v) } return nil } // Marshal serializes the value provided into a YAML document. The structure // of the generated document will reflect the structure of the value itself. // Maps and pointers (to struct, string, int, etc) are accepted as the in value. // // Struct fields are only unmarshalled if they are exported (have an upper case // first letter), and are unmarshalled using the field name lowercased as the // default key. Custom keys may be defined via the "yaml" name in the field // tag: the content preceding the first comma is used as the key, and the // following comma-separated options are used to tweak the marshalling process. // Conflicting names result in a runtime error. // // The field tag format accepted is: // // `(...) yaml:"[][,[,]]" (...)` // // The following flags are currently supported: // // omitempty Only include the field if it's not set to the zero // value for the type or to empty slices or maps. // Does not apply to zero valued structs. 
// // flow Marshal using a flow style (useful for structs, // sequences and maps. // // inline Inline the struct it's applied to, so its fields // are processed as if they were part of the outer // struct. // // In addition, if the key is "-", the field is ignored. // // For example: // // type T struct { // F int "a,omitempty" // B int // } // yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" // yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" // func Marshal(in interface{}) (out []byte, err error) { defer handleErr(&err) e := newEncoder() defer e.destroy() e.marshal("", reflect.ValueOf(in)) e.finish() out = e.out return } // -------------------------------------------------------------------------- // Maintain a mapping of keys to structure field indexes // The code in this section was copied from mgo/bson. // structInfo holds details for the serialization of fields of // a given struct. type structInfo struct { FieldsMap map[string]fieldInfo FieldsList []fieldInfo // InlineMap is the number of the field in the struct that // contains an ,inline map, or -1 if there's none. InlineMap int } type fieldInfo struct { Key string Num int OmitEmpty bool Flow bool // Inline holds the field index if the field is part of an inlined struct. 
Inline []int } var structMap = make(map[reflect.Type]*structInfo) var fieldMapMutex sync.RWMutex func getStructInfo(st reflect.Type) (*structInfo, error) { fieldMapMutex.RLock() sinfo, found := structMap[st] fieldMapMutex.RUnlock() if found { return sinfo, nil } n := st.NumField() fieldsMap := make(map[string]fieldInfo) fieldsList := make([]fieldInfo, 0, n) inlineMap := -1 for i := 0; i != n; i++ { field := st.Field(i) if field.PkgPath != "" { continue // Private field } info := fieldInfo{Num: i} tag := field.Tag.Get("yaml") if tag == "" && strings.Index(string(field.Tag), ":") < 0 { tag = string(field.Tag) } if tag == "-" { continue } inline := false fields := strings.Split(tag, ",") if len(fields) > 1 { for _, flag := range fields[1:] { switch flag { case "omitempty": info.OmitEmpty = true case "flow": info.Flow = true case "inline": inline = true default: return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) } } tag = fields[0] } if inline { switch field.Type.Kind() { // TODO: Implement support for inline maps. //case reflect.Map: // if inlineMap >= 0 { // return nil, errors.New("Multiple ,inline maps in struct " + st.String()) // } // if field.Type.Key() != reflect.TypeOf("") { // return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) // } // inlineMap = info.Num case reflect.Struct: sinfo, err := getStructInfo(field.Type) if err != nil { return nil, err } for _, finfo := range sinfo.FieldsList { if _, found := fieldsMap[finfo.Key]; found { msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() return nil, errors.New(msg) } if finfo.Inline == nil { finfo.Inline = []int{i, finfo.Num} } else { finfo.Inline = append([]int{i}, finfo.Inline...) 
} fieldsMap[finfo.Key] = finfo fieldsList = append(fieldsList, finfo) } default: //return nil, errors.New("Option ,inline needs a struct value or map field") return nil, errors.New("Option ,inline needs a struct value field") } continue } if tag != "" { info.Key = tag } else { info.Key = strings.ToLower(field.Name) } if _, found = fieldsMap[info.Key]; found { msg := "Duplicated key '" + info.Key + "' in struct " + st.String() return nil, errors.New(msg) } fieldsList = append(fieldsList, info) fieldsMap[info.Key] = info } sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} fieldMapMutex.Lock() structMap[st] = sinfo fieldMapMutex.Unlock() return sinfo, nil } func isZero(v reflect.Value) bool { switch v.Kind() { case reflect.String: return len(v.String()) == 0 case reflect.Interface, reflect.Ptr: return v.IsNil() case reflect.Slice: return v.Len() == 0 case reflect.Map: return v.Len() == 0 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() == 0 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return v.Uint() == 0 case reflect.Bool: return !v.Bool() } return false } charm-2.1.1/src/gopkg.in/check.v1/0000775000175000017500000000000012672604604015507 5ustar marcomarcocharm-2.1.1/src/gopkg.in/check.v1/TODO0000664000175000017500000000007012672604604016174 0ustar marcomarco- Assert(slice, Contains, item) - Parallel test support charm-2.1.1/src/gopkg.in/check.v1/reporter.go0000664000175000017500000000421312672604604017700 0ustar marcomarcopackage check import ( "fmt" "io" "sync" ) // ----------------------------------------------------------------------- // Output writer manages atomic output writing according to settings. 
type outputWriter struct { m sync.Mutex writer io.Writer wroteCallProblemLast bool Stream bool Verbose bool } func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter { return &outputWriter{writer: writer, Stream: stream, Verbose: verbose} } func (ow *outputWriter) Write(content []byte) (n int, err error) { ow.m.Lock() n, err = ow.writer.Write(content) ow.m.Unlock() return } func (ow *outputWriter) WriteCallStarted(label string, c *C) { if ow.Stream { header := renderCallHeader(label, c, "", "\n") ow.m.Lock() ow.writer.Write([]byte(header)) ow.m.Unlock() } } func (ow *outputWriter) WriteCallProblem(label string, c *C) { var prefix string if !ow.Stream { prefix = "\n-----------------------------------" + "-----------------------------------\n" } header := renderCallHeader(label, c, prefix, "\n\n") ow.m.Lock() ow.wroteCallProblemLast = true ow.writer.Write([]byte(header)) if !ow.Stream { c.logb.WriteTo(ow.writer) } ow.m.Unlock() } func (ow *outputWriter) WriteCallSuccess(label string, c *C) { if ow.Stream || (ow.Verbose && c.kind == testKd) { // TODO Use a buffer here. var suffix string if c.reason != "" { suffix = " (" + c.reason + ")" } if c.status() == succeededSt { suffix += "\t" + c.timerString() } suffix += "\n" if ow.Stream { suffix += "\n" } header := renderCallHeader(label, c, "", suffix) ow.m.Lock() // Resist temptation of using line as prefix above due to race. 
if !ow.Stream && ow.wroteCallProblemLast { header = "\n-----------------------------------" + "-----------------------------------\n" + header } ow.wroteCallProblemLast = false ow.writer.Write([]byte(header)) ow.m.Unlock() } } func renderCallHeader(label string, c *C, prefix, suffix string) string { pc := c.method.PC() return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc), niceFuncName(pc), suffix) } charm-2.1.1/src/gopkg.in/check.v1/export_test.go0000664000175000017500000000057112672604604020421 0ustar marcomarcopackage check import "io" func PrintLine(filename string, line int) (string, error) { return printLine(filename, line) } func Indent(s, with string) string { return indent(s, with) } func NewOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter { return newOutputWriter(writer, stream, verbose) } func (c *C) FakeSkip(reason string) { c.reason = reason } charm-2.1.1/src/gopkg.in/check.v1/bootstrap_test.go0000664000175000017500000000424312672604604021115 0ustar marcomarco// These initial tests are for bootstrapping. They verify that we can // basically use the testing infrastructure itself to check if the test // system is working. // // These tests use will break down the test runner badly in case of // errors because if they simply fail, we can't be sure the developer // will ever see anything (because failing means the failing system // somehow isn't working! :-) // // Do not assume *any* internal functionality works as expected besides // what's actually tested here. 
package check_test import ( "fmt" "gopkg.in/check.v1" "strings" ) type BootstrapS struct{} var boostrapS = check.Suite(&BootstrapS{}) func (s *BootstrapS) TestCountSuite(c *check.C) { suitesRun += 1 } func (s *BootstrapS) TestFailedAndFail(c *check.C) { if c.Failed() { critical("c.Failed() must be false first!") } c.Fail() if !c.Failed() { critical("c.Fail() didn't put the test in a failed state!") } c.Succeed() } func (s *BootstrapS) TestFailedAndSucceed(c *check.C) { c.Fail() c.Succeed() if c.Failed() { critical("c.Succeed() didn't put the test back in a non-failed state") } } func (s *BootstrapS) TestLogAndGetTestLog(c *check.C) { c.Log("Hello there!") log := c.GetTestLog() if log != "Hello there!\n" { critical(fmt.Sprintf("Log() or GetTestLog() is not working! Got: %#v", log)) } } func (s *BootstrapS) TestLogfAndGetTestLog(c *check.C) { c.Logf("Hello %v", "there!") log := c.GetTestLog() if log != "Hello there!\n" { critical(fmt.Sprintf("Logf() or GetTestLog() is not working! Got: %#v", log)) } } func (s *BootstrapS) TestRunShowsErrors(c *check.C) { output := String{} check.Run(&FailHelper{}, &check.RunConf{Output: &output}) if strings.Index(output.value, "Expected failure!") == -1 { critical(fmt.Sprintf("RunWithWriter() output did not contain the "+ "expected failure! Got: %#v", output.value)) } } func (s *BootstrapS) TestRunDoesntShowSuccesses(c *check.C) { output := String{} check.Run(&SuccessHelper{}, &check.RunConf{Output: &output}) if strings.Index(output.value, "Expected success!") != -1 { critical(fmt.Sprintf("RunWithWriter() output contained a successful "+ "test! 
Got: %#v", output.value)) } } charm-2.1.1/src/gopkg.in/check.v1/helpers.go0000664000175000017500000001610512672604604017503 0ustar marcomarcopackage check import ( "fmt" "strings" "time" ) // TestName returns the current test name in the form "SuiteName.TestName" func (c *C) TestName() string { return c.testName } // ----------------------------------------------------------------------- // Basic succeeding/failing logic. // Failed returns whether the currently running test has already failed. func (c *C) Failed() bool { return c.status() == failedSt } // Fail marks the currently running test as failed. // // Something ought to have been previously logged so the developer can tell // what went wrong. The higher level helper functions will fail the test // and do the logging properly. func (c *C) Fail() { c.setStatus(failedSt) } // FailNow marks the currently running test as failed and stops running it. // Something ought to have been previously logged so the developer can tell // what went wrong. The higher level helper functions will fail the test // and do the logging properly. func (c *C) FailNow() { c.Fail() c.stopNow() } // Succeed marks the currently running test as succeeded, undoing any // previous failures. func (c *C) Succeed() { c.setStatus(succeededSt) } // SucceedNow marks the currently running test as succeeded, undoing any // previous failures, and stops running the test. func (c *C) SucceedNow() { c.Succeed() c.stopNow() } // ExpectFailure informs that the running test is knowingly broken for // the provided reason. If the test does not fail, an error will be reported // to raise attention to this fact. This method is useful to temporarily // disable tests which cover well known problems until a better time to // fix the problem is found, without forgetting about the fact that a // failure still exists. 
func (c *C) ExpectFailure(reason string) { if reason == "" { panic("Missing reason why the test is expected to fail") } c.mustFail = true c.reason = reason } // Skip skips the running test for the provided reason. If run from within // SetUpTest, the individual test being set up will be skipped, and if run // from within SetUpSuite, the whole suite is skipped. func (c *C) Skip(reason string) { if reason == "" { panic("Missing reason why the test is being skipped") } c.reason = reason c.setStatus(skippedSt) c.stopNow() } // ----------------------------------------------------------------------- // Basic logging. // GetTestLog returns the current test error output. func (c *C) GetTestLog() string { return c.logb.String() } // Log logs some information into the test error output. // The provided arguments are assembled together into a string with fmt.Sprint. func (c *C) Log(args ...interface{}) { c.log(args...) } // Log logs some information into the test error output. // The provided arguments are assembled together into a string with fmt.Sprintf. func (c *C) Logf(format string, args ...interface{}) { c.logf(format, args...) } // Output enables *C to be used as a logger in functions that require only // the minimum interface of *log.Logger. func (c *C) Output(calldepth int, s string) error { d := time.Now().Sub(c.startTime) msec := d / time.Millisecond sec := d / time.Second min := d / time.Minute c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s) return nil } // Error logs an error into the test error output and marks the test as failed. // The provided arguments are assembled together into a string with fmt.Sprint. func (c *C) Error(args ...interface{}) { c.logCaller(1) c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...))) c.logNewLine() c.Fail() } // Errorf logs an error into the test error output and marks the test as failed. // The provided arguments are assembled together into a string with fmt.Sprintf. 
func (c *C) Errorf(format string, args ...interface{}) {
	c.logCaller(1)
	c.logString(fmt.Sprintf("Error: "+format, args...))
	c.logNewLine()
	c.Fail()
}

// Fatal logs an error into the test error output, marks the test as failed, and
// stops the test execution. The provided arguments are assembled together into
// a string with fmt.Sprint.
func (c *C) Fatal(args ...interface{}) {
	c.logCaller(1)
	c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
	c.logNewLine()
	c.FailNow()
}

// Fatalf logs an error into the test error output, marks the test as failed, and
// stops the test execution. The provided arguments are assembled together into
// a string with fmt.Sprintf.
func (c *C) Fatalf(format string, args ...interface{}) {
	c.logCaller(1)
	c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...)))
	c.logNewLine()
	c.FailNow()
}

// -----------------------------------------------------------------------
// Generic checks and assertions based on checkers.

// Check verifies if the first value matches the expected value according
// to the provided checker. If they do not match, an error is logged, the
// test is marked as failed, and the test execution continues.
//
// Some checkers may not need the expected argument (e.g. IsNil).
//
// Extra arguments provided to the function are logged next to the reported
// problem when the matching fails.
func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool {
	return c.internalCheck("Check", obtained, checker, args...)
}

// Assert ensures that the first value matches the expected value according
// to the provided checker. If they do not match, an error is logged, the
// test is marked as failed, and the test execution stops.
//
// Some checkers may not need the expected argument (e.g. IsNil).
//
// Extra arguments provided to the function are logged next to the reported
// problem when the matching fails.
func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) { if !c.internalCheck("Assert", obtained, checker, args...) { c.stopNow() } } func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool { if checker == nil { c.logCaller(2) c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName)) c.logString("Oops.. you've provided a nil checker!") c.logNewLine() c.Fail() return false } // If the last argument is a bug info, extract it out. var comment CommentInterface if len(args) > 0 { if c, ok := args[len(args)-1].(CommentInterface); ok { comment = c args = args[:len(args)-1] } } params := append([]interface{}{obtained}, args...) info := checker.Info() if len(params) != len(info.Params) { names := append([]string{info.Params[0], info.Name}, info.Params[1:]...) c.logCaller(2) c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", "))) c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1)) c.logNewLine() c.Fail() return false } // Copy since it may be mutated by Check. names := append([]string{}, info.Params...) // Do the actual check. result, error := checker.Check(params, names) if !result || error != "" { c.logCaller(2) for i := 0; i != len(params); i++ { c.logValue(names[i], params[i]) } if comment != nil { c.logString(comment.CheckCommentString()) } if error != "" { c.logString(error) } c.logNewLine() c.Fail() return false } return true } charm-2.1.1/src/gopkg.in/check.v1/benchmark_test.go0000664000175000017500000000520712672604604021033 0ustar marcomarco// These tests verify the test running logic. package check_test import ( "time" . 
"gopkg.in/check.v1" ) var benchmarkS = Suite(&BenchmarkS{}) type BenchmarkS struct{} func (s *BenchmarkS) TestCountSuite(c *C) { suitesRun += 1 } func (s *BenchmarkS) TestBasicTestTiming(c *C) { helper := FixtureHelper{sleepOn: "Test1", sleep: 1000000 * time.Nanosecond} output := String{} runConf := RunConf{Output: &output, Verbose: true} Run(&helper, &runConf) expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t0\\.001s\n" + "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t0\\.000s\n" c.Assert(output.value, Matches, expected) } func (s *BenchmarkS) TestStreamTestTiming(c *C) { helper := FixtureHelper{sleepOn: "SetUpSuite", sleep: 1000000 * time.Nanosecond} output := String{} runConf := RunConf{Output: &output, Stream: true} Run(&helper, &runConf) expected := "(?s).*\nPASS: check_test\\.go:[0-9]+: FixtureHelper\\.SetUpSuite\t *0\\.001s\n.*" c.Assert(output.value, Matches, expected) } func (s *BenchmarkS) TestBenchmark(c *C) { helper := FixtureHelper{sleep: 100000} output := String{} runConf := RunConf{ Output: &output, Benchmark: true, BenchmarkTime: 10000000, Filter: "Benchmark1", } Run(&helper, &runConf) c.Check(helper.calls[0], Equals, "SetUpSuite") c.Check(helper.calls[1], Equals, "SetUpTest") c.Check(helper.calls[2], Equals, "Benchmark1") c.Check(helper.calls[3], Equals, "TearDownTest") c.Check(helper.calls[4], Equals, "SetUpTest") c.Check(helper.calls[5], Equals, "Benchmark1") c.Check(helper.calls[6], Equals, "TearDownTest") // ... and more. 
expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark1\t *100\t *[12][0-9]{5} ns/op\n" c.Assert(output.value, Matches, expected) } func (s *BenchmarkS) TestBenchmarkBytes(c *C) { helper := FixtureHelper{sleep: 100000} output := String{} runConf := RunConf{ Output: &output, Benchmark: true, BenchmarkTime: 10000000, Filter: "Benchmark2", } Run(&helper, &runConf) expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark2\t *100\t *[12][0-9]{5} ns/op\t *[4-9]\\.[0-9]{2} MB/s\n" c.Assert(output.value, Matches, expected) } func (s *BenchmarkS) TestBenchmarkMem(c *C) { helper := FixtureHelper{sleep: 100000} output := String{} runConf := RunConf{ Output: &output, Benchmark: true, BenchmarkMem: true, BenchmarkTime: 10000000, Filter: "Benchmark3", } Run(&helper, &runConf) expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark3\t *100\t *[12][0-9]{5} ns/op\t *[0-9]+ B/op\t *[1-9] allocs/op\n" c.Assert(output.value, Matches, expected) } charm-2.1.1/src/gopkg.in/check.v1/checkers_test.go0000664000175000017500000002407012672604604020667 0ustar marcomarcopackage check_test import ( "errors" "gopkg.in/check.v1" "reflect" "runtime" ) type CheckersS struct{} var _ = check.Suite(&CheckersS{}) func testInfo(c *check.C, checker check.Checker, name string, paramNames []string) { info := checker.Info() if info.Name != name { c.Fatalf("Got name %s, expected %s", info.Name, name) } if !reflect.DeepEqual(info.Params, paramNames) { c.Fatalf("Got param names %#v, expected %#v", info.Params, paramNames) } } func testCheck(c *check.C, checker check.Checker, result bool, error string, params ...interface{}) ([]interface{}, []string) { info := checker.Info() if len(params) != len(info.Params) { c.Fatalf("unexpected param count in test; expected %d got %d", len(info.Params), len(params)) } names := append([]string{}, info.Params...) 
result_, error_ := checker.Check(params, names) if result_ != result || error_ != error { c.Fatalf("%s.Check(%#v) returned (%#v, %#v) rather than (%#v, %#v)", info.Name, params, result_, error_, result, error) } return params, names } func (s *CheckersS) TestComment(c *check.C) { bug := check.Commentf("a %d bc", 42) comment := bug.CheckCommentString() if comment != "a 42 bc" { c.Fatalf("Commentf returned %#v", comment) } } func (s *CheckersS) TestIsNil(c *check.C) { testInfo(c, check.IsNil, "IsNil", []string{"value"}) testCheck(c, check.IsNil, true, "", nil) testCheck(c, check.IsNil, false, "", "a") testCheck(c, check.IsNil, true, "", (chan int)(nil)) testCheck(c, check.IsNil, false, "", make(chan int)) testCheck(c, check.IsNil, true, "", (error)(nil)) testCheck(c, check.IsNil, false, "", errors.New("")) testCheck(c, check.IsNil, true, "", ([]int)(nil)) testCheck(c, check.IsNil, false, "", make([]int, 1)) testCheck(c, check.IsNil, false, "", int(0)) } func (s *CheckersS) TestNotNil(c *check.C) { testInfo(c, check.NotNil, "NotNil", []string{"value"}) testCheck(c, check.NotNil, false, "", nil) testCheck(c, check.NotNil, true, "", "a") testCheck(c, check.NotNil, false, "", (chan int)(nil)) testCheck(c, check.NotNil, true, "", make(chan int)) testCheck(c, check.NotNil, false, "", (error)(nil)) testCheck(c, check.NotNil, true, "", errors.New("")) testCheck(c, check.NotNil, false, "", ([]int)(nil)) testCheck(c, check.NotNil, true, "", make([]int, 1)) } func (s *CheckersS) TestNot(c *check.C) { testInfo(c, check.Not(check.IsNil), "Not(IsNil)", []string{"value"}) testCheck(c, check.Not(check.IsNil), false, "", nil) testCheck(c, check.Not(check.IsNil), true, "", "a") } type simpleStruct struct { i int } func (s *CheckersS) TestEquals(c *check.C) { testInfo(c, check.Equals, "Equals", []string{"obtained", "expected"}) // The simplest. testCheck(c, check.Equals, true, "", 42, 42) testCheck(c, check.Equals, false, "", 42, 43) // Different native types. 
testCheck(c, check.Equals, false, "", int32(42), int64(42)) // With nil. testCheck(c, check.Equals, false, "", 42, nil) // Slices testCheck(c, check.Equals, false, "runtime error: comparing uncomparable type []uint8", []byte{1, 2}, []byte{1, 2}) // Struct values testCheck(c, check.Equals, true, "", simpleStruct{1}, simpleStruct{1}) testCheck(c, check.Equals, false, "", simpleStruct{1}, simpleStruct{2}) // Struct pointers testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{1}) testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{2}) } func (s *CheckersS) TestDeepEquals(c *check.C) { testInfo(c, check.DeepEquals, "DeepEquals", []string{"obtained", "expected"}) // The simplest. testCheck(c, check.DeepEquals, true, "", 42, 42) testCheck(c, check.DeepEquals, false, "", 42, 43) // Different native types. testCheck(c, check.DeepEquals, false, "", int32(42), int64(42)) // With nil. testCheck(c, check.DeepEquals, false, "", 42, nil) // Slices testCheck(c, check.DeepEquals, true, "", []byte{1, 2}, []byte{1, 2}) testCheck(c, check.DeepEquals, false, "", []byte{1, 2}, []byte{1, 3}) // Struct values testCheck(c, check.DeepEquals, true, "", simpleStruct{1}, simpleStruct{1}) testCheck(c, check.DeepEquals, false, "", simpleStruct{1}, simpleStruct{2}) // Struct pointers testCheck(c, check.DeepEquals, true, "", &simpleStruct{1}, &simpleStruct{1}) testCheck(c, check.DeepEquals, false, "", &simpleStruct{1}, &simpleStruct{2}) } func (s *CheckersS) TestHasLen(c *check.C) { testInfo(c, check.HasLen, "HasLen", []string{"obtained", "n"}) testCheck(c, check.HasLen, true, "", "abcd", 4) testCheck(c, check.HasLen, true, "", []int{1, 2}, 2) testCheck(c, check.HasLen, false, "", []int{1, 2}, 3) testCheck(c, check.HasLen, false, "n must be an int", []int{1, 2}, "2") testCheck(c, check.HasLen, false, "obtained value type has no length", nil, 2) } func (s *CheckersS) TestErrorMatches(c *check.C) { testInfo(c, check.ErrorMatches, "ErrorMatches", []string{"value", 
"regex"}) testCheck(c, check.ErrorMatches, false, "Error value is nil", nil, "some error") testCheck(c, check.ErrorMatches, false, "Value is not an error", 1, "some error") testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "some error") testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "so.*or") // Verify params mutation params, names := testCheck(c, check.ErrorMatches, false, "", errors.New("some error"), "other error") c.Assert(params[0], check.Equals, "some error") c.Assert(names[0], check.Equals, "error") } func (s *CheckersS) TestMatches(c *check.C) { testInfo(c, check.Matches, "Matches", []string{"value", "regex"}) // Simple matching testCheck(c, check.Matches, true, "", "abc", "abc") testCheck(c, check.Matches, true, "", "abc", "a.c") // Must match fully testCheck(c, check.Matches, false, "", "abc", "ab") testCheck(c, check.Matches, false, "", "abc", "bc") // String()-enabled values accepted testCheck(c, check.Matches, true, "", reflect.ValueOf("abc"), "a.c") testCheck(c, check.Matches, false, "", reflect.ValueOf("abc"), "a.d") // Some error conditions. testCheck(c, check.Matches, false, "Obtained value is not a string and has no .String()", 1, "a.c") testCheck(c, check.Matches, false, "Can't compile regex: error parsing regexp: missing closing ]: `[c$`", "abc", "a[c") } func (s *CheckersS) TestPanics(c *check.C) { testInfo(c, check.Panics, "Panics", []string{"function", "expected"}) // Some errors. testCheck(c, check.Panics, false, "Function has not panicked", func() bool { return false }, "BOOM") testCheck(c, check.Panics, false, "Function must take zero arguments", 1, "BOOM") // Plain strings. testCheck(c, check.Panics, true, "", func() { panic("BOOM") }, "BOOM") testCheck(c, check.Panics, false, "", func() { panic("KABOOM") }, "BOOM") testCheck(c, check.Panics, true, "", func() bool { panic("BOOM") }, "BOOM") // Error values. 
testCheck(c, check.Panics, true, "", func() { panic(errors.New("BOOM")) }, errors.New("BOOM")) testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM")) type deep struct{ i int } // Deep value testCheck(c, check.Panics, true, "", func() { panic(&deep{99}) }, &deep{99}) // Verify params/names mutation params, names := testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM")) c.Assert(params[0], check.ErrorMatches, "KABOOM") c.Assert(names[0], check.Equals, "panic") // Verify a nil panic testCheck(c, check.Panics, true, "", func() { panic(nil) }, nil) testCheck(c, check.Panics, false, "", func() { panic(nil) }, "NOPE") } func (s *CheckersS) TestPanicMatches(c *check.C) { testInfo(c, check.PanicMatches, "PanicMatches", []string{"function", "expected"}) // Error matching. testCheck(c, check.PanicMatches, true, "", func() { panic(errors.New("BOOM")) }, "BO.M") testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BO.M") // Some errors. testCheck(c, check.PanicMatches, false, "Function has not panicked", func() bool { return false }, "BOOM") testCheck(c, check.PanicMatches, false, "Function must take zero arguments", 1, "BOOM") // Plain strings. 
testCheck(c, check.PanicMatches, true, "", func() { panic("BOOM") }, "BO.M") testCheck(c, check.PanicMatches, false, "", func() { panic("KABOOM") }, "BOOM") testCheck(c, check.PanicMatches, true, "", func() bool { panic("BOOM") }, "BO.M") // Verify params/names mutation params, names := testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BOOM") c.Assert(params[0], check.Equals, "KABOOM") c.Assert(names[0], check.Equals, "panic") // Verify a nil panic testCheck(c, check.PanicMatches, false, "Panic value is not a string or an error", func() { panic(nil) }, "") } func (s *CheckersS) TestFitsTypeOf(c *check.C) { testInfo(c, check.FitsTypeOf, "FitsTypeOf", []string{"obtained", "sample"}) // Basic types testCheck(c, check.FitsTypeOf, true, "", 1, 0) testCheck(c, check.FitsTypeOf, false, "", 1, int64(0)) // Aliases testCheck(c, check.FitsTypeOf, false, "", 1, errors.New("")) testCheck(c, check.FitsTypeOf, false, "", "error", errors.New("")) testCheck(c, check.FitsTypeOf, true, "", errors.New("error"), errors.New("")) // Structures testCheck(c, check.FitsTypeOf, false, "", 1, simpleStruct{}) testCheck(c, check.FitsTypeOf, false, "", simpleStruct{42}, &simpleStruct{}) testCheck(c, check.FitsTypeOf, true, "", simpleStruct{42}, simpleStruct{}) testCheck(c, check.FitsTypeOf, true, "", &simpleStruct{42}, &simpleStruct{}) // Some bad values testCheck(c, check.FitsTypeOf, false, "Invalid sample value", 1, interface{}(nil)) testCheck(c, check.FitsTypeOf, false, "", interface{}(nil), 0) } func (s *CheckersS) TestImplements(c *check.C) { testInfo(c, check.Implements, "Implements", []string{"obtained", "ifaceptr"}) var e error var re runtime.Error testCheck(c, check.Implements, true, "", errors.New(""), &e) testCheck(c, check.Implements, false, "", errors.New(""), &re) // Some bad values testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an interface variable", 0, errors.New("")) testCheck(c, check.Implements, false, "ifaceptr 
should be a pointer to an interface variable", 0, interface{}(nil)) testCheck(c, check.Implements, false, "", interface{}(nil), &e) } charm-2.1.1/src/gopkg.in/check.v1/LICENSE0000664000175000017500000000254112672604604016516 0ustar marcomarcoGocheck - A rich testing framework for Go Copyright (c) 2010-2013 Gustavo Niemeyer All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. charm-2.1.1/src/gopkg.in/check.v1/check.go0000664000175000017500000005203112672604604017114 0ustar marcomarco// Package check is a rich testing extension for Go's testing package. 
// // For details about the project, see: // // http://labix.org/gocheck // package check import ( "bytes" "errors" "fmt" "io" "math/rand" "os" "path" "path/filepath" "reflect" "regexp" "runtime" "strconv" "strings" "sync" "sync/atomic" "time" ) // ----------------------------------------------------------------------- // Internal type which deals with suite method calling. const ( fixtureKd = iota testKd ) type funcKind int const ( succeededSt = iota failedSt skippedSt panickedSt fixturePanickedSt missedSt ) type funcStatus uint32 // A method value can't reach its own Method structure. type methodType struct { reflect.Value Info reflect.Method } func newMethod(receiver reflect.Value, i int) *methodType { return &methodType{receiver.Method(i), receiver.Type().Method(i)} } func (method *methodType) PC() uintptr { return method.Info.Func.Pointer() } func (method *methodType) suiteName() string { t := method.Info.Type.In(0) if t.Kind() == reflect.Ptr { t = t.Elem() } return t.Name() } func (method *methodType) String() string { return method.suiteName() + "." 
+ method.Info.Name } func (method *methodType) matches(re *regexp.Regexp) bool { return (re.MatchString(method.Info.Name) || re.MatchString(method.suiteName()) || re.MatchString(method.String())) } type C struct { method *methodType kind funcKind testName string _status funcStatus logb *logger logw io.Writer done chan *C reason string mustFail bool tempDir *tempDir benchMem bool startTime time.Time timer } func (c *C) status() funcStatus { return funcStatus(atomic.LoadUint32((*uint32)(&c._status))) } func (c *C) setStatus(s funcStatus) { atomic.StoreUint32((*uint32)(&c._status), uint32(s)) } func (c *C) stopNow() { runtime.Goexit() } // logger is a concurrency safe byte.Buffer type logger struct { sync.Mutex writer bytes.Buffer } func (l *logger) Write(buf []byte) (int, error) { l.Lock() defer l.Unlock() return l.writer.Write(buf) } func (l *logger) WriteTo(w io.Writer) (int64, error) { l.Lock() defer l.Unlock() return l.writer.WriteTo(w) } func (l *logger) String() string { l.Lock() defer l.Unlock() return l.writer.String() } // ----------------------------------------------------------------------- // Handling of temporary files and directories. type tempDir struct { sync.Mutex path string counter int } func (td *tempDir) newPath() string { td.Lock() defer td.Unlock() if td.path == "" { var err error for i := 0; i != 100; i++ { path := fmt.Sprintf("%s%ccheck-%d", os.TempDir(), os.PathSeparator, rand.Int()) if err = os.Mkdir(path, 0700); err == nil { td.path = path break } } if td.path == "" { panic("Couldn't create temporary directory: " + err.Error()) } } result := filepath.Join(td.path, strconv.Itoa(td.counter)) td.counter += 1 return result } func (td *tempDir) removeAll() { td.Lock() defer td.Unlock() if td.path != "" { err := os.RemoveAll(td.path) if err != nil { fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: "+err.Error()) } } } // Create a new temporary directory which is automatically removed after // the suite finishes running. 
func (c *C) MkDir() string { path := c.tempDir.newPath() if err := os.Mkdir(path, 0700); err != nil { panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error())) } return path } // ----------------------------------------------------------------------- // Low-level logging functions. func (c *C) log(args ...interface{}) { c.writeLog([]byte(fmt.Sprint(args...) + "\n")) } func (c *C) logf(format string, args ...interface{}) { c.writeLog([]byte(fmt.Sprintf(format+"\n", args...))) } func (c *C) logNewLine() { c.writeLog([]byte{'\n'}) } func (c *C) writeLog(buf []byte) { c.logb.Write(buf) if c.logw != nil { c.logw.Write(buf) } } func hasStringOrError(x interface{}) (ok bool) { _, ok = x.(fmt.Stringer) if ok { return } _, ok = x.(error) return } func (c *C) logValue(label string, value interface{}) { if label == "" { if hasStringOrError(value) { c.logf("... %#v (%q)", value, value) } else { c.logf("... %#v", value) } } else if value == nil { c.logf("... %s = nil", label) } else { if hasStringOrError(value) { fv := fmt.Sprintf("%#v", value) qv := fmt.Sprintf("%q", value) if fv != qv { c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv) return } } if s, ok := value.(string); ok && isMultiLine(s) { c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value)) c.logMultiLine(s) } else { c.logf("... %s %s = %#v", label, reflect.TypeOf(value), value) } } } func (c *C) logMultiLine(s string) { b := make([]byte, 0, len(s)*2) i := 0 n := len(s) for i < n { j := i + 1 for j < n && s[j-1] != '\n' { j++ } b = append(b, "... "...) b = strconv.AppendQuote(b, s[i:j]) if j < n { b = append(b, " +"...) } b = append(b, '\n') i = j } c.writeLog(b) } func isMultiLine(s string) bool { for i := 0; i+1 < len(s); i++ { if s[i] == '\n' { return true } } return false } func (c *C) logString(issue string) { c.log("... ", issue) } func (c *C) logCaller(skip int) { // This is a bit heavier than it ought to be. skip += 1 // Our own frame. 
pc, callerFile, callerLine, ok := runtime.Caller(skip) if !ok { return } var testFile string var testLine int testFunc := runtime.FuncForPC(c.method.PC()) if runtime.FuncForPC(pc) != testFunc { for { skip += 1 if pc, file, line, ok := runtime.Caller(skip); ok { // Note that the test line may be different on // distinct calls for the same test. Showing // the "internal" line is helpful when debugging. if runtime.FuncForPC(pc) == testFunc { testFile, testLine = file, line break } } else { break } } } if testFile != "" && (testFile != callerFile || testLine != callerLine) { c.logCode(testFile, testLine) } c.logCode(callerFile, callerLine) } func (c *C) logCode(path string, line int) { c.logf("%s:%d:", nicePath(path), line) code, err := printLine(path, line) if code == "" { code = "..." // XXX Open the file and take the raw line. if err != nil { code += err.Error() } } c.log(indent(code, " ")) } var valueGo = filepath.Join("reflect", "value.go") var asmGo = filepath.Join("runtime", "asm_") func (c *C) logPanic(skip int, value interface{}) { skip++ // Our own frame. initialSkip := skip for ; ; skip++ { if pc, file, line, ok := runtime.Caller(skip); ok { if skip == initialSkip { c.logf("... Panic: %s (PC=0x%X)\n", value, pc) } name := niceFuncName(pc) path := nicePath(file) if strings.Contains(path, "/gopkg.in/check.v") { continue } if name == "Value.call" && strings.HasSuffix(path, valueGo) { continue } if (name == "call16" || name == "call32") && strings.Contains(path, asmGo) { continue } c.logf("%s:%d\n in %s", nicePath(file), line, name) } else { break } } } func (c *C) logSoftPanic(issue string) { c.log("... Panic: ", issue) } func (c *C) logArgPanic(method *methodType, expectedType string) { c.logf("... Panic: %s argument should be %s", niceFuncName(method.PC()), expectedType) } // ----------------------------------------------------------------------- // Some simple formatting helpers. 
var initWD, initWDErr = os.Getwd() func init() { if initWDErr == nil { initWD = strings.Replace(initWD, "\\", "/", -1) + "/" } } func nicePath(path string) string { if initWDErr == nil { if strings.HasPrefix(path, initWD) { return path[len(initWD):] } } return path } func niceFuncPath(pc uintptr) string { function := runtime.FuncForPC(pc) if function != nil { filename, line := function.FileLine(pc) return fmt.Sprintf("%s:%d", nicePath(filename), line) } return "" } func niceFuncName(pc uintptr) string { function := runtime.FuncForPC(pc) if function != nil { name := path.Base(function.Name()) if i := strings.Index(name, "."); i > 0 { name = name[i+1:] } if strings.HasPrefix(name, "(*") { if i := strings.Index(name, ")"); i > 0 { name = name[2:i] + name[i+1:] } } if i := strings.LastIndex(name, ".*"); i != -1 { name = name[:i] + "." + name[i+2:] } if i := strings.LastIndex(name, "·"); i != -1 { name = name[:i] + "." + name[i+2:] } return name } return "" } // ----------------------------------------------------------------------- // Result tracker to aggregate call results. type Result struct { Succeeded int Failed int Skipped int Panicked int FixturePanicked int ExpectedFailures int Missed int // Not even tried to run, related to a panic in the fixture. RunError error // Houston, we've got a problem. 
WorkDir string // If KeepWorkDir is true } type resultTracker struct { result Result _lastWasProblem bool _waiting int _missed int _expectChan chan *C _doneChan chan *C _stopChan chan bool } func newResultTracker() *resultTracker { return &resultTracker{_expectChan: make(chan *C), // Synchronous _doneChan: make(chan *C, 32), // Asynchronous _stopChan: make(chan bool)} // Synchronous } func (tracker *resultTracker) start() { go tracker._loopRoutine() } func (tracker *resultTracker) waitAndStop() { <-tracker._stopChan } func (tracker *resultTracker) expectCall(c *C) { tracker._expectChan <- c } func (tracker *resultTracker) callDone(c *C) { tracker._doneChan <- c } func (tracker *resultTracker) _loopRoutine() { for { var c *C if tracker._waiting > 0 { // Calls still running. Can't stop. select { // XXX Reindent this (not now to make diff clear) case c = <-tracker._expectChan: tracker._waiting += 1 case c = <-tracker._doneChan: tracker._waiting -= 1 switch c.status() { case succeededSt: if c.kind == testKd { if c.mustFail { tracker.result.ExpectedFailures++ } else { tracker.result.Succeeded++ } } case failedSt: tracker.result.Failed++ case panickedSt: if c.kind == fixtureKd { tracker.result.FixturePanicked++ } else { tracker.result.Panicked++ } case fixturePanickedSt: // Track it as missed, since the panic // was on the fixture, not on the test. tracker.result.Missed++ case missedSt: tracker.result.Missed++ case skippedSt: if c.kind == testKd { tracker.result.Skipped++ } } } } else { // No calls. Can stop, but no done calls here. select { case tracker._stopChan <- true: return case c = <-tracker._expectChan: tracker._waiting += 1 case c = <-tracker._doneChan: panic("Tracker got an unexpected done call.") } } } } // ----------------------------------------------------------------------- // The underlying suite runner. 
type suiteRunner struct { suite interface{} setUpSuite, tearDownSuite *methodType setUpTest, tearDownTest *methodType tests []*methodType tracker *resultTracker tempDir *tempDir keepDir bool output *outputWriter reportedProblemLast bool benchTime time.Duration benchMem bool } type RunConf struct { Output io.Writer Stream bool Verbose bool Filter string Benchmark bool BenchmarkTime time.Duration // Defaults to 1 second BenchmarkMem bool KeepWorkDir bool } // Create a new suiteRunner able to run all methods in the given suite. func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner { var conf RunConf if runConf != nil { conf = *runConf } if conf.Output == nil { conf.Output = os.Stdout } if conf.Benchmark { conf.Verbose = true } suiteType := reflect.TypeOf(suite) suiteNumMethods := suiteType.NumMethod() suiteValue := reflect.ValueOf(suite) runner := &suiteRunner{ suite: suite, output: newOutputWriter(conf.Output, conf.Stream, conf.Verbose), tracker: newResultTracker(), benchTime: conf.BenchmarkTime, benchMem: conf.BenchmarkMem, tempDir: &tempDir{}, keepDir: conf.KeepWorkDir, tests: make([]*methodType, 0, suiteNumMethods), } if runner.benchTime == 0 { runner.benchTime = 1 * time.Second } var filterRegexp *regexp.Regexp if conf.Filter != "" { if regexp, err := regexp.Compile(conf.Filter); err != nil { msg := "Bad filter expression: " + err.Error() runner.tracker.result.RunError = errors.New(msg) return runner } else { filterRegexp = regexp } } for i := 0; i != suiteNumMethods; i++ { method := newMethod(suiteValue, i) switch method.Info.Name { case "SetUpSuite": runner.setUpSuite = method case "TearDownSuite": runner.tearDownSuite = method case "SetUpTest": runner.setUpTest = method case "TearDownTest": runner.tearDownTest = method default: prefix := "Test" if conf.Benchmark { prefix = "Benchmark" } if !strings.HasPrefix(method.Info.Name, prefix) { continue } if filterRegexp == nil || method.matches(filterRegexp) { runner.tests = append(runner.tests, 
method) } } } return runner } // Run all methods in the given suite. func (runner *suiteRunner) run() *Result { if runner.tracker.result.RunError == nil && len(runner.tests) > 0 { runner.tracker.start() if runner.checkFixtureArgs() { c := runner.runFixture(runner.setUpSuite, "", nil) if c == nil || c.status() == succeededSt { for i := 0; i != len(runner.tests); i++ { c := runner.runTest(runner.tests[i]) if c.status() == fixturePanickedSt { runner.skipTests(missedSt, runner.tests[i+1:]) break } } } else if c != nil && c.status() == skippedSt { runner.skipTests(skippedSt, runner.tests) } else { runner.skipTests(missedSt, runner.tests) } runner.runFixture(runner.tearDownSuite, "", nil) } else { runner.skipTests(missedSt, runner.tests) } runner.tracker.waitAndStop() if runner.keepDir { runner.tracker.result.WorkDir = runner.tempDir.path } else { runner.tempDir.removeAll() } } return &runner.tracker.result } // Create a call object with the given suite method, and fork a // goroutine with the provided dispatcher for running it. func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { var logw io.Writer if runner.output.Stream { logw = runner.output } if logb == nil { logb = new(logger) } c := &C{ method: method, kind: kind, testName: testName, logb: logb, logw: logw, tempDir: runner.tempDir, done: make(chan *C, 1), timer: timer{benchTime: runner.benchTime}, startTime: time.Now(), benchMem: runner.benchMem, } runner.tracker.expectCall(c) go (func() { runner.reportCallStarted(c) defer runner.callDone(c) dispatcher(c) })() return c } // Same as forkCall(), but wait for call to finish before returning. func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { c := runner.forkCall(method, kind, testName, logb, dispatcher) <-c.done return c } // Handle a finished call. If there were any panics, update the call status // accordingly. 
Then, mark the call as done and report to the tracker. func (runner *suiteRunner) callDone(c *C) { value := recover() if value != nil { switch v := value.(type) { case *fixturePanic: if v.status == skippedSt { c.setStatus(skippedSt) } else { c.logSoftPanic("Fixture has panicked (see related PANIC)") c.setStatus(fixturePanickedSt) } default: c.logPanic(1, value) c.setStatus(panickedSt) } } if c.mustFail { switch c.status() { case failedSt: c.setStatus(succeededSt) case succeededSt: c.setStatus(failedSt) c.logString("Error: Test succeeded, but was expected to fail") c.logString("Reason: " + c.reason) } } runner.reportCallDone(c) c.done <- c } // Runs a fixture call synchronously. The fixture will still be run in a // goroutine like all suite methods, but this method will not return // while the fixture goroutine is not done, because the fixture must be // run in a desired order. func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C { if method != nil { c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) { c.ResetTimer() c.StartTimer() defer c.StopTimer() c.method.Call([]reflect.Value{reflect.ValueOf(c)}) }) return c } return nil } // Run the fixture method with runFixture(), but panic with a fixturePanic{} // in case the fixture method panics. This makes it easier to track the // fixture panic together with other call panics within forkTest(). func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C { if skipped != nil && *skipped { return nil } c := runner.runFixture(method, testName, logb) if c != nil && c.status() != succeededSt { if skipped != nil { *skipped = c.status() == skippedSt } panic(&fixturePanic{c.status(), method}) } return c } type fixturePanic struct { status funcStatus method *methodType } // Run the suite test method, together with the test-specific fixture, // asynchronously. 
func (runner *suiteRunner) forkTest(method *methodType) *C { testName := method.String() return runner.forkCall(method, testKd, testName, nil, func(c *C) { var skipped bool defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped) defer c.StopTimer() benchN := 1 for { runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped) mt := c.method.Type() if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) { // Rather than a plain panic, provide a more helpful message when // the argument type is incorrect. c.setStatus(panickedSt) c.logArgPanic(c.method, "*check.C") return } if strings.HasPrefix(c.method.Info.Name, "Test") { c.ResetTimer() c.StartTimer() c.method.Call([]reflect.Value{reflect.ValueOf(c)}) return } if !strings.HasPrefix(c.method.Info.Name, "Benchmark") { panic("unexpected method prefix: " + c.method.Info.Name) } runtime.GC() c.N = benchN c.ResetTimer() c.StartTimer() c.method.Call([]reflect.Value{reflect.ValueOf(c)}) c.StopTimer() if c.status() != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 { return } perOpN := int(1e9) if c.nsPerOp() != 0 { perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp()) } // Logic taken from the stock testing package: // - Run more iterations than we think we'll need for a second (1.5x). // - Don't grow too fast in case we had timing errors previously. // - Be sure to run at least one more than last time. benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1) benchN = roundUp(benchN) skipped = true // Don't run the deferred one if this panics. runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil) skipped = false } }) } // Same as forkTest(), but wait for the test to finish before returning. func (runner *suiteRunner) runTest(method *methodType) *C { c := runner.forkTest(method) <-c.done return c } // Helper to mark tests as skipped or missed. A bit heavy for what // it does, but it enables homogeneous handling of tracking, including // nice verbose output. 
func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) { for _, method := range methods { runner.runFunc(method, testKd, "", nil, func(c *C) { c.setStatus(status) }) } } // Verify if the fixture arguments are *check.C. In case of errors, // log the error as a panic in the fixture method call, and return false. func (runner *suiteRunner) checkFixtureArgs() bool { succeeded := true argType := reflect.TypeOf(&C{}) for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest} { if method != nil { mt := method.Type() if mt.NumIn() != 1 || mt.In(0) != argType { succeeded = false runner.runFunc(method, fixtureKd, "", nil, func(c *C) { c.logArgPanic(method, "*check.C") c.setStatus(panickedSt) }) } } } return succeeded } func (runner *suiteRunner) reportCallStarted(c *C) { runner.output.WriteCallStarted("START", c) } func (runner *suiteRunner) reportCallDone(c *C) { runner.tracker.callDone(c) switch c.status() { case succeededSt: if c.mustFail { runner.output.WriteCallSuccess("FAIL EXPECTED", c) } else { runner.output.WriteCallSuccess("PASS", c) } case skippedSt: runner.output.WriteCallSuccess("SKIP", c) case failedSt: runner.output.WriteCallProblem("FAIL", c) case panickedSt: runner.output.WriteCallProblem("PANIC", c) case fixturePanickedSt: // That's a testKd call reporting that its fixture // has panicked. The fixture call which caused the // panic itself was tracked above. We'll report to // aid debugging. runner.output.WriteCallProblem("PANIC", c) case missedSt: runner.output.WriteCallSuccess("MISS", c) } } charm-2.1.1/src/gopkg.in/check.v1/run_test.go0000664000175000017500000003025712672604604017710 0ustar marcomarco// These tests verify the test running logic. package check_test import ( "errors" . 
"gopkg.in/check.v1" "os" "sync" ) var runnerS = Suite(&RunS{}) type RunS struct{} func (s *RunS) TestCountSuite(c *C) { suitesRun += 1 } // ----------------------------------------------------------------------- // Tests ensuring result counting works properly. func (s *RunS) TestSuccess(c *C) { output := String{} result := Run(&SuccessHelper{}, &RunConf{Output: &output}) c.Check(result.Succeeded, Equals, 1) c.Check(result.Failed, Equals, 0) c.Check(result.Skipped, Equals, 0) c.Check(result.Panicked, Equals, 0) c.Check(result.FixturePanicked, Equals, 0) c.Check(result.Missed, Equals, 0) c.Check(result.RunError, IsNil) } func (s *RunS) TestFailure(c *C) { output := String{} result := Run(&FailHelper{}, &RunConf{Output: &output}) c.Check(result.Succeeded, Equals, 0) c.Check(result.Failed, Equals, 1) c.Check(result.Skipped, Equals, 0) c.Check(result.Panicked, Equals, 0) c.Check(result.FixturePanicked, Equals, 0) c.Check(result.Missed, Equals, 0) c.Check(result.RunError, IsNil) } func (s *RunS) TestFixture(c *C) { output := String{} result := Run(&FixtureHelper{}, &RunConf{Output: &output}) c.Check(result.Succeeded, Equals, 2) c.Check(result.Failed, Equals, 0) c.Check(result.Skipped, Equals, 0) c.Check(result.Panicked, Equals, 0) c.Check(result.FixturePanicked, Equals, 0) c.Check(result.Missed, Equals, 0) c.Check(result.RunError, IsNil) } func (s *RunS) TestPanicOnTest(c *C) { output := String{} helper := &FixtureHelper{panicOn: "Test1"} result := Run(helper, &RunConf{Output: &output}) c.Check(result.Succeeded, Equals, 1) c.Check(result.Failed, Equals, 0) c.Check(result.Skipped, Equals, 0) c.Check(result.Panicked, Equals, 1) c.Check(result.FixturePanicked, Equals, 0) c.Check(result.Missed, Equals, 0) c.Check(result.RunError, IsNil) } func (s *RunS) TestPanicOnSetUpTest(c *C) { output := String{} helper := &FixtureHelper{panicOn: "SetUpTest"} result := Run(helper, &RunConf{Output: &output}) c.Check(result.Succeeded, Equals, 0) c.Check(result.Failed, Equals, 0) 
c.Check(result.Skipped, Equals, 0) c.Check(result.Panicked, Equals, 0) c.Check(result.FixturePanicked, Equals, 1) c.Check(result.Missed, Equals, 2) c.Check(result.RunError, IsNil) } func (s *RunS) TestPanicOnSetUpSuite(c *C) { output := String{} helper := &FixtureHelper{panicOn: "SetUpSuite"} result := Run(helper, &RunConf{Output: &output}) c.Check(result.Succeeded, Equals, 0) c.Check(result.Failed, Equals, 0) c.Check(result.Skipped, Equals, 0) c.Check(result.Panicked, Equals, 0) c.Check(result.FixturePanicked, Equals, 1) c.Check(result.Missed, Equals, 2) c.Check(result.RunError, IsNil) } // ----------------------------------------------------------------------- // Check result aggregation. func (s *RunS) TestAdd(c *C) { result := &Result{ Succeeded: 1, Skipped: 2, Failed: 3, Panicked: 4, FixturePanicked: 5, Missed: 6, ExpectedFailures: 7, } result.Add(&Result{ Succeeded: 10, Skipped: 20, Failed: 30, Panicked: 40, FixturePanicked: 50, Missed: 60, ExpectedFailures: 70, }) c.Check(result.Succeeded, Equals, 11) c.Check(result.Skipped, Equals, 22) c.Check(result.Failed, Equals, 33) c.Check(result.Panicked, Equals, 44) c.Check(result.FixturePanicked, Equals, 55) c.Check(result.Missed, Equals, 66) c.Check(result.ExpectedFailures, Equals, 77) c.Check(result.RunError, IsNil) } // ----------------------------------------------------------------------- // Check the Passed() method. 
func (s *RunS) TestPassed(c *C) { c.Assert((&Result{}).Passed(), Equals, true) c.Assert((&Result{Succeeded: 1}).Passed(), Equals, true) c.Assert((&Result{Skipped: 1}).Passed(), Equals, true) c.Assert((&Result{Failed: 1}).Passed(), Equals, false) c.Assert((&Result{Panicked: 1}).Passed(), Equals, false) c.Assert((&Result{FixturePanicked: 1}).Passed(), Equals, false) c.Assert((&Result{Missed: 1}).Passed(), Equals, false) c.Assert((&Result{RunError: errors.New("!")}).Passed(), Equals, false) } // ----------------------------------------------------------------------- // Check that result printing is working correctly. func (s *RunS) TestPrintSuccess(c *C) { result := &Result{Succeeded: 5} c.Check(result.String(), Equals, "OK: 5 passed") } func (s *RunS) TestPrintFailure(c *C) { result := &Result{Failed: 5} c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FAILED") } func (s *RunS) TestPrintSkipped(c *C) { result := &Result{Skipped: 5} c.Check(result.String(), Equals, "OK: 0 passed, 5 skipped") } func (s *RunS) TestPrintExpectedFailures(c *C) { result := &Result{ExpectedFailures: 5} c.Check(result.String(), Equals, "OK: 0 passed, 5 expected failures") } func (s *RunS) TestPrintPanicked(c *C) { result := &Result{Panicked: 5} c.Check(result.String(), Equals, "OOPS: 0 passed, 5 PANICKED") } func (s *RunS) TestPrintFixturePanicked(c *C) { result := &Result{FixturePanicked: 5} c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FIXTURE-PANICKED") } func (s *RunS) TestPrintMissed(c *C) { result := &Result{Missed: 5} c.Check(result.String(), Equals, "OOPS: 0 passed, 5 MISSED") } func (s *RunS) TestPrintAll(c *C) { result := &Result{Succeeded: 1, Skipped: 2, ExpectedFailures: 3, Panicked: 4, FixturePanicked: 5, Missed: 6} c.Check(result.String(), Equals, "OOPS: 1 passed, 2 skipped, 3 expected failures, 4 PANICKED, "+ "5 FIXTURE-PANICKED, 6 MISSED") } func (s *RunS) TestPrintRunError(c *C) { result := &Result{Succeeded: 1, Failed: 1, RunError: errors.New("Kaboom!")} 
c.Check(result.String(), Equals, "ERROR: Kaboom!") } // ----------------------------------------------------------------------- // Verify that the method pattern flag works correctly. func (s *RunS) TestFilterTestName(c *C) { helper := FixtureHelper{} output := String{} runConf := RunConf{Output: &output, Filter: "Test[91]"} Run(&helper, &runConf) c.Check(helper.calls[0], Equals, "SetUpSuite") c.Check(helper.calls[1], Equals, "SetUpTest") c.Check(helper.calls[2], Equals, "Test1") c.Check(helper.calls[3], Equals, "TearDownTest") c.Check(helper.calls[4], Equals, "TearDownSuite") c.Check(len(helper.calls), Equals, 5) } func (s *RunS) TestFilterTestNameWithAll(c *C) { helper := FixtureHelper{} output := String{} runConf := RunConf{Output: &output, Filter: ".*"} Run(&helper, &runConf) c.Check(helper.calls[0], Equals, "SetUpSuite") c.Check(helper.calls[1], Equals, "SetUpTest") c.Check(helper.calls[2], Equals, "Test1") c.Check(helper.calls[3], Equals, "TearDownTest") c.Check(helper.calls[4], Equals, "SetUpTest") c.Check(helper.calls[5], Equals, "Test2") c.Check(helper.calls[6], Equals, "TearDownTest") c.Check(helper.calls[7], Equals, "TearDownSuite") c.Check(len(helper.calls), Equals, 8) } func (s *RunS) TestFilterSuiteName(c *C) { helper := FixtureHelper{} output := String{} runConf := RunConf{Output: &output, Filter: "FixtureHelper"} Run(&helper, &runConf) c.Check(helper.calls[0], Equals, "SetUpSuite") c.Check(helper.calls[1], Equals, "SetUpTest") c.Check(helper.calls[2], Equals, "Test1") c.Check(helper.calls[3], Equals, "TearDownTest") c.Check(helper.calls[4], Equals, "SetUpTest") c.Check(helper.calls[5], Equals, "Test2") c.Check(helper.calls[6], Equals, "TearDownTest") c.Check(helper.calls[7], Equals, "TearDownSuite") c.Check(len(helper.calls), Equals, 8) } func (s *RunS) TestFilterSuiteNameAndTestName(c *C) { helper := FixtureHelper{} output := String{} runConf := RunConf{Output: &output, Filter: "FixtureHelper\\.Test2"} Run(&helper, &runConf) 
c.Check(helper.calls[0], Equals, "SetUpSuite") c.Check(helper.calls[1], Equals, "SetUpTest") c.Check(helper.calls[2], Equals, "Test2") c.Check(helper.calls[3], Equals, "TearDownTest") c.Check(helper.calls[4], Equals, "TearDownSuite") c.Check(len(helper.calls), Equals, 5) } func (s *RunS) TestFilterAllOut(c *C) { helper := FixtureHelper{} output := String{} runConf := RunConf{Output: &output, Filter: "NotFound"} Run(&helper, &runConf) c.Check(len(helper.calls), Equals, 0) } func (s *RunS) TestRequirePartialMatch(c *C) { helper := FixtureHelper{} output := String{} runConf := RunConf{Output: &output, Filter: "est"} Run(&helper, &runConf) c.Check(len(helper.calls), Equals, 8) } func (s *RunS) TestFilterError(c *C) { helper := FixtureHelper{} output := String{} runConf := RunConf{Output: &output, Filter: "]["} result := Run(&helper, &runConf) c.Check(result.String(), Equals, "ERROR: Bad filter expression: error parsing regexp: missing closing ]: `[`") c.Check(len(helper.calls), Equals, 0) } // ----------------------------------------------------------------------- // Verify that List works correctly. func (s *RunS) TestListFiltered(c *C) { names := List(&FixtureHelper{}, &RunConf{Filter: "1"}) c.Assert(names, DeepEquals, []string{ "FixtureHelper.Test1", }) } func (s *RunS) TestList(c *C) { names := List(&FixtureHelper{}, &RunConf{}) c.Assert(names, DeepEquals, []string{ "FixtureHelper.Test1", "FixtureHelper.Test2", }) } // ----------------------------------------------------------------------- // Verify that verbose mode prints tests which pass as well. 
func (s *RunS) TestVerboseMode(c *C) { helper := FixtureHelper{} output := String{} runConf := RunConf{Output: &output, Verbose: true} Run(&helper, &runConf) expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t *[.0-9]+s\n" + "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n" c.Assert(output.value, Matches, expected) } func (s *RunS) TestVerboseModeWithFailBeforePass(c *C) { helper := FixtureHelper{panicOn: "Test1"} output := String{} runConf := RunConf{Output: &output, Verbose: true} Run(&helper, &runConf) expected := "(?s).*PANIC.*\n-+\n" + // Should have an extra line. "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n" c.Assert(output.value, Matches, expected) } // ----------------------------------------------------------------------- // Verify the stream output mode. In this mode there's no output caching. type StreamHelper struct { l2 sync.Mutex l3 sync.Mutex } func (s *StreamHelper) SetUpSuite(c *C) { c.Log("0") } func (s *StreamHelper) Test1(c *C) { c.Log("1") s.l2.Lock() s.l3.Lock() go func() { s.l2.Lock() // Wait for "2". c.Log("3") s.l3.Unlock() }() } func (s *StreamHelper) Test2(c *C) { c.Log("2") s.l2.Unlock() s.l3.Lock() // Wait for "3". 
c.Fail() c.Log("4") } func (s *RunS) TestStreamMode(c *C) { helper := &StreamHelper{} output := String{} runConf := RunConf{Output: &output, Stream: true} Run(helper, &runConf) expected := "START: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\n0\n" + "PASS: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\t *[.0-9]+s\n\n" + "START: run_test\\.go:[0-9]+: StreamHelper\\.Test1\n1\n" + "PASS: run_test\\.go:[0-9]+: StreamHelper\\.Test1\t *[.0-9]+s\n\n" + "START: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n2\n3\n4\n" + "FAIL: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n\n" c.Assert(output.value, Matches, expected) } type StreamMissHelper struct{} func (s *StreamMissHelper) SetUpSuite(c *C) { c.Log("0") c.Fail() } func (s *StreamMissHelper) Test1(c *C) { c.Log("1") } func (s *RunS) TestStreamModeWithMiss(c *C) { helper := &StreamMissHelper{} output := String{} runConf := RunConf{Output: &output, Stream: true} Run(helper, &runConf) expected := "START: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n0\n" + "FAIL: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n\n" + "START: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n" + "MISS: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n\n" c.Assert(output.value, Matches, expected) } // ----------------------------------------------------------------------- // Verify that that the keep work dir request indeed does so. type WorkDirSuite struct {} func (s *WorkDirSuite) Test(c *C) { c.MkDir() } func (s *RunS) TestKeepWorkDir(c *C) { output := String{} runConf := RunConf{Output: &output, Verbose: true, KeepWorkDir: true} result := Run(&WorkDirSuite{}, &runConf) c.Assert(result.String(), Matches, ".*\nWORK=" + result.WorkDir) stat, err := os.Stat(result.WorkDir) c.Assert(err, IsNil) c.Assert(stat.IsDir(), Equals, true) } charm-2.1.1/src/gopkg.in/check.v1/printer_test.go0000664000175000017500000000462612672604604020570 0ustar marcomarcopackage check_test import ( . 
"gopkg.in/check.v1" ) var _ = Suite(&PrinterS{}) type PrinterS struct{} func (s *PrinterS) TestCountSuite(c *C) { suitesRun += 1 } var printTestFuncLine int func init() { printTestFuncLine = getMyLine() + 3 } func printTestFunc() { println(1) // Comment1 if 2 == 2 { // Comment2 println(3) // Comment3 } switch 5 { case 6: println(6) // Comment6 println(7) } switch interface{}(9).(type) {// Comment9 case int: println(10) println(11) } select { case <-(chan bool)(nil): println(14) println(15) default: println(16) println(17) } println(19, 20) _ = func() { println(21) println(22) } println(24, func() { println(25) }) // Leading comment // with multiple lines. println(29) // Comment29 } var printLineTests = []struct { line int output string }{ {1, "println(1) // Comment1"}, {2, "if 2 == 2 { // Comment2\n ...\n}"}, {3, "println(3) // Comment3"}, {5, "switch 5 {\n...\n}"}, {6, "case 6:\n println(6) // Comment6\n ..."}, {7, "println(7)"}, {9, "switch interface{}(9).(type) { // Comment9\n...\n}"}, {10, "case int:\n println(10)\n ..."}, {14, "case <-(chan bool)(nil):\n println(14)\n ..."}, {15, "println(15)"}, {16, "default:\n println(16)\n ..."}, {17, "println(17)"}, {19, "println(19,\n 20)"}, {20, "println(19,\n 20)"}, {21, "_ = func() {\n println(21)\n println(22)\n}"}, {22, "println(22)"}, {24, "println(24, func() {\n println(25)\n})"}, {25, "println(25)"}, {26, "println(24, func() {\n println(25)\n})"}, {29, "// Leading comment\n// with multiple lines.\nprintln(29) // Comment29"}, } func (s *PrinterS) TestPrintLine(c *C) { for _, test := range printLineTests { output, err := PrintLine("printer_test.go", printTestFuncLine+test.line) c.Assert(err, IsNil) c.Assert(output, Equals, test.output) } } var indentTests = []struct { in, out string }{ {"", ""}, {"\n", "\n"}, {"a", ">>>a"}, {"a\n", ">>>a\n"}, {"a\nb", ">>>a\n>>>b"}, {" ", ">>> "}, } func (s *PrinterS) TestIndent(c *C) { for _, test := range indentTests { out := Indent(test.in, ">>>") c.Assert(out, Equals, test.out) 
} } charm-2.1.1/src/gopkg.in/check.v1/benchmark.go0000664000175000017500000001220212672604604017765 0ustar marcomarco// Copyright (c) 2012 The Go Authors. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package check import ( "fmt" "runtime" "time" ) var memStats runtime.MemStats // testingB is a type passed to Benchmark functions to manage benchmark // timing and to specify the number of iterations to run. 
type timer struct { start time.Time // Time test or benchmark started duration time.Duration N int bytes int64 timerOn bool benchTime time.Duration // The initial states of memStats.Mallocs and memStats.TotalAlloc. startAllocs uint64 startBytes uint64 // The net total of this test after being run. netAllocs uint64 netBytes uint64 } // StartTimer starts timing a test. This function is called automatically // before a benchmark starts, but it can also used to resume timing after // a call to StopTimer. func (c *C) StartTimer() { if !c.timerOn { c.start = time.Now() c.timerOn = true runtime.ReadMemStats(&memStats) c.startAllocs = memStats.Mallocs c.startBytes = memStats.TotalAlloc } } // StopTimer stops timing a test. This can be used to pause the timer // while performing complex initialization that you don't // want to measure. func (c *C) StopTimer() { if c.timerOn { c.duration += time.Now().Sub(c.start) c.timerOn = false runtime.ReadMemStats(&memStats) c.netAllocs += memStats.Mallocs - c.startAllocs c.netBytes += memStats.TotalAlloc - c.startBytes } } // ResetTimer sets the elapsed benchmark time to zero. // It does not affect whether the timer is running. func (c *C) ResetTimer() { if c.timerOn { c.start = time.Now() runtime.ReadMemStats(&memStats) c.startAllocs = memStats.Mallocs c.startBytes = memStats.TotalAlloc } c.duration = 0 c.netAllocs = 0 c.netBytes = 0 } // SetBytes informs the number of bytes that the benchmark processes // on each iteration. If this is called in a benchmark it will also // report MB/s. 
func (c *C) SetBytes(n int64) { c.bytes = n } func (c *C) nsPerOp() int64 { if c.N <= 0 { return 0 } return c.duration.Nanoseconds() / int64(c.N) } func (c *C) mbPerSec() float64 { if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 { return 0 } return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds() } func (c *C) timerString() string { if c.N <= 0 { return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9) } mbs := c.mbPerSec() mb := "" if mbs != 0 { mb = fmt.Sprintf("\t%7.2f MB/s", mbs) } nsop := c.nsPerOp() ns := fmt.Sprintf("%10d ns/op", nsop) if c.N > 0 && nsop < 100 { // The format specifiers here make sure that // the ones digits line up for all three possible formats. if nsop < 10 { ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N)) } else { ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N)) } } memStats := "" if c.benchMem { allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N)) allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N)) memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs) } return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats) } func min(x, y int) int { if x > y { return y } return x } func max(x, y int) int { if x < y { return y } return x } // roundDown10 rounds a number down to the nearest power of 10. func roundDown10(n int) int { var tens = 0 // tens = floor(log_10(n)) for n > 10 { n = n / 10 tens++ } // result = 10^tens result := 1 for i := 0; i < tens; i++ { result *= 10 } return result } // roundUp rounds x up to a number of the form [1eX, 2eX, 5eX]. func roundUp(n int) int { base := roundDown10(n) if n < (2 * base) { return 2 * base } if n < (5 * base) { return 5 * base } return 10 * base } charm-2.1.1/src/gopkg.in/check.v1/fixture_test.go0000664000175000017500000003330512672604604020567 0ustar marcomarco// Tests for the behavior of the test fixture system. package check_test import ( . 
"gopkg.in/check.v1" ) // ----------------------------------------------------------------------- // Fixture test suite. type FixtureS struct{} var fixtureS = Suite(&FixtureS{}) func (s *FixtureS) TestCountSuite(c *C) { suitesRun += 1 } // ----------------------------------------------------------------------- // Basic fixture ordering verification. func (s *FixtureS) TestOrder(c *C) { helper := FixtureHelper{} Run(&helper, nil) c.Check(helper.calls[0], Equals, "SetUpSuite") c.Check(helper.calls[1], Equals, "SetUpTest") c.Check(helper.calls[2], Equals, "Test1") c.Check(helper.calls[3], Equals, "TearDownTest") c.Check(helper.calls[4], Equals, "SetUpTest") c.Check(helper.calls[5], Equals, "Test2") c.Check(helper.calls[6], Equals, "TearDownTest") c.Check(helper.calls[7], Equals, "TearDownSuite") c.Check(len(helper.calls), Equals, 8) } // ----------------------------------------------------------------------- // Check the behavior when panics occur within tests and fixtures. func (s *FixtureS) TestPanicOnTest(c *C) { helper := FixtureHelper{panicOn: "Test1"} output := String{} Run(&helper, &RunConf{Output: &output}) c.Check(helper.calls[0], Equals, "SetUpSuite") c.Check(helper.calls[1], Equals, "SetUpTest") c.Check(helper.calls[2], Equals, "Test1") c.Check(helper.calls[3], Equals, "TearDownTest") c.Check(helper.calls[4], Equals, "SetUpTest") c.Check(helper.calls[5], Equals, "Test2") c.Check(helper.calls[6], Equals, "TearDownTest") c.Check(helper.calls[7], Equals, "TearDownSuite") c.Check(len(helper.calls), Equals, 8) expected := "^\n-+\n" + "PANIC: check_test\\.go:[0-9]+: FixtureHelper.Test1\n\n" + "\\.\\.\\. 
Panic: Test1 \\(PC=[xA-F0-9]+\\)\n\n" + ".+:[0-9]+\n" + " in (go)?panic\n" + ".*check_test.go:[0-9]+\n" + " in FixtureHelper.trace\n" + ".*check_test.go:[0-9]+\n" + " in FixtureHelper.Test1\n" + "(.|\n)*$" c.Check(output.value, Matches, expected) } func (s *FixtureS) TestPanicOnSetUpTest(c *C) { helper := FixtureHelper{panicOn: "SetUpTest"} output := String{} Run(&helper, &RunConf{Output: &output}) c.Check(helper.calls[0], Equals, "SetUpSuite") c.Check(helper.calls[1], Equals, "SetUpTest") c.Check(helper.calls[2], Equals, "TearDownTest") c.Check(helper.calls[3], Equals, "TearDownSuite") c.Check(len(helper.calls), Equals, 4) expected := "^\n-+\n" + "PANIC: check_test\\.go:[0-9]+: " + "FixtureHelper\\.SetUpTest\n\n" + "\\.\\.\\. Panic: SetUpTest \\(PC=[xA-F0-9]+\\)\n\n" + ".+:[0-9]+\n" + " in (go)?panic\n" + ".*check_test.go:[0-9]+\n" + " in FixtureHelper.trace\n" + ".*check_test.go:[0-9]+\n" + " in FixtureHelper.SetUpTest\n" + "(.|\n)*" + "\n-+\n" + "PANIC: check_test\\.go:[0-9]+: " + "FixtureHelper\\.Test1\n\n" + "\\.\\.\\. Panic: Fixture has panicked " + "\\(see related PANIC\\)\n$" c.Check(output.value, Matches, expected) } func (s *FixtureS) TestPanicOnTearDownTest(c *C) { helper := FixtureHelper{panicOn: "TearDownTest"} output := String{} Run(&helper, &RunConf{Output: &output}) c.Check(helper.calls[0], Equals, "SetUpSuite") c.Check(helper.calls[1], Equals, "SetUpTest") c.Check(helper.calls[2], Equals, "Test1") c.Check(helper.calls[3], Equals, "TearDownTest") c.Check(helper.calls[4], Equals, "TearDownSuite") c.Check(len(helper.calls), Equals, 5) expected := "^\n-+\n" + "PANIC: check_test\\.go:[0-9]+: " + "FixtureHelper.TearDownTest\n\n" + "\\.\\.\\. Panic: TearDownTest \\(PC=[xA-F0-9]+\\)\n\n" + ".+:[0-9]+\n" + " in (go)?panic\n" + ".*check_test.go:[0-9]+\n" + " in FixtureHelper.trace\n" + ".*check_test.go:[0-9]+\n" + " in FixtureHelper.TearDownTest\n" + "(.|\n)*" + "\n-+\n" + "PANIC: check_test\\.go:[0-9]+: " + "FixtureHelper\\.Test1\n\n" + "\\.\\.\\. 
Panic: Fixture has panicked " + "\\(see related PANIC\\)\n$" c.Check(output.value, Matches, expected) } func (s *FixtureS) TestPanicOnSetUpSuite(c *C) { helper := FixtureHelper{panicOn: "SetUpSuite"} output := String{} Run(&helper, &RunConf{Output: &output}) c.Check(helper.calls[0], Equals, "SetUpSuite") c.Check(helper.calls[1], Equals, "TearDownSuite") c.Check(len(helper.calls), Equals, 2) expected := "^\n-+\n" + "PANIC: check_test\\.go:[0-9]+: " + "FixtureHelper.SetUpSuite\n\n" + "\\.\\.\\. Panic: SetUpSuite \\(PC=[xA-F0-9]+\\)\n\n" + ".+:[0-9]+\n" + " in (go)?panic\n" + ".*check_test.go:[0-9]+\n" + " in FixtureHelper.trace\n" + ".*check_test.go:[0-9]+\n" + " in FixtureHelper.SetUpSuite\n" + "(.|\n)*$" c.Check(output.value, Matches, expected) } func (s *FixtureS) TestPanicOnTearDownSuite(c *C) { helper := FixtureHelper{panicOn: "TearDownSuite"} output := String{} Run(&helper, &RunConf{Output: &output}) c.Check(helper.calls[0], Equals, "SetUpSuite") c.Check(helper.calls[1], Equals, "SetUpTest") c.Check(helper.calls[2], Equals, "Test1") c.Check(helper.calls[3], Equals, "TearDownTest") c.Check(helper.calls[4], Equals, "SetUpTest") c.Check(helper.calls[5], Equals, "Test2") c.Check(helper.calls[6], Equals, "TearDownTest") c.Check(helper.calls[7], Equals, "TearDownSuite") c.Check(len(helper.calls), Equals, 8) expected := "^\n-+\n" + "PANIC: check_test\\.go:[0-9]+: " + "FixtureHelper.TearDownSuite\n\n" + "\\.\\.\\. Panic: TearDownSuite \\(PC=[xA-F0-9]+\\)\n\n" + ".+:[0-9]+\n" + " in (go)?panic\n" + ".*check_test.go:[0-9]+\n" + " in FixtureHelper.trace\n" + ".*check_test.go:[0-9]+\n" + " in FixtureHelper.TearDownSuite\n" + "(.|\n)*$" c.Check(output.value, Matches, expected) } // ----------------------------------------------------------------------- // A wrong argument on a test or fixture will produce a nice error. 
func (s *FixtureS) TestPanicOnWrongTestArg(c *C) { helper := WrongTestArgHelper{} output := String{} Run(&helper, &RunConf{Output: &output}) c.Check(helper.calls[0], Equals, "SetUpSuite") c.Check(helper.calls[1], Equals, "SetUpTest") c.Check(helper.calls[2], Equals, "TearDownTest") c.Check(helper.calls[3], Equals, "SetUpTest") c.Check(helper.calls[4], Equals, "Test2") c.Check(helper.calls[5], Equals, "TearDownTest") c.Check(helper.calls[6], Equals, "TearDownSuite") c.Check(len(helper.calls), Equals, 7) expected := "^\n-+\n" + "PANIC: fixture_test\\.go:[0-9]+: " + "WrongTestArgHelper\\.Test1\n\n" + "\\.\\.\\. Panic: WrongTestArgHelper\\.Test1 argument " + "should be \\*check\\.C\n" c.Check(output.value, Matches, expected) } func (s *FixtureS) TestPanicOnWrongSetUpTestArg(c *C) { helper := WrongSetUpTestArgHelper{} output := String{} Run(&helper, &RunConf{Output: &output}) c.Check(len(helper.calls), Equals, 0) expected := "^\n-+\n" + "PANIC: fixture_test\\.go:[0-9]+: " + "WrongSetUpTestArgHelper\\.SetUpTest\n\n" + "\\.\\.\\. Panic: WrongSetUpTestArgHelper\\.SetUpTest argument " + "should be \\*check\\.C\n" c.Check(output.value, Matches, expected) } func (s *FixtureS) TestPanicOnWrongSetUpSuiteArg(c *C) { helper := WrongSetUpSuiteArgHelper{} output := String{} Run(&helper, &RunConf{Output: &output}) c.Check(len(helper.calls), Equals, 0) expected := "^\n-+\n" + "PANIC: fixture_test\\.go:[0-9]+: " + "WrongSetUpSuiteArgHelper\\.SetUpSuite\n\n" + "\\.\\.\\. Panic: WrongSetUpSuiteArgHelper\\.SetUpSuite argument " + "should be \\*check\\.C\n" c.Check(output.value, Matches, expected) } // ----------------------------------------------------------------------- // Nice errors also when tests or fixture have wrong arg count. 
func (s *FixtureS) TestPanicOnWrongTestArgCount(c *C) { helper := WrongTestArgCountHelper{} output := String{} Run(&helper, &RunConf{Output: &output}) c.Check(helper.calls[0], Equals, "SetUpSuite") c.Check(helper.calls[1], Equals, "SetUpTest") c.Check(helper.calls[2], Equals, "TearDownTest") c.Check(helper.calls[3], Equals, "SetUpTest") c.Check(helper.calls[4], Equals, "Test2") c.Check(helper.calls[5], Equals, "TearDownTest") c.Check(helper.calls[6], Equals, "TearDownSuite") c.Check(len(helper.calls), Equals, 7) expected := "^\n-+\n" + "PANIC: fixture_test\\.go:[0-9]+: " + "WrongTestArgCountHelper\\.Test1\n\n" + "\\.\\.\\. Panic: WrongTestArgCountHelper\\.Test1 argument " + "should be \\*check\\.C\n" c.Check(output.value, Matches, expected) } func (s *FixtureS) TestPanicOnWrongSetUpTestArgCount(c *C) { helper := WrongSetUpTestArgCountHelper{} output := String{} Run(&helper, &RunConf{Output: &output}) c.Check(len(helper.calls), Equals, 0) expected := "^\n-+\n" + "PANIC: fixture_test\\.go:[0-9]+: " + "WrongSetUpTestArgCountHelper\\.SetUpTest\n\n" + "\\.\\.\\. Panic: WrongSetUpTestArgCountHelper\\.SetUpTest argument " + "should be \\*check\\.C\n" c.Check(output.value, Matches, expected) } func (s *FixtureS) TestPanicOnWrongSetUpSuiteArgCount(c *C) { helper := WrongSetUpSuiteArgCountHelper{} output := String{} Run(&helper, &RunConf{Output: &output}) c.Check(len(helper.calls), Equals, 0) expected := "^\n-+\n" + "PANIC: fixture_test\\.go:[0-9]+: " + "WrongSetUpSuiteArgCountHelper\\.SetUpSuite\n\n" + "\\.\\.\\. Panic: WrongSetUpSuiteArgCountHelper" + "\\.SetUpSuite argument should be \\*check\\.C\n" c.Check(output.value, Matches, expected) } // ----------------------------------------------------------------------- // Helper test suites with wrong function arguments. 
type WrongTestArgHelper struct { FixtureHelper } func (s *WrongTestArgHelper) Test1(t int) { } type WrongSetUpTestArgHelper struct { FixtureHelper } func (s *WrongSetUpTestArgHelper) SetUpTest(t int) { } type WrongSetUpSuiteArgHelper struct { FixtureHelper } func (s *WrongSetUpSuiteArgHelper) SetUpSuite(t int) { } type WrongTestArgCountHelper struct { FixtureHelper } func (s *WrongTestArgCountHelper) Test1(c *C, i int) { } type WrongSetUpTestArgCountHelper struct { FixtureHelper } func (s *WrongSetUpTestArgCountHelper) SetUpTest(c *C, i int) { } type WrongSetUpSuiteArgCountHelper struct { FixtureHelper } func (s *WrongSetUpSuiteArgCountHelper) SetUpSuite(c *C, i int) { } // ----------------------------------------------------------------------- // Ensure fixture doesn't run without tests. type NoTestsHelper struct { hasRun bool } func (s *NoTestsHelper) SetUpSuite(c *C) { s.hasRun = true } func (s *NoTestsHelper) TearDownSuite(c *C) { s.hasRun = true } func (s *FixtureS) TestFixtureDoesntRunWithoutTests(c *C) { helper := NoTestsHelper{} output := String{} Run(&helper, &RunConf{Output: &output}) c.Check(helper.hasRun, Equals, false) } // ----------------------------------------------------------------------- // Verify that checks and assertions work correctly inside the fixture. type FixtureCheckHelper struct { fail string completed bool } func (s *FixtureCheckHelper) SetUpSuite(c *C) { switch s.fail { case "SetUpSuiteAssert": c.Assert(false, Equals, true) case "SetUpSuiteCheck": c.Check(false, Equals, true) } s.completed = true } func (s *FixtureCheckHelper) SetUpTest(c *C) { switch s.fail { case "SetUpTestAssert": c.Assert(false, Equals, true) case "SetUpTestCheck": c.Check(false, Equals, true) } s.completed = true } func (s *FixtureCheckHelper) Test(c *C) { // Do nothing. 
} func (s *FixtureS) TestSetUpSuiteCheck(c *C) { helper := FixtureCheckHelper{fail: "SetUpSuiteCheck"} output := String{} Run(&helper, &RunConf{Output: &output}) c.Assert(output.value, Matches, "\n---+\n"+ "FAIL: fixture_test\\.go:[0-9]+: "+ "FixtureCheckHelper\\.SetUpSuite\n\n"+ "fixture_test\\.go:[0-9]+:\n"+ " c\\.Check\\(false, Equals, true\\)\n"+ "\\.+ obtained bool = false\n"+ "\\.+ expected bool = true\n\n") c.Assert(helper.completed, Equals, true) } func (s *FixtureS) TestSetUpSuiteAssert(c *C) { helper := FixtureCheckHelper{fail: "SetUpSuiteAssert"} output := String{} Run(&helper, &RunConf{Output: &output}) c.Assert(output.value, Matches, "\n---+\n"+ "FAIL: fixture_test\\.go:[0-9]+: "+ "FixtureCheckHelper\\.SetUpSuite\n\n"+ "fixture_test\\.go:[0-9]+:\n"+ " c\\.Assert\\(false, Equals, true\\)\n"+ "\\.+ obtained bool = false\n"+ "\\.+ expected bool = true\n\n") c.Assert(helper.completed, Equals, false) } // ----------------------------------------------------------------------- // Verify that logging within SetUpTest() persists within the test log itself. type FixtureLogHelper struct { c *C } func (s *FixtureLogHelper) SetUpTest(c *C) { s.c = c c.Log("1") } func (s *FixtureLogHelper) Test(c *C) { c.Log("2") s.c.Log("3") c.Log("4") c.Fail() } func (s *FixtureLogHelper) TearDownTest(c *C) { s.c.Log("5") } func (s *FixtureS) TestFixtureLogging(c *C) { helper := FixtureLogHelper{} output := String{} Run(&helper, &RunConf{Output: &output}) c.Assert(output.value, Matches, "\n---+\n"+ "FAIL: fixture_test\\.go:[0-9]+: "+ "FixtureLogHelper\\.Test\n\n"+ "1\n2\n3\n4\n5\n") } // ----------------------------------------------------------------------- // Skip() within fixture methods. 
func (s *FixtureS) TestSkipSuite(c *C) { helper := FixtureHelper{skip: true, skipOnN: 0} output := String{} result := Run(&helper, &RunConf{Output: &output}) c.Assert(output.value, Equals, "") c.Assert(helper.calls[0], Equals, "SetUpSuite") c.Assert(helper.calls[1], Equals, "TearDownSuite") c.Assert(len(helper.calls), Equals, 2) c.Assert(result.Skipped, Equals, 2) } func (s *FixtureS) TestSkipTest(c *C) { helper := FixtureHelper{skip: true, skipOnN: 1} output := String{} result := Run(&helper, &RunConf{Output: &output}) c.Assert(helper.calls[0], Equals, "SetUpSuite") c.Assert(helper.calls[1], Equals, "SetUpTest") c.Assert(helper.calls[2], Equals, "SetUpTest") c.Assert(helper.calls[3], Equals, "Test2") c.Assert(helper.calls[4], Equals, "TearDownTest") c.Assert(helper.calls[5], Equals, "TearDownSuite") c.Assert(len(helper.calls), Equals, 6) c.Assert(result.Skipped, Equals, 1) } charm-2.1.1/src/gopkg.in/check.v1/helpers_test.go0000664000175000017500000004103412672604604020541 0ustar marcomarco// These tests verify the inner workings of the helper methods associated // with check.T. package check_test import ( "gopkg.in/check.v1" "os" "reflect" "runtime" "sync" ) var helpersS = check.Suite(&HelpersS{}) type HelpersS struct{} func (s *HelpersS) TestCountSuite(c *check.C) { suitesRun += 1 } // ----------------------------------------------------------------------- // Fake checker and bug info to verify the behavior of Assert() and Check(). type MyChecker struct { info *check.CheckerInfo params []interface{} names []string result bool error string } func (checker *MyChecker) Info() *check.CheckerInfo { if checker.info == nil { return &check.CheckerInfo{Name: "MyChecker", Params: []string{"myobtained", "myexpected"}} } return checker.info } func (checker *MyChecker) Check(params []interface{}, names []string) (bool, string) { rparams := checker.params rnames := checker.names checker.params = append([]interface{}{}, params...) checker.names = append([]string{}, names...) 
if rparams != nil { copy(params, rparams) } if rnames != nil { copy(names, rnames) } return checker.result, checker.error } type myCommentType string func (c myCommentType) CheckCommentString() string { return string(c) } func myComment(s string) myCommentType { return myCommentType(s) } // ----------------------------------------------------------------------- // Ensure a real checker actually works fine. func (s *HelpersS) TestCheckerInterface(c *check.C) { testHelperSuccess(c, "Check(1, Equals, 1)", true, func() interface{} { return c.Check(1, check.Equals, 1) }) } // ----------------------------------------------------------------------- // Tests for Check(), mostly the same as for Assert() following these. func (s *HelpersS) TestCheckSucceedWithExpected(c *check.C) { checker := &MyChecker{result: true} testHelperSuccess(c, "Check(1, checker, 2)", true, func() interface{} { return c.Check(1, checker, 2) }) if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) { c.Fatalf("Bad params for check: %#v", checker.params) } } func (s *HelpersS) TestCheckSucceedWithoutExpected(c *check.C) { checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}} testHelperSuccess(c, "Check(1, checker)", true, func() interface{} { return c.Check(1, checker) }) if !reflect.DeepEqual(checker.params, []interface{}{1}) { c.Fatalf("Bad params for check: %#v", checker.params) } } func (s *HelpersS) TestCheckFailWithExpected(c *check.C) { checker := &MyChecker{result: false} log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " return c\\.Check\\(1, checker, 2\\)\n" + "\\.+ myobtained int = 1\n" + "\\.+ myexpected int = 2\n\n" testHelperFailure(c, "Check(1, checker, 2)", false, false, log, func() interface{} { return c.Check(1, checker, 2) }) } func (s *HelpersS) TestCheckFailWithExpectedAndComment(c *check.C) { checker := &MyChecker{result: false} log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " return 
c\\.Check\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" + "\\.+ myobtained int = 1\n" + "\\.+ myexpected int = 2\n" + "\\.+ Hello world!\n\n" testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log, func() interface{} { return c.Check(1, checker, 2, myComment("Hello world!")) }) } func (s *HelpersS) TestCheckFailWithExpectedAndStaticComment(c *check.C) { checker := &MyChecker{result: false} log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " // Nice leading comment\\.\n" + " return c\\.Check\\(1, checker, 2\\) // Hello there\n" + "\\.+ myobtained int = 1\n" + "\\.+ myexpected int = 2\n\n" testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log, func() interface{} { // Nice leading comment. return c.Check(1, checker, 2) // Hello there }) } func (s *HelpersS) TestCheckFailWithoutExpected(c *check.C) { checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " return c\\.Check\\(1, checker\\)\n" + "\\.+ myvalue int = 1\n\n" testHelperFailure(c, "Check(1, checker)", false, false, log, func() interface{} { return c.Check(1, checker) }) } func (s *HelpersS) TestCheckFailWithoutExpectedAndMessage(c *check.C) { checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " return c\\.Check\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" + "\\.+ myvalue int = 1\n" + "\\.+ Hello world!\n\n" testHelperFailure(c, "Check(1, checker, msg)", false, false, log, func() interface{} { return c.Check(1, checker, myComment("Hello world!")) }) } func (s *HelpersS) TestCheckWithMissingExpected(c *check.C) { checker := &MyChecker{result: true} log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " return c\\.Check\\(1, checker\\)\n" + "\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" + "\\.+ Wrong 
number of parameters for MyChecker: " + "want 3, got 2\n\n" testHelperFailure(c, "Check(1, checker, !?)", false, false, log, func() interface{} { return c.Check(1, checker) }) } func (s *HelpersS) TestCheckWithTooManyExpected(c *check.C) { checker := &MyChecker{result: true} log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " return c\\.Check\\(1, checker, 2, 3\\)\n" + "\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" + "\\.+ Wrong number of parameters for MyChecker: " + "want 3, got 4\n\n" testHelperFailure(c, "Check(1, checker, 2, 3)", false, false, log, func() interface{} { return c.Check(1, checker, 2, 3) }) } func (s *HelpersS) TestCheckWithError(c *check.C) { checker := &MyChecker{result: false, error: "Some not so cool data provided!"} log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " return c\\.Check\\(1, checker, 2\\)\n" + "\\.+ myobtained int = 1\n" + "\\.+ myexpected int = 2\n" + "\\.+ Some not so cool data provided!\n\n" testHelperFailure(c, "Check(1, checker, 2)", false, false, log, func() interface{} { return c.Check(1, checker, 2) }) } func (s *HelpersS) TestCheckWithNilChecker(c *check.C) { log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " return c\\.Check\\(1, nil\\)\n" + "\\.+ Check\\(obtained, nil!\\?, \\.\\.\\.\\):\n" + "\\.+ Oops\\.\\. 
you've provided a nil checker!\n\n" testHelperFailure(c, "Check(obtained, nil)", false, false, log, func() interface{} { return c.Check(1, nil) }) } func (s *HelpersS) TestCheckWithParamsAndNamesMutation(c *check.C) { checker := &MyChecker{result: false, params: []interface{}{3, 4}, names: []string{"newobtained", "newexpected"}} log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " return c\\.Check\\(1, checker, 2\\)\n" + "\\.+ newobtained int = 3\n" + "\\.+ newexpected int = 4\n\n" testHelperFailure(c, "Check(1, checker, 2) with mutation", false, false, log, func() interface{} { return c.Check(1, checker, 2) }) } // ----------------------------------------------------------------------- // Tests for Assert(), mostly the same as for Check() above. func (s *HelpersS) TestAssertSucceedWithExpected(c *check.C) { checker := &MyChecker{result: true} testHelperSuccess(c, "Assert(1, checker, 2)", nil, func() interface{} { c.Assert(1, checker, 2) return nil }) if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) { c.Fatalf("Bad params for check: %#v", checker.params) } } func (s *HelpersS) TestAssertSucceedWithoutExpected(c *check.C) { checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}} testHelperSuccess(c, "Assert(1, checker)", nil, func() interface{} { c.Assert(1, checker) return nil }) if !reflect.DeepEqual(checker.params, []interface{}{1}) { c.Fatalf("Bad params for check: %#v", checker.params) } } func (s *HelpersS) TestAssertFailWithExpected(c *check.C) { checker := &MyChecker{result: false} log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " c\\.Assert\\(1, checker, 2\\)\n" + "\\.+ myobtained int = 1\n" + "\\.+ myexpected int = 2\n\n" testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log, func() interface{} { c.Assert(1, checker, 2) return nil }) } func (s *HelpersS) TestAssertFailWithExpectedAndMessage(c *check.C) { checker := &MyChecker{result: false} log := 
"(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " c\\.Assert\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" + "\\.+ myobtained int = 1\n" + "\\.+ myexpected int = 2\n" + "\\.+ Hello world!\n\n" testHelperFailure(c, "Assert(1, checker, 2, msg)", nil, true, log, func() interface{} { c.Assert(1, checker, 2, myComment("Hello world!")) return nil }) } func (s *HelpersS) TestAssertFailWithoutExpected(c *check.C) { checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " c\\.Assert\\(1, checker\\)\n" + "\\.+ myvalue int = 1\n\n" testHelperFailure(c, "Assert(1, checker)", nil, true, log, func() interface{} { c.Assert(1, checker) return nil }) } func (s *HelpersS) TestAssertFailWithoutExpectedAndMessage(c *check.C) { checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " c\\.Assert\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" + "\\.+ myvalue int = 1\n" + "\\.+ Hello world!\n\n" testHelperFailure(c, "Assert(1, checker, msg)", nil, true, log, func() interface{} { c.Assert(1, checker, myComment("Hello world!")) return nil }) } func (s *HelpersS) TestAssertWithMissingExpected(c *check.C) { checker := &MyChecker{result: true} log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " c\\.Assert\\(1, checker\\)\n" + "\\.+ Assert\\(myobtained, MyChecker, myexpected\\):\n" + "\\.+ Wrong number of parameters for MyChecker: " + "want 3, got 2\n\n" testHelperFailure(c, "Assert(1, checker, !?)", nil, true, log, func() interface{} { c.Assert(1, checker) return nil }) } func (s *HelpersS) TestAssertWithError(c *check.C) { checker := &MyChecker{result: false, error: "Some not so cool data provided!"} log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " c\\.Assert\\(1, checker, 2\\)\n" + "\\.+ myobtained 
int = 1\n" + "\\.+ myexpected int = 2\n" + "\\.+ Some not so cool data provided!\n\n" testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log, func() interface{} { c.Assert(1, checker, 2) return nil }) } func (s *HelpersS) TestAssertWithNilChecker(c *check.C) { log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + " c\\.Assert\\(1, nil\\)\n" + "\\.+ Assert\\(obtained, nil!\\?, \\.\\.\\.\\):\n" + "\\.+ Oops\\.\\. you've provided a nil checker!\n\n" testHelperFailure(c, "Assert(obtained, nil)", nil, true, log, func() interface{} { c.Assert(1, nil) return nil }) } // ----------------------------------------------------------------------- // Ensure that values logged work properly in some interesting cases. func (s *HelpersS) TestValueLoggingWithArrays(c *check.C) { checker := &MyChecker{result: false} log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" + " return c\\.Check\\(\\[\\]byte{1, 2}, checker, \\[\\]byte{1, 3}\\)\n" + "\\.+ myobtained \\[\\]uint8 = \\[\\]byte{0x1, 0x2}\n" + "\\.+ myexpected \\[\\]uint8 = \\[\\]byte{0x1, 0x3}\n\n" testHelperFailure(c, "Check([]byte{1}, chk, []byte{3})", false, false, log, func() interface{} { return c.Check([]byte{1, 2}, checker, []byte{1, 3}) }) } func (s *HelpersS) TestValueLoggingWithMultiLine(c *check.C) { checker := &MyChecker{result: false} log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" + " return c\\.Check\\(\"a\\\\nb\\\\n\", checker, \"a\\\\nb\\\\nc\"\\)\n" + "\\.+ myobtained string = \"\" \\+\n" + "\\.+ \"a\\\\n\" \\+\n" + "\\.+ \"b\\\\n\"\n" + "\\.+ myexpected string = \"\" \\+\n" + "\\.+ \"a\\\\n\" \\+\n" + "\\.+ \"b\\\\n\" \\+\n" + "\\.+ \"c\"\n\n" testHelperFailure(c, `Check("a\nb\n", chk, "a\nb\nc")`, false, false, log, func() interface{} { return c.Check("a\nb\n", checker, "a\nb\nc") }) } func (s *HelpersS) TestValueLoggingWithMultiLineException(c *check.C) { // If the newline is at the end of the string, don't log as multi-line. 
checker := &MyChecker{result: false} log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" + " return c\\.Check\\(\"a b\\\\n\", checker, \"a\\\\nb\"\\)\n" + "\\.+ myobtained string = \"a b\\\\n\"\n" + "\\.+ myexpected string = \"\" \\+\n" + "\\.+ \"a\\\\n\" \\+\n" + "\\.+ \"b\"\n\n" testHelperFailure(c, `Check("a b\n", chk, "a\nb")`, false, false, log, func() interface{} { return c.Check("a b\n", checker, "a\nb") }) } // ----------------------------------------------------------------------- // MakeDir() tests. type MkDirHelper struct { path1 string path2 string isDir1 bool isDir2 bool isDir3 bool isDir4 bool } func (s *MkDirHelper) SetUpSuite(c *check.C) { s.path1 = c.MkDir() s.isDir1 = isDir(s.path1) } func (s *MkDirHelper) Test(c *check.C) { s.path2 = c.MkDir() s.isDir2 = isDir(s.path2) } func (s *MkDirHelper) TearDownSuite(c *check.C) { s.isDir3 = isDir(s.path1) s.isDir4 = isDir(s.path2) } func (s *HelpersS) TestMkDir(c *check.C) { helper := MkDirHelper{} output := String{} check.Run(&helper, &check.RunConf{Output: &output}) c.Assert(output.value, check.Equals, "") c.Check(helper.isDir1, check.Equals, true) c.Check(helper.isDir2, check.Equals, true) c.Check(helper.isDir3, check.Equals, true) c.Check(helper.isDir4, check.Equals, true) c.Check(helper.path1, check.Not(check.Equals), helper.path2) c.Check(isDir(helper.path1), check.Equals, false) c.Check(isDir(helper.path2), check.Equals, false) } func isDir(path string) bool { if stat, err := os.Stat(path); err == nil { return stat.IsDir() } return false } // Concurrent logging should not corrupt the underling buffer. // Use go test -race to detect the race in this test. 
func (s *HelpersS) TestConcurrentLogging(c *check.C) { defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU())) var start, stop sync.WaitGroup start.Add(1) for i, n := 0, runtime.NumCPU()*2; i < n; i++ { stop.Add(1) go func(i int) { start.Wait() for j := 0; j < 30; j++ { c.Logf("Worker %d: line %d", i, j) } stop.Done() }(i) } start.Done() stop.Wait() } // ----------------------------------------------------------------------- // Test the TestName function type TestNameHelper struct { name1 string name2 string name3 string name4 string name5 string } func (s *TestNameHelper) SetUpSuite(c *check.C) { s.name1 = c.TestName() } func (s *TestNameHelper) SetUpTest(c *check.C) { s.name2 = c.TestName() } func (s *TestNameHelper) Test(c *check.C) { s.name3 = c.TestName() } func (s *TestNameHelper) TearDownTest(c *check.C) { s.name4 = c.TestName() } func (s *TestNameHelper) TearDownSuite(c *check.C) { s.name5 = c.TestName() } func (s *HelpersS) TestTestName(c *check.C) { helper := TestNameHelper{} output := String{} check.Run(&helper, &check.RunConf{Output: &output}) c.Check(helper.name1, check.Equals, "") c.Check(helper.name2, check.Equals, "TestNameHelper.Test") c.Check(helper.name3, check.Equals, "TestNameHelper.Test") c.Check(helper.name4, check.Equals, "TestNameHelper.Test") c.Check(helper.name5, check.Equals, "") } // ----------------------------------------------------------------------- // A couple of helper functions to test helper functions. 
:-) func testHelperSuccess(c *check.C, name string, expectedResult interface{}, closure func() interface{}) { var result interface{} defer (func() { if err := recover(); err != nil { panic(err) } checkState(c, result, &expectedState{ name: name, result: expectedResult, failed: false, log: "", }) })() result = closure() } func testHelperFailure(c *check.C, name string, expectedResult interface{}, shouldStop bool, log string, closure func() interface{}) { var result interface{} defer (func() { if err := recover(); err != nil { panic(err) } checkState(c, result, &expectedState{ name: name, result: expectedResult, failed: true, log: log, }) })() result = closure() if shouldStop { c.Logf("%s didn't stop when it should", name) } } charm-2.1.1/src/gopkg.in/check.v1/README.md0000664000175000017500000000047612672604604016775 0ustar marcomarcoInstructions ============ Install the package with: go get gopkg.in/check.v1 Import it with: import "gopkg.in/check.v1" and use _check_ as the package name inside the code. 
For more details, visit the project page: * http://labix.org/gocheck and the API documentation: * https://gopkg.in/check.v1 charm-2.1.1/src/gopkg.in/check.v1/printer.go0000664000175000017500000000736512672604604017534 0ustar marcomarcopackage check import ( "bytes" "go/ast" "go/parser" "go/printer" "go/token" "os" ) func indent(s, with string) (r string) { eol := true for i := 0; i != len(s); i++ { c := s[i] switch { case eol && c == '\n' || c == '\r': case c == '\n' || c == '\r': eol = true case eol: eol = false s = s[:i] + with + s[i:] i += len(with) } } return s } func printLine(filename string, line int) (string, error) { fset := token.NewFileSet() file, err := os.Open(filename) if err != nil { return "", err } fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments) if err != nil { return "", err } config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4} lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config} ast.Walk(lp, fnode) result := lp.output.Bytes() // Comments leave \n at the end. 
n := len(result) for n > 0 && result[n-1] == '\n' { n-- } return string(result[:n]), nil } type linePrinter struct { config *printer.Config fset *token.FileSet fnode *ast.File line int output bytes.Buffer stmt ast.Stmt } func (lp *linePrinter) emit() bool { if lp.stmt != nil { lp.trim(lp.stmt) lp.printWithComments(lp.stmt) lp.stmt = nil return true } return false } func (lp *linePrinter) printWithComments(n ast.Node) { nfirst := lp.fset.Position(n.Pos()).Line nlast := lp.fset.Position(n.End()).Line for _, g := range lp.fnode.Comments { cfirst := lp.fset.Position(g.Pos()).Line clast := lp.fset.Position(g.End()).Line if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column { for _, c := range g.List { lp.output.WriteString(c.Text) lp.output.WriteByte('\n') } } if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash { // The printer will not include the comment if it starts past // the node itself. Trick it into printing by overlapping the // slash with the end of the statement. g.List[0].Slash = n.End() - 1 } } node := &printer.CommentedNode{n, lp.fnode.Comments} lp.config.Fprint(&lp.output, lp.fset, node) } func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) { if n == nil { if lp.output.Len() == 0 { lp.emit() } return nil } first := lp.fset.Position(n.Pos()).Line last := lp.fset.Position(n.End()).Line if first <= lp.line && last >= lp.line { // Print the innermost statement containing the line. 
if stmt, ok := n.(ast.Stmt); ok { if _, ok := n.(*ast.BlockStmt); !ok { lp.stmt = stmt } } if first == lp.line && lp.emit() { return nil } return lp } return nil } func (lp *linePrinter) trim(n ast.Node) bool { stmt, ok := n.(ast.Stmt) if !ok { return true } line := lp.fset.Position(n.Pos()).Line if line != lp.line { return false } switch stmt := stmt.(type) { case *ast.IfStmt: stmt.Body = lp.trimBlock(stmt.Body) case *ast.SwitchStmt: stmt.Body = lp.trimBlock(stmt.Body) case *ast.TypeSwitchStmt: stmt.Body = lp.trimBlock(stmt.Body) case *ast.CaseClause: stmt.Body = lp.trimList(stmt.Body) case *ast.CommClause: stmt.Body = lp.trimList(stmt.Body) case *ast.BlockStmt: stmt.List = lp.trimList(stmt.List) } return true } func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt { if !lp.trim(stmt) { return lp.emptyBlock(stmt) } stmt.Rbrace = stmt.Lbrace return stmt } func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt { for i := 0; i != len(stmts); i++ { if !lp.trim(stmts[i]) { stmts[i] = lp.emptyStmt(stmts[i]) break } } return stmts } func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt { return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}} } func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt { p := n.Pos() return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p} } charm-2.1.1/src/gopkg.in/check.v1/foundation_test.go0000664000175000017500000002142112672604604021243 0ustar marcomarco// These tests check that the foundations of gocheck are working properly. // They already assume that fundamental failing is working already, though, // since this was tested in bootstrap_test.go. Even then, some care may // still have to be taken when using external functions, since they should // of course not rely on functionality tested here. package check_test import ( "fmt" "gopkg.in/check.v1" "log" "os" "regexp" "strings" ) // ----------------------------------------------------------------------- // Foundation test suite. 
type FoundationS struct{} var foundationS = check.Suite(&FoundationS{}) func (s *FoundationS) TestCountSuite(c *check.C) { suitesRun += 1 } func (s *FoundationS) TestErrorf(c *check.C) { // Do not use checkState() here. It depends on Errorf() working. expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+ " c.Errorf(\"Error %%v!\", \"message\")\n"+ "... Error: Error message!\n\n", getMyLine()+1) c.Errorf("Error %v!", "message") failed := c.Failed() c.Succeed() if log := c.GetTestLog(); log != expectedLog { c.Logf("Errorf() logged %#v rather than %#v", log, expectedLog) c.Fail() } if !failed { c.Logf("Errorf() didn't put the test in a failed state") c.Fail() } } func (s *FoundationS) TestError(c *check.C) { expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+ " c\\.Error\\(\"Error \", \"message!\"\\)\n"+ "\\.\\.\\. Error: Error message!\n\n", getMyLine()+1) c.Error("Error ", "message!") checkState(c, nil, &expectedState{ name: "Error(`Error `, `message!`)", failed: true, log: expectedLog, }) } func (s *FoundationS) TestFailNow(c *check.C) { defer (func() { if !c.Failed() { c.Error("FailNow() didn't fail the test") } else { c.Succeed() if c.GetTestLog() != "" { c.Error("Something got logged:\n" + c.GetTestLog()) } } })() c.FailNow() c.Log("FailNow() didn't stop the test") } func (s *FoundationS) TestSucceedNow(c *check.C) { defer (func() { if c.Failed() { c.Error("SucceedNow() didn't succeed the test") } if c.GetTestLog() != "" { c.Error("Something got logged:\n" + c.GetTestLog()) } })() c.Fail() c.SucceedNow() c.Log("SucceedNow() didn't stop the test") } func (s *FoundationS) TestFailureHeader(c *check.C) { output := String{} failHelper := FailHelper{} check.Run(&failHelper, &check.RunConf{Output: &output}) header := fmt.Sprintf(""+ "\n-----------------------------------"+ "-----------------------------------\n"+ "FAIL: check_test.go:%d: FailHelper.TestLogAndFail\n", failHelper.testLine) if strings.Index(output.value, header) == -1 { c.Errorf(""+ "Failure didn't 
print a proper header.\n"+ "... Got:\n%s... Expected something with:\n%s", output.value, header) } } func (s *FoundationS) TestFatal(c *check.C) { var line int defer (func() { if !c.Failed() { c.Error("Fatal() didn't fail the test") } else { c.Succeed() expected := fmt.Sprintf("foundation_test.go:%d:\n"+ " c.Fatal(\"Die \", \"now!\")\n"+ "... Error: Die now!\n\n", line) if c.GetTestLog() != expected { c.Error("Incorrect log:", c.GetTestLog()) } } })() line = getMyLine() + 1 c.Fatal("Die ", "now!") c.Log("Fatal() didn't stop the test") } func (s *FoundationS) TestFatalf(c *check.C) { var line int defer (func() { if !c.Failed() { c.Error("Fatalf() didn't fail the test") } else { c.Succeed() expected := fmt.Sprintf("foundation_test.go:%d:\n"+ " c.Fatalf(\"Die %%s!\", \"now\")\n"+ "... Error: Die now!\n\n", line) if c.GetTestLog() != expected { c.Error("Incorrect log:", c.GetTestLog()) } } })() line = getMyLine() + 1 c.Fatalf("Die %s!", "now") c.Log("Fatalf() didn't stop the test") } func (s *FoundationS) TestCallerLoggingInsideTest(c *check.C) { log := fmt.Sprintf(""+ "foundation_test.go:%d:\n"+ " result := c.Check\\(10, check.Equals, 20\\)\n"+ "\\.\\.\\. obtained int = 10\n"+ "\\.\\.\\. expected int = 20\n\n", getMyLine()+1) result := c.Check(10, check.Equals, 20) checkState(c, result, &expectedState{ name: "Check(10, Equals, 20)", result: false, failed: true, log: log, }) } func (s *FoundationS) TestCallerLoggingInDifferentFile(c *check.C) { result, line := checkEqualWrapper(c, 10, 20) testLine := getMyLine() - 1 log := fmt.Sprintf(""+ "foundation_test.go:%d:\n"+ " result, line := checkEqualWrapper\\(c, 10, 20\\)\n"+ "check_test.go:%d:\n"+ " return c.Check\\(obtained, check.Equals, expected\\), getMyLine\\(\\)\n"+ "\\.\\.\\. obtained int = 10\n"+ "\\.\\.\\. 
expected int = 20\n\n", testLine, line) checkState(c, result, &expectedState{ name: "Check(10, Equals, 20)", result: false, failed: true, log: log, }) } // ----------------------------------------------------------------------- // ExpectFailure() inverts the logic of failure. type ExpectFailureSucceedHelper struct{} func (s *ExpectFailureSucceedHelper) TestSucceed(c *check.C) { c.ExpectFailure("It booms!") c.Error("Boom!") } type ExpectFailureFailHelper struct{} func (s *ExpectFailureFailHelper) TestFail(c *check.C) { c.ExpectFailure("Bug #XYZ") } func (s *FoundationS) TestExpectFailureFail(c *check.C) { helper := ExpectFailureFailHelper{} output := String{} result := check.Run(&helper, &check.RunConf{Output: &output}) expected := "" + "^\n-+\n" + "FAIL: foundation_test\\.go:[0-9]+:" + " ExpectFailureFailHelper\\.TestFail\n\n" + "\\.\\.\\. Error: Test succeeded, but was expected to fail\n" + "\\.\\.\\. Reason: Bug #XYZ\n$" matched, err := regexp.MatchString(expected, output.value) if err != nil { c.Error("Bad expression: ", expected) } else if !matched { c.Error("ExpectFailure() didn't log properly:\n", output.value) } c.Assert(result.ExpectedFailures, check.Equals, 0) } func (s *FoundationS) TestExpectFailureSucceed(c *check.C) { helper := ExpectFailureSucceedHelper{} output := String{} result := check.Run(&helper, &check.RunConf{Output: &output}) c.Assert(output.value, check.Equals, "") c.Assert(result.ExpectedFailures, check.Equals, 1) } func (s *FoundationS) TestExpectFailureSucceedVerbose(c *check.C) { helper := ExpectFailureSucceedHelper{} output := String{} result := check.Run(&helper, &check.RunConf{Output: &output, Verbose: true}) expected := "" + "FAIL EXPECTED: foundation_test\\.go:[0-9]+:" + " ExpectFailureSucceedHelper\\.TestSucceed \\(It booms!\\)\t *[.0-9]+s\n" matched, err := regexp.MatchString(expected, output.value) if err != nil { c.Error("Bad expression: ", expected) } else if !matched { c.Error("ExpectFailure() didn't log properly:\n", 
output.value) } c.Assert(result.ExpectedFailures, check.Equals, 1) } // ----------------------------------------------------------------------- // Skip() allows stopping a test without positive/negative results. type SkipTestHelper struct{} func (s *SkipTestHelper) TestFail(c *check.C) { c.Skip("Wrong platform or whatever") c.Error("Boom!") } func (s *FoundationS) TestSkip(c *check.C) { helper := SkipTestHelper{} output := String{} check.Run(&helper, &check.RunConf{Output: &output}) if output.value != "" { c.Error("Skip() logged something:\n", output.value) } } func (s *FoundationS) TestSkipVerbose(c *check.C) { helper := SkipTestHelper{} output := String{} check.Run(&helper, &check.RunConf{Output: &output, Verbose: true}) expected := "SKIP: foundation_test\\.go:[0-9]+: SkipTestHelper\\.TestFail" + " \\(Wrong platform or whatever\\)" matched, err := regexp.MatchString(expected, output.value) if err != nil { c.Error("Bad expression: ", expected) } else if !matched { c.Error("Skip() didn't log properly:\n", output.value) } } // ----------------------------------------------------------------------- // Check minimum *log.Logger interface provided by *check.C. type minLogger interface { Output(calldepth int, s string) error } func (s *BootstrapS) TestMinLogger(c *check.C) { var logger minLogger logger = log.New(os.Stderr, "", 0) logger = c logger.Output(0, "Hello there") expected := `\[LOG\] [0-9]+:[0-9][0-9]\.[0-9][0-9][0-9] +Hello there\n` output := c.GetTestLog() c.Assert(output, check.Matches, expected) } // ----------------------------------------------------------------------- // Ensure that suites with embedded types are working fine, including the // the workaround for issue 906. 
type EmbeddedInternalS struct { called bool } type EmbeddedS struct { EmbeddedInternalS } var embeddedS = check.Suite(&EmbeddedS{}) func (s *EmbeddedS) TestCountSuite(c *check.C) { suitesRun += 1 } func (s *EmbeddedInternalS) TestMethod(c *check.C) { c.Error("TestMethod() of the embedded type was called!?") } func (s *EmbeddedS) TestMethod(c *check.C) { // http://code.google.com/p/go/issues/detail?id=906 c.Check(s.called, check.Equals, false) // Go issue 906 is affecting the runner? s.called = true } charm-2.1.1/src/gopkg.in/check.v1/run.go0000664000175000017500000001252612672604604016650 0ustar marcomarcopackage check import ( "bufio" "flag" "fmt" "os" "testing" "time" ) // ----------------------------------------------------------------------- // Test suite registry. var allSuites []interface{} // Suite registers the given value as a test suite to be run. Any methods // starting with the Test prefix in the given value will be considered as // a test method. func Suite(suite interface{}) interface{} { allSuites = append(allSuites, suite) return suite } // ----------------------------------------------------------------------- // Public running interface. 
var ( oldFilterFlag = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run") oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode") oldStreamFlag = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)") oldBenchFlag = flag.Bool("gocheck.b", false, "Run benchmarks") oldBenchTime = flag.Duration("gocheck.btime", 1*time.Second, "approximate run time for each benchmark") oldListFlag = flag.Bool("gocheck.list", false, "List the names of all tests that will be run") oldWorkFlag = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory") newFilterFlag = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run") newVerboseFlag = flag.Bool("check.v", false, "Verbose mode") newStreamFlag = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)") newBenchFlag = flag.Bool("check.b", false, "Run benchmarks") newBenchTime = flag.Duration("check.btime", 1*time.Second, "approximate run time for each benchmark") newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks") newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run") newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory") ) // TestingT runs all test suites registered with the Suite function, // printing results to stdout, and reporting any failures back to // the "testing" package. 
func TestingT(testingT *testing.T) { benchTime := *newBenchTime if benchTime == 1*time.Second { benchTime = *oldBenchTime } conf := &RunConf{ Filter: *oldFilterFlag + *newFilterFlag, Verbose: *oldVerboseFlag || *newVerboseFlag, Stream: *oldStreamFlag || *newStreamFlag, Benchmark: *oldBenchFlag || *newBenchFlag, BenchmarkTime: benchTime, BenchmarkMem: *newBenchMem, KeepWorkDir: *oldWorkFlag || *newWorkFlag, } if *oldListFlag || *newListFlag { w := bufio.NewWriter(os.Stdout) for _, name := range ListAll(conf) { fmt.Fprintln(w, name) } w.Flush() return } result := RunAll(conf) println(result.String()) if !result.Passed() { testingT.Fail() } } // RunAll runs all test suites registered with the Suite function, using the // provided run configuration. func RunAll(runConf *RunConf) *Result { result := Result{} for _, suite := range allSuites { result.Add(Run(suite, runConf)) } return &result } // Run runs the provided test suite using the provided run configuration. func Run(suite interface{}, runConf *RunConf) *Result { runner := newSuiteRunner(suite, runConf) return runner.run() } // ListAll returns the names of all the test functions registered with the // Suite function that will be run with the provided run configuration. func ListAll(runConf *RunConf) []string { var names []string for _, suite := range allSuites { names = append(names, List(suite, runConf)...) } return names } // List returns the names of the test functions in the given // suite that will be run with the provided run configuration. func List(suite interface{}, runConf *RunConf) []string { var names []string runner := newSuiteRunner(suite, runConf) for _, t := range runner.tests { names = append(names, t.String()) } return names } // ----------------------------------------------------------------------- // Result methods. 
func (r *Result) Add(other *Result) { r.Succeeded += other.Succeeded r.Skipped += other.Skipped r.Failed += other.Failed r.Panicked += other.Panicked r.FixturePanicked += other.FixturePanicked r.ExpectedFailures += other.ExpectedFailures r.Missed += other.Missed if r.WorkDir != "" && other.WorkDir != "" { r.WorkDir += ":" + other.WorkDir } else if other.WorkDir != "" { r.WorkDir = other.WorkDir } } func (r *Result) Passed() bool { return (r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 && r.Missed == 0 && r.RunError == nil) } func (r *Result) String() string { if r.RunError != nil { return "ERROR: " + r.RunError.Error() } var value string if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 && r.Missed == 0 { value = "OK: " } else { value = "OOPS: " } value += fmt.Sprintf("%d passed", r.Succeeded) if r.Skipped != 0 { value += fmt.Sprintf(", %d skipped", r.Skipped) } if r.ExpectedFailures != 0 { value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures) } if r.Failed != 0 { value += fmt.Sprintf(", %d FAILED", r.Failed) } if r.Panicked != 0 { value += fmt.Sprintf(", %d PANICKED", r.Panicked) } if r.FixturePanicked != 0 { value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked) } if r.Missed != 0 { value += fmt.Sprintf(", %d MISSED", r.Missed) } if r.WorkDir != "" { value += "\nWORK=" + r.WorkDir } return value } charm-2.1.1/src/gopkg.in/check.v1/check_test.go0000664000175000017500000001206012672604604020151 0ustar marcomarco// This file contains just a few generic helpers which are used by the // other test files. package check_test import ( "flag" "fmt" "os" "regexp" "runtime" "testing" "time" "gopkg.in/check.v1" ) // We count the number of suites run at least to get a vague hint that the // test suite is behaving as it should. Otherwise a bug introduced at the // very core of the system could go unperceived. 
const suitesRunExpected = 8 var suitesRun int = 0 func Test(t *testing.T) { check.TestingT(t) if suitesRun != suitesRunExpected && flag.Lookup("check.f").Value.String() == "" { critical(fmt.Sprintf("Expected %d suites to run rather than %d", suitesRunExpected, suitesRun)) } } // ----------------------------------------------------------------------- // Helper functions. // Break down badly. This is used in test cases which can't yet assume // that the fundamental bits are working. func critical(error string) { fmt.Fprintln(os.Stderr, "CRITICAL: "+error) os.Exit(1) } // Return the file line where it's called. func getMyLine() int { if _, _, line, ok := runtime.Caller(1); ok { return line } return -1 } // ----------------------------------------------------------------------- // Helper type implementing a basic io.Writer for testing output. // Type implementing the io.Writer interface for analyzing output. type String struct { value string } // The only function required by the io.Writer interface. Will append // written data to the String.value string. func (s *String) Write(p []byte) (n int, err error) { s.value += string(p) return len(p), nil } // Trivial wrapper to test errors happening on a different file // than the test itself. func checkEqualWrapper(c *check.C, obtained, expected interface{}) (result bool, line int) { return c.Check(obtained, check.Equals, expected), getMyLine() } // ----------------------------------------------------------------------- // Helper suite for testing basic fail behavior. type FailHelper struct { testLine int } func (s *FailHelper) TestLogAndFail(c *check.C) { s.testLine = getMyLine() - 1 c.Log("Expected failure!") c.Fail() } // ----------------------------------------------------------------------- // Helper suite for testing basic success behavior. 
type SuccessHelper struct{} func (s *SuccessHelper) TestLogAndSucceed(c *check.C) { c.Log("Expected success!") } // ----------------------------------------------------------------------- // Helper suite for testing ordering and behavior of fixture. type FixtureHelper struct { calls []string panicOn string skip bool skipOnN int sleepOn string sleep time.Duration bytes int64 } func (s *FixtureHelper) trace(name string, c *check.C) { s.calls = append(s.calls, name) if name == s.panicOn { panic(name) } if s.sleep > 0 && s.sleepOn == name { time.Sleep(s.sleep) } if s.skip && s.skipOnN == len(s.calls)-1 { c.Skip("skipOnN == n") } } func (s *FixtureHelper) SetUpSuite(c *check.C) { s.trace("SetUpSuite", c) } func (s *FixtureHelper) TearDownSuite(c *check.C) { s.trace("TearDownSuite", c) } func (s *FixtureHelper) SetUpTest(c *check.C) { s.trace("SetUpTest", c) } func (s *FixtureHelper) TearDownTest(c *check.C) { s.trace("TearDownTest", c) } func (s *FixtureHelper) Test1(c *check.C) { s.trace("Test1", c) } func (s *FixtureHelper) Test2(c *check.C) { s.trace("Test2", c) } func (s *FixtureHelper) Benchmark1(c *check.C) { s.trace("Benchmark1", c) for i := 0; i < c.N; i++ { time.Sleep(s.sleep) } } func (s *FixtureHelper) Benchmark2(c *check.C) { s.trace("Benchmark2", c) c.SetBytes(1024) for i := 0; i < c.N; i++ { time.Sleep(s.sleep) } } func (s *FixtureHelper) Benchmark3(c *check.C) { var x []int64 s.trace("Benchmark3", c) for i := 0; i < c.N; i++ { time.Sleep(s.sleep) x = make([]int64, 5) _ = x } } // ----------------------------------------------------------------------- // Helper which checks the state of the test and ensures that it matches // the given expectations. Depends on c.Errorf() working, so shouldn't // be used to test this one function. type expectedState struct { name string result interface{} failed bool log string } // Verify the state of the test. 
Note that since this also verifies if // the test is supposed to be in a failed state, no other checks should // be done in addition to what is being tested. func checkState(c *check.C, result interface{}, expected *expectedState) { failed := c.Failed() c.Succeed() log := c.GetTestLog() matched, matchError := regexp.MatchString("^"+expected.log+"$", log) if matchError != nil { c.Errorf("Error in matching expression used in testing %s", expected.name) } else if !matched { c.Errorf("%s logged:\n----------\n%s----------\n\nExpected:\n----------\n%s\n----------", expected.name, log, expected.log) } if result != expected.result { c.Errorf("%s returned %#v rather than %#v", expected.name, result, expected.result) } if failed != expected.failed { if failed { c.Errorf("%s has failed when it shouldn't", expected.name) } else { c.Errorf("%s has not failed when it should", expected.name) } } } charm-2.1.1/src/gopkg.in/check.v1/reporter_test.go0000664000175000017500000001011112672604604020731 0ustar marcomarcopackage check_test import ( "fmt" "path/filepath" "runtime" . 
"gopkg.in/check.v1" ) var _ = Suite(&reporterS{}) type reporterS struct { testFile string } func (s *reporterS) SetUpSuite(c *C) { _, fileName, _, ok := runtime.Caller(0) c.Assert(ok, Equals, true) s.testFile = filepath.Base(fileName) } func (s *reporterS) TestWrite(c *C) { testString := "test string" output := String{} dummyStream := true dummyVerbose := true o := NewOutputWriter(&output, dummyStream, dummyVerbose) o.Write([]byte(testString)) c.Assert(output.value, Equals, testString) } func (s *reporterS) TestWriteCallStartedWithStreamFlag(c *C) { testLabel := "test started label" stream := true output := String{} dummyVerbose := true o := NewOutputWriter(&output, stream, dummyVerbose) o.WriteCallStarted(testLabel, c) expected := fmt.Sprintf("%s: %s:\\d+: %s\n", testLabel, s.testFile, c.TestName()) c.Assert(output.value, Matches, expected) } func (s *reporterS) TestWriteCallStartedWithoutStreamFlag(c *C) { stream := false output := String{} dummyLabel := "dummy" dummyVerbose := true o := NewOutputWriter(&output, stream, dummyVerbose) o.WriteCallStarted(dummyLabel, c) c.Assert(output.value, Equals, "") } func (s *reporterS) TestWriteCallProblemWithStreamFlag(c *C) { testLabel := "test problem label" stream := true output := String{} dummyVerbose := true o := NewOutputWriter(&output, stream, dummyVerbose) o.WriteCallProblem(testLabel, c) expected := fmt.Sprintf("%s: %s:\\d+: %s\n\n", testLabel, s.testFile, c.TestName()) c.Assert(output.value, Matches, expected) } func (s *reporterS) TestWriteCallProblemWithoutStreamFlag(c *C) { testLabel := "test problem label" stream := false output := String{} dummyVerbose := true o := NewOutputWriter(&output, stream, dummyVerbose) o.WriteCallProblem(testLabel, c) expected := fmt.Sprintf(""+ "\n"+ "----------------------------------------------------------------------\n"+ "%s: %s:\\d+: %s\n\n", testLabel, s.testFile, c.TestName()) c.Assert(output.value, Matches, expected) } func (s *reporterS) 
TestWriteCallProblemWithoutStreamFlagWithLog(c *C) { testLabel := "test problem label" testLog := "test log" stream := false output := String{} dummyVerbose := true o := NewOutputWriter(&output, stream, dummyVerbose) c.Log(testLog) o.WriteCallProblem(testLabel, c) expected := fmt.Sprintf(""+ "\n"+ "----------------------------------------------------------------------\n"+ "%s: %s:\\d+: %s\n\n%s\n", testLabel, s.testFile, c.TestName(), testLog) c.Assert(output.value, Matches, expected) } func (s *reporterS) TestWriteCallSuccessWithStreamFlag(c *C) { testLabel := "test success label" stream := true output := String{} dummyVerbose := true o := NewOutputWriter(&output, stream, dummyVerbose) o.WriteCallSuccess(testLabel, c) expected := fmt.Sprintf("%s: %s:\\d+: %s\t\\d\\.\\d+s\n\n", testLabel, s.testFile, c.TestName()) c.Assert(output.value, Matches, expected) } func (s *reporterS) TestWriteCallSuccessWithStreamFlagAndReason(c *C) { testLabel := "test success label" testReason := "test skip reason" stream := true output := String{} dummyVerbose := true o := NewOutputWriter(&output, stream, dummyVerbose) c.FakeSkip(testReason) o.WriteCallSuccess(testLabel, c) expected := fmt.Sprintf("%s: %s:\\d+: %s \\(%s\\)\t\\d\\.\\d+s\n\n", testLabel, s.testFile, c.TestName(), testReason) c.Assert(output.value, Matches, expected) } func (s *reporterS) TestWriteCallSuccessWithoutStreamFlagWithVerboseFlag(c *C) { testLabel := "test success label" stream := false verbose := true output := String{} o := NewOutputWriter(&output, stream, verbose) o.WriteCallSuccess(testLabel, c) expected := fmt.Sprintf("%s: %s:\\d+: %s\t\\d\\.\\d+s\n", testLabel, s.testFile, c.TestName()) c.Assert(output.value, Matches, expected) } func (s *reporterS) TestWriteCallSuccessWithoutStreamFlagWithoutVerboseFlag(c *C) { testLabel := "test success label" stream := false verbose := false output := String{} o := NewOutputWriter(&output, stream, verbose) o.WriteCallSuccess(testLabel, c) c.Assert(output.value, Equals, 
"") } charm-2.1.1/src/gopkg.in/check.v1/checkers.go0000664000175000017500000003033712672604604017633 0ustar marcomarcopackage check import ( "fmt" "reflect" "regexp" ) // ----------------------------------------------------------------------- // CommentInterface and Commentf helper, to attach extra information to checks. type comment struct { format string args []interface{} } // Commentf returns an infomational value to use with Assert or Check calls. // If the checker test fails, the provided arguments will be passed to // fmt.Sprintf, and will be presented next to the logged failure. // // For example: // // c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i)) // // Note that if the comment is constant, a better option is to // simply use a normal comment right above or next to the line, as // it will also get printed with any errors: // // c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123) // func Commentf(format string, args ...interface{}) CommentInterface { return &comment{format, args} } // CommentInterface must be implemented by types that attach extra // information to failed checks. See the Commentf function for details. type CommentInterface interface { CheckCommentString() string } func (c *comment) CheckCommentString() string { return fmt.Sprintf(c.format, c.args...) } // ----------------------------------------------------------------------- // The Checker interface. // The Checker interface must be provided by checkers used with // the Assert and Check verification methods. type Checker interface { Info() *CheckerInfo Check(params []interface{}, names []string) (result bool, error string) } // See the Checker interface. type CheckerInfo struct { Name string Params []string } func (info *CheckerInfo) Info() *CheckerInfo { return info } // ----------------------------------------------------------------------- // Not checker logic inverter. // The Not checker inverts the logic of the provided checker. 
The // resulting checker will succeed where the original one failed, and // vice-versa. // // For example: // // c.Assert(a, Not(Equals), b) // func Not(checker Checker) Checker { return ¬Checker{checker} } type notChecker struct { sub Checker } func (checker *notChecker) Info() *CheckerInfo { info := *checker.sub.Info() info.Name = "Not(" + info.Name + ")" return &info } func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) { result, error = checker.sub.Check(params, names) result = !result return } // ----------------------------------------------------------------------- // IsNil checker. type isNilChecker struct { *CheckerInfo } // The IsNil checker tests whether the obtained value is nil. // // For example: // // c.Assert(err, IsNil) // var IsNil Checker = &isNilChecker{ &CheckerInfo{Name: "IsNil", Params: []string{"value"}}, } func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) { return isNil(params[0]), "" } func isNil(obtained interface{}) (result bool) { if obtained == nil { result = true } else { switch v := reflect.ValueOf(obtained); v.Kind() { case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: return v.IsNil() } } return } // ----------------------------------------------------------------------- // NotNil checker. Alias for Not(IsNil), since it's so common. type notNilChecker struct { *CheckerInfo } // The NotNil checker verifies that the obtained value is not nil. // // For example: // // c.Assert(iface, NotNil) // // This is an alias for Not(IsNil), made available since it's a // fairly common check. 
// var NotNil Checker = ¬NilChecker{ &CheckerInfo{Name: "NotNil", Params: []string{"value"}}, } func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) { return !isNil(params[0]), "" } // ----------------------------------------------------------------------- // Equals checker. type equalsChecker struct { *CheckerInfo } // The Equals checker verifies that the obtained value is equal to // the expected value, according to usual Go semantics for ==. // // For example: // // c.Assert(value, Equals, 42) // var Equals Checker = &equalsChecker{ &CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}}, } func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) { defer func() { if v := recover(); v != nil { result = false error = fmt.Sprint(v) } }() return params[0] == params[1], "" } // ----------------------------------------------------------------------- // DeepEquals checker. type deepEqualsChecker struct { *CheckerInfo } // The DeepEquals checker verifies that the obtained value is deep-equal to // the expected value. The check will work correctly even when facing // slices, interfaces, and values of different types (which always fail // the test). // // For example: // // c.Assert(value, DeepEquals, 42) // c.Assert(array, DeepEquals, []string{"hi", "there"}) // var DeepEquals Checker = &deepEqualsChecker{ &CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}}, } func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) { return reflect.DeepEqual(params[0], params[1]), "" } // ----------------------------------------------------------------------- // HasLen checker. type hasLenChecker struct { *CheckerInfo } // The HasLen checker verifies that the obtained value has the // provided length. 
In many cases this is superior to using Equals // in conjuction with the len function because in case the check // fails the value itself will be printed, instead of its length, // providing more details for figuring the problem. // // For example: // // c.Assert(list, HasLen, 5) // var HasLen Checker = &hasLenChecker{ &CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}}, } func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) { n, ok := params[1].(int) if !ok { return false, "n must be an int" } value := reflect.ValueOf(params[0]) switch value.Kind() { case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String: default: return false, "obtained value type has no length" } return value.Len() == n, "" } // ----------------------------------------------------------------------- // ErrorMatches checker. type errorMatchesChecker struct { *CheckerInfo } // The ErrorMatches checker verifies that the error value // is non nil and matches the regular expression provided. // // For example: // // c.Assert(err, ErrorMatches, "perm.*denied") // var ErrorMatches Checker = errorMatchesChecker{ &CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}}, } func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) { if params[0] == nil { return false, "Error value is nil" } err, ok := params[0].(error) if !ok { return false, "Value is not an error" } params[0] = err.Error() names[0] = "error" return matches(params[0], params[1]) } // ----------------------------------------------------------------------- // Matches checker. type matchesChecker struct { *CheckerInfo } // The Matches checker verifies that the string provided as the obtained // value (or the string resulting from obtained.String()) matches the // regular expression provided. 
// // For example: // // c.Assert(err, Matches, "perm.*denied") // var Matches Checker = &matchesChecker{ &CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}}, } func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) { return matches(params[0], params[1]) } func matches(value, regex interface{}) (result bool, error string) { reStr, ok := regex.(string) if !ok { return false, "Regex must be a string" } valueStr, valueIsStr := value.(string) if !valueIsStr { if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr { valueStr, valueIsStr = valueWithStr.String(), true } } if valueIsStr { matches, err := regexp.MatchString("^"+reStr+"$", valueStr) if err != nil { return false, "Can't compile regex: " + err.Error() } return matches, "" } return false, "Obtained value is not a string and has no .String()" } // ----------------------------------------------------------------------- // Panics checker. type panicsChecker struct { *CheckerInfo } // The Panics checker verifies that calling the provided zero-argument // function will cause a panic which is deep-equal to the provided value. // // For example: // // c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}). // // var Panics Checker = &panicsChecker{ &CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}}, } func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) { f := reflect.ValueOf(params[0]) if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { return false, "Function must take zero arguments" } defer func() { // If the function has not panicked, then don't do the check. 
if error != "" { return } params[0] = recover() names[0] = "panic" result = reflect.DeepEqual(params[0], params[1]) }() f.Call(nil) return false, "Function has not panicked" } type panicMatchesChecker struct { *CheckerInfo } // The PanicMatches checker verifies that calling the provided zero-argument // function will cause a panic with an error value matching // the regular expression provided. // // For example: // // c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`). // // var PanicMatches Checker = &panicMatchesChecker{ &CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}}, } func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) { f := reflect.ValueOf(params[0]) if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { return false, "Function must take zero arguments" } defer func() { // If the function has not panicked, then don't do the check. if errmsg != "" { return } obtained := recover() names[0] = "panic" if e, ok := obtained.(error); ok { params[0] = e.Error() } else if _, ok := obtained.(string); ok { params[0] = obtained } else { errmsg = "Panic value is not a string or an error" return } result, errmsg = matches(params[0], params[1]) }() f.Call(nil) return false, "Function has not panicked" } // ----------------------------------------------------------------------- // FitsTypeOf checker. type fitsTypeChecker struct { *CheckerInfo } // The FitsTypeOf checker verifies that the obtained value is // assignable to a variable with the same type as the provided // sample value. 
// // For example: // // c.Assert(value, FitsTypeOf, int64(0)) // c.Assert(value, FitsTypeOf, os.Error(nil)) // var FitsTypeOf Checker = &fitsTypeChecker{ &CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}}, } func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) { obtained := reflect.ValueOf(params[0]) sample := reflect.ValueOf(params[1]) if !obtained.IsValid() { return false, "" } if !sample.IsValid() { return false, "Invalid sample value" } return obtained.Type().AssignableTo(sample.Type()), "" } // ----------------------------------------------------------------------- // Implements checker. type implementsChecker struct { *CheckerInfo } // The Implements checker verifies that the obtained value // implements the interface specified via a pointer to an interface // variable. // // For example: // // var e os.Error // c.Assert(err, Implements, &e) // var Implements Checker = &implementsChecker{ &CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}}, } func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) { obtained := reflect.ValueOf(params[0]) ifaceptr := reflect.ValueOf(params[1]) if !obtained.IsValid() { return false, "" } if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface { return false, "ifaceptr should be a pointer to an interface variable" } return obtained.Type().Implements(ifaceptr.Elem().Type()), "" } charm-2.1.1/src/gopkg.in/yaml.v2/0000775000175000017500000000000012672604537015402 5ustar marcomarcocharm-2.1.1/src/gopkg.in/yaml.v2/decode.go0000664000175000017500000003614012672604537017160 0ustar marcomarcopackage yaml import ( "encoding" "encoding/base64" "fmt" "math" "reflect" "strconv" "time" ) const ( documentNode = 1 << iota mappingNode sequenceNode scalarNode aliasNode ) type node struct { kind int line, column int tag string value string implicit bool 
children []*node anchors map[string]*node } // ---------------------------------------------------------------------------- // Parser, produces a node tree out of a libyaml event stream. type parser struct { parser yaml_parser_t event yaml_event_t doc *node } func newParser(b []byte) *parser { p := parser{} if !yaml_parser_initialize(&p.parser) { panic("failed to initialize YAML emitter") } if len(b) == 0 { b = []byte{'\n'} } yaml_parser_set_input_string(&p.parser, b) p.skip() if p.event.typ != yaml_STREAM_START_EVENT { panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) } p.skip() return &p } func (p *parser) destroy() { if p.event.typ != yaml_NO_EVENT { yaml_event_delete(&p.event) } yaml_parser_delete(&p.parser) } func (p *parser) skip() { if p.event.typ != yaml_NO_EVENT { if p.event.typ == yaml_STREAM_END_EVENT { failf("attempted to go past the end of stream; corrupted value?") } yaml_event_delete(&p.event) } if !yaml_parser_parse(&p.parser, &p.event) { p.fail() } } func (p *parser) fail() { var where string var line int if p.parser.problem_mark.line != 0 { line = p.parser.problem_mark.line } else if p.parser.context_mark.line != 0 { line = p.parser.context_mark.line } if line != 0 { where = "line " + strconv.Itoa(line) + ": " } var msg string if len(p.parser.problem) > 0 { msg = p.parser.problem } else { msg = "unknown problem parsing YAML content" } failf("%s%s", where, msg) } func (p *parser) anchor(n *node, anchor []byte) { if anchor != nil { p.doc.anchors[string(anchor)] = n } } func (p *parser) parse() *node { switch p.event.typ { case yaml_SCALAR_EVENT: return p.scalar() case yaml_ALIAS_EVENT: return p.alias() case yaml_MAPPING_START_EVENT: return p.mapping() case yaml_SEQUENCE_START_EVENT: return p.sequence() case yaml_DOCUMENT_START_EVENT: return p.document() case yaml_STREAM_END_EVENT: // Happens when attempting to decode an empty buffer. 
return nil default: panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) } panic("unreachable") } func (p *parser) node(kind int) *node { return &node{ kind: kind, line: p.event.start_mark.line, column: p.event.start_mark.column, } } func (p *parser) document() *node { n := p.node(documentNode) n.anchors = make(map[string]*node) p.doc = n p.skip() n.children = append(n.children, p.parse()) if p.event.typ != yaml_DOCUMENT_END_EVENT { panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) } p.skip() return n } func (p *parser) alias() *node { n := p.node(aliasNode) n.value = string(p.event.anchor) p.skip() return n } func (p *parser) scalar() *node { n := p.node(scalarNode) n.value = string(p.event.value) n.tag = string(p.event.tag) n.implicit = p.event.implicit p.anchor(n, p.event.anchor) p.skip() return n } func (p *parser) sequence() *node { n := p.node(sequenceNode) p.anchor(n, p.event.anchor) p.skip() for p.event.typ != yaml_SEQUENCE_END_EVENT { n.children = append(n.children, p.parse()) } p.skip() return n } func (p *parser) mapping() *node { n := p.node(mappingNode) p.anchor(n, p.event.anchor) p.skip() for p.event.typ != yaml_MAPPING_END_EVENT { n.children = append(n.children, p.parse(), p.parse()) } p.skip() return n } // ---------------------------------------------------------------------------- // Decoder, unmarshals a node into a provided value. 
type decoder struct { doc *node aliases map[string]bool mapType reflect.Type terrors []string } var ( mapItemType = reflect.TypeOf(MapItem{}) durationType = reflect.TypeOf(time.Duration(0)) defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) ifaceType = defaultMapType.Elem() ) func newDecoder() *decoder { d := &decoder{mapType: defaultMapType} d.aliases = make(map[string]bool) return d } func (d *decoder) terror(n *node, tag string, out reflect.Value) { if n.tag != "" { tag = n.tag } value := n.value if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { if len(value) > 10 { value = " `" + value[:7] + "...`" } else { value = " `" + value + "`" } } d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) } func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { terrlen := len(d.terrors) err := u.UnmarshalYAML(func(v interface{}) (err error) { defer handleErr(&err) d.unmarshal(n, reflect.ValueOf(v)) if len(d.terrors) > terrlen { issues := d.terrors[terrlen:] d.terrors = d.terrors[:terrlen] return &TypeError{issues} } return nil }) if e, ok := err.(*TypeError); ok { d.terrors = append(d.terrors, e.Errors...) return false } if err != nil { fail(err) } return true } // d.prepare initializes and dereferences pointers and calls UnmarshalYAML // if a value is found to implement it. // It returns the initialized and dereferenced out value, whether // unmarshalling was already done by UnmarshalYAML, and if so whether // its types unmarshalled appropriately. // // If n holds a null value, prepare returns before doing anything. 
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
	// Null values are handled by the caller; leave out untouched.
	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
		return out, false, false
	}
	again := true
	for again {
		again = false
		if out.Kind() == reflect.Ptr {
			if out.IsNil() {
				out.Set(reflect.New(out.Type().Elem()))
			}
			out = out.Elem()
			again = true
		}
		if out.CanAddr() {
			if u, ok := out.Addr().Interface().(Unmarshaler); ok {
				good = d.callUnmarshaler(n, u)
				return out, true, good
			}
		}
	}
	return out, false, false
}

// unmarshal dispatches node n to the kind-specific decoder for out.
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
	switch n.kind {
	case documentNode:
		return d.document(n, out)
	case aliasNode:
		return d.alias(n, out)
	}
	out, unmarshaled, good := d.prepare(n, out)
	if unmarshaled {
		return good
	}
	switch n.kind {
	case scalarNode:
		good = d.scalar(n, out)
	case mappingNode:
		good = d.mapping(n, out)
	case sequenceNode:
		good = d.sequence(n, out)
	default:
		panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
	}
	return good
}

// document decodes a document node by decoding its single root child.
func (d *decoder) document(n *node, out reflect.Value) (good bool) {
	if len(n.children) == 1 {
		d.doc = n
		d.unmarshal(n.children[0], out)
		return true
	}
	return false
}

// alias resolves an alias node against the document's anchor table,
// guarding against unknown anchors and self-referential cycles.
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
	an, ok := d.doc.anchors[n.value]
	if !ok {
		failf("unknown anchor '%s' referenced", n.value)
	}
	if d.aliases[n.value] {
		failf("anchor '%s' value contains itself", n.value)
	}
	d.aliases[n.value] = true
	good = d.unmarshal(an, out)
	delete(d.aliases, n.value)
	return good
}

var zeroValue reflect.Value

// resetMap deletes every key of a map that cannot be replaced wholesale
// (i.e. a non-addressable map value).
func resetMap(out reflect.Value) {
	for _, k := range out.MapKeys() {
		out.SetMapIndex(k, zeroValue)
	}
}

// scalar decodes a scalar node into out, resolving its tag to a typed Go
// value and then converting that value to out's kind where possible.
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
	var tag string
	var resolved interface{}
	if n.tag == "" && !n.implicit {
		tag = yaml_STR_TAG
		resolved = n.value
	} else {
		tag, resolved = resolve(n.tag, n.value)
		if tag == yaml_BINARY_TAG {
			data, err := base64.StdEncoding.DecodeString(resolved.(string))
			if err != nil {
				failf("!!binary value contains invalid base64 data")
			}
			resolved = string(data)
		}
	}
	if resolved == nil {
		if out.Kind() == reflect.Map && !out.CanAddr() {
			resetMap(out)
		} else {
			out.Set(reflect.Zero(out.Type()))
		}
		return true
	}
	// encoding.TextUnmarshaler takes precedence over the kind switch below.
	if s, ok := resolved.(string); ok && out.CanAddr() {
		if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
			err := u.UnmarshalText([]byte(s))
			if err != nil {
				fail(err)
			}
			return true
		}
	}
	switch out.Kind() {
	case reflect.String:
		if tag == yaml_BINARY_TAG {
			out.SetString(resolved.(string))
			good = true
		} else if resolved != nil {
			// Use the raw node value so e.g. quoted numbers keep their text.
			out.SetString(n.value)
			good = true
		}
	case reflect.Interface:
		if resolved == nil {
			out.Set(reflect.Zero(out.Type()))
		} else {
			out.Set(reflect.ValueOf(resolved))
		}
		good = true
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		// Overflow is checked before every SetInt; failure falls through to terror.
		switch resolved := resolved.(type) {
		case int:
			if !out.OverflowInt(int64(resolved)) {
				out.SetInt(int64(resolved))
				good = true
			}
		case int64:
			if !out.OverflowInt(resolved) {
				out.SetInt(resolved)
				good = true
			}
		case uint64:
			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
				out.SetInt(int64(resolved))
				good = true
			}
		case float64:
			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
				out.SetInt(int64(resolved))
				good = true
			}
		case string:
			// Special case: strings may decode into time.Duration fields.
			if out.Type() == durationType {
				d, err := time.ParseDuration(resolved)
				if err == nil {
					out.SetInt(int64(d))
					good = true
				}
			}
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		switch resolved := resolved.(type) {
		case int:
			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				good = true
			}
		case int64:
			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				good = true
			}
		case uint64:
			if !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				good = true
			}
		case float64:
			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				good = true
			}
		}
	case reflect.Bool:
		switch resolved := resolved.(type) {
		case bool:
			out.SetBool(resolved)
			good = true
		}
	case reflect.Float32, reflect.Float64:
		switch resolved := resolved.(type) {
		case int:
			out.SetFloat(float64(resolved))
			good = true
		case int64:
			out.SetFloat(float64(resolved))
			good = true
		case uint64:
			out.SetFloat(float64(resolved))
			good = true
		case float64:
			out.SetFloat(resolved)
			good = true
		}
	case reflect.Ptr:
		if out.Type().Elem() == reflect.TypeOf(resolved) {
			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
			elem := reflect.New(out.Type().Elem())
			elem.Elem().Set(reflect.ValueOf(resolved))
			out.Set(elem)
			good = true
		}
	}
	if !good {
		d.terror(n, tag, out)
	}
	return good
}

// settableValueOf wraps i in a freshly allocated, settable reflect.Value.
func settableValueOf(i interface{}) reflect.Value {
	v := reflect.ValueOf(i)
	sv := reflect.New(v.Type()).Elem()
	sv.Set(v)
	return sv
}

// sequence decodes a sequence node into a slice (or, via a generic
// []interface{}, into an interface value). Children that fail to decode
// are dropped; the result is truncated to the successful count.
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
	l := len(n.children)
	var iface reflect.Value
	switch out.Kind() {
	case reflect.Slice:
		out.Set(reflect.MakeSlice(out.Type(), l, l))
	case reflect.Interface:
		// No type hints. Will have to use a generic sequence.
		iface = out
		out = settableValueOf(make([]interface{}, l))
	default:
		d.terror(n, yaml_SEQ_TAG, out)
		return false
	}
	et := out.Type().Elem()
	j := 0
	for i := 0; i < l; i++ {
		e := reflect.New(et).Elem()
		if ok := d.unmarshal(n.children[i], e); ok {
			out.Index(j).Set(e)
			j++
		}
	}
	out.Set(out.Slice(0, j))
	if iface.IsValid() {
		iface.Set(out)
	}
	return true
}

// mapping decodes a mapping node into a struct, map, MapSlice, or
// interface value, honouring the "<<" merge key and forbidding
// unhashable (map/slice) keys.
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
	switch out.Kind() {
	case reflect.Struct:
		return d.mappingStruct(n, out)
	case reflect.Slice:
		return d.mappingSlice(n, out)
	case reflect.Map:
		// okay
	case reflect.Interface:
		if d.mapType.Kind() == reflect.Map {
			iface := out
			out = reflect.MakeMap(d.mapType)
			iface.Set(out)
		} else {
			slicev := reflect.New(d.mapType).Elem()
			if !d.mappingSlice(n, slicev) {
				return false
			}
			out.Set(slicev)
			return true
		}
	default:
		d.terror(n, yaml_MAP_TAG, out)
		return false
	}
	outt := out.Type()
	kt := outt.Key()
	et := outt.Elem()
	mapType := d.mapType
	// Propagate the concrete generic map type to nested interface values.
	if outt.Key() == ifaceType && outt.Elem() == ifaceType {
		d.mapType = outt
	}
	if out.IsNil() {
		out.Set(reflect.MakeMap(outt))
	}
	l := len(n.children)
	for i := 0; i < l; i += 2 {
		if isMerge(n.children[i]) {
			d.merge(n.children[i+1], out)
			continue
		}
		k := reflect.New(kt).Elem()
		if d.unmarshal(n.children[i], k) {
			kkind := k.Kind()
			if kkind == reflect.Interface {
				kkind = k.Elem().Kind()
			}
			if kkind == reflect.Map || kkind == reflect.Slice {
				failf("invalid map key: %#v", k.Interface())
			}
			e := reflect.New(et).Elem()
			if d.unmarshal(n.children[i+1], e) {
				out.SetMapIndex(k, e)
			}
		}
	}
	d.mapType = mapType
	return true
}

// mappingSlice decodes a mapping node into a []MapItem (MapSlice),
// preserving key order.
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
	outt := out.Type()
	if outt.Elem() != mapItemType {
		d.terror(n, yaml_MAP_TAG, out)
		return false
	}
	mapType := d.mapType
	d.mapType = outt
	var slice []MapItem
	var l = len(n.children)
	for i := 0; i < l; i += 2 {
		if isMerge(n.children[i]) {
			d.merge(n.children[i+1], out)
			continue
		}
		item := MapItem{}
		k := reflect.ValueOf(&item.Key).Elem()
		if d.unmarshal(n.children[i], k) {
			v := reflect.ValueOf(&item.Value).Elem()
			if d.unmarshal(n.children[i+1], v) {
				slice = append(slice, item)
			}
		}
	}
	out.Set(reflect.ValueOf(slice))
	d.mapType = mapType
	return true
}

// mappingStruct decodes a mapping node into a struct using cached field
// metadata; unmatched keys go into the ",inline" map field if one exists.
func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
	sinfo, err := getStructInfo(out.Type())
	if err != nil {
		panic(err)
	}
	name := settableValueOf("")
	l := len(n.children)
	var inlineMap reflect.Value
	var elemType reflect.Type
	if sinfo.InlineMap != -1 {
		inlineMap = out.Field(sinfo.InlineMap)
		inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
		elemType = inlineMap.Type().Elem()
	}
	for i := 0; i < l; i += 2 {
		ni := n.children[i]
		if isMerge(ni) {
			d.merge(n.children[i+1], out)
			continue
		}
		if !d.unmarshal(ni, name) {
			continue
		}
		if info, ok := sinfo.FieldsMap[name.String()]; ok {
			var field reflect.Value
			if info.Inline == nil {
				field = out.Field(info.Num)
			} else {
				field = out.FieldByIndex(info.Inline)
			}
			d.unmarshal(n.children[i+1], field)
		} else if sinfo.InlineMap != -1 {
			if inlineMap.IsNil() {
				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
			}
			value := reflect.New(elemType).Elem()
			d.unmarshal(n.children[i+1], value)
			inlineMap.SetMapIndex(name, value)
		}
	}
	return true
}

// failWantMap aborts decoding with the standard merge-value error.
func failWantMap() {
	failf("map merge requires map or sequence of maps as the value")
}

// merge applies a "<<" merge value (a mapping, an alias to one, or a
// sequence of either) onto out.
func (d *decoder) merge(n *node, out reflect.Value) {
	switch n.kind {
	case mappingNode:
		d.unmarshal(n, out)
	case aliasNode:
		an, ok := d.doc.anchors[n.value]
		if ok && an.kind != mappingNode {
			failWantMap()
		}
		d.unmarshal(n, out)
	case sequenceNode:
		// Step backwards as earlier nodes take precedence.
		for i := len(n.children) - 1; i >= 0; i-- {
			ni := n.children[i]
			if ni.kind == aliasNode {
				an, ok := d.doc.anchors[ni.value]
				if ok && an.kind != mappingNode {
					failWantMap()
				}
			} else if ni.kind != mappingNode {
				failWantMap()
			}
			d.unmarshal(ni, out)
		}
	default:
		failWantMap()
	}
}

// isMerge reports whether n is the YAML merge key "<<".
func isMerge(n *node) bool {
	return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
}
charm-2.1.1/src/gopkg.in/yaml.v2/emitterc.go0000664000175000017500000013037412672604537017545 0ustar  marcomarcopackage yaml

import (
	"bytes"
)

// Flush the buffer if needed.
// The +5 slack leaves room for the widest UTF-8 sequence plus a CRLF break.
func flush(emitter *yaml_emitter_t) bool {
	if emitter.buffer_pos+5 >= len(emitter.buffer) {
		return yaml_emitter_flush(emitter)
	}
	return true
}

// Put a character to the output buffer.
func put(emitter *yaml_emitter_t, value byte) bool {
	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
		return false
	}
	emitter.buffer[emitter.buffer_pos] = value
	emitter.buffer_pos++
	emitter.column++
	return true
}

// Put a line break to the output buffer.
func put_break(emitter *yaml_emitter_t) bool {
	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
		return false
	}
	switch emitter.line_break {
	case yaml_CR_BREAK:
		emitter.buffer[emitter.buffer_pos] = '\r'
		emitter.buffer_pos += 1
	case yaml_LN_BREAK:
		emitter.buffer[emitter.buffer_pos] = '\n'
		emitter.buffer_pos += 1
	case yaml_CRLN_BREAK:
		emitter.buffer[emitter.buffer_pos+0] = '\r'
		emitter.buffer[emitter.buffer_pos+1] = '\n'
		emitter.buffer_pos += 2
	default:
		panic("unknown line break setting")
	}
	emitter.column = 0
	emitter.line++
	return true
}

// Copy a character from a string into buffer.
// write copies one UTF-8 encoded character (1-4 bytes, per width(s[*i]))
// from s into the emitter buffer and advances *i past it.
func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
		return false
	}
	p := emitter.buffer_pos
	w := width(s[*i])
	switch w {
	case 4:
		emitter.buffer[p+3] = s[*i+3]
		fallthrough
	case 3:
		emitter.buffer[p+2] = s[*i+2]
		fallthrough
	case 2:
		emitter.buffer[p+1] = s[*i+1]
		fallthrough
	case 1:
		emitter.buffer[p+0] = s[*i+0]
	default:
		panic("unknown character width")
	}
	emitter.column++
	emitter.buffer_pos += w
	*i += w
	return true
}

// Write a whole string into buffer.
func write_all(emitter *yaml_emitter_t, s []byte) bool {
	for i := 0; i < len(s); {
		if !write(emitter, s, &i) {
			return false
		}
	}
	return true
}

// Copy a line break character from a string into buffer.
// A '\n' is translated to the configured break style; any other break
// character is copied verbatim but still resets column/advances line.
func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
	if s[*i] == '\n' {
		if !put_break(emitter) {
			return false
		}
		*i++
	} else {
		if !write(emitter, s, i) {
			return false
		}
		emitter.column = 0
		emitter.line++
	}
	return true
}

// Set an emitter error and return false.
func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
	emitter.error = yaml_EMITTER_ERROR
	emitter.problem = problem
	return false
}

// Emit an event.
// Events are queued; once enough lookahead is available (see
// yaml_emitter_need_more_events) each queued event is analyzed and fed
// through the state machine.
func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	emitter.events = append(emitter.events, *event)
	for !yaml_emitter_need_more_events(emitter) {
		event := &emitter.events[emitter.events_head]
		if !yaml_emitter_analyze_event(emitter, event) {
			return false
		}
		if !yaml_emitter_state_machine(emitter, event) {
			return false
		}
		yaml_event_delete(event)
		emitter.events_head++
	}
	return true
}

// Check if we need to accumulate more events before emitting.
//
// We accumulate extra
//  - 1 event for DOCUMENT-START
//  - 2 events for SEQUENCE-START
//  - 3 events for MAPPING-START
//
func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
	if emitter.events_head == len(emitter.events) {
		return true
	}
	var accumulate int
	switch emitter.events[emitter.events_head].typ {
	case yaml_DOCUMENT_START_EVENT:
		accumulate = 1
		break
	case yaml_SEQUENCE_START_EVENT:
		accumulate = 2
		break
	case yaml_MAPPING_START_EVENT:
		accumulate = 3
		break
	default:
		return false
	}
	if len(emitter.events)-emitter.events_head > accumulate {
		return false
	}
	// If the queued events close the opened collection (nesting level
	// returns to zero), there is enough lookahead to emit now.
	var level int
	for i := emitter.events_head; i < len(emitter.events); i++ {
		switch emitter.events[i].typ {
		case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
			level++
		case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
			level--
		}
		if level == 0 {
			return false
		}
	}
	return true
}

// Append a directive to the directives stack.
// Duplicates either pass silently (allow_duplicates) or raise an error.
func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
	for i := 0; i < len(emitter.tag_directives); i++ {
		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
			if allow_duplicates {
				return true
			}
			return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
		}
	}

	// [Go] Do we actually need to copy this given garbage collection
	// and the lack of deallocating destructors?
	tag_copy := yaml_tag_directive_t{
		handle: make([]byte, len(value.handle)),
		prefix: make([]byte, len(value.prefix)),
	}
	copy(tag_copy.handle, value.handle)
	copy(tag_copy.prefix, value.prefix)
	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
	return true
}

// Increase the indentation level.
// flow selects the flow-style base indent; indentless keeps the current
// indent (used for block sequences nested in mappings).
func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
	emitter.indents = append(emitter.indents, emitter.indent)
	if emitter.indent < 0 {
		if flow {
			emitter.indent = emitter.best_indent
		} else {
			emitter.indent = 0
		}
	} else if !indentless {
		emitter.indent += emitter.best_indent
	}
	return true
}

// State dispatcher.
func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	switch emitter.state {
	default:
	case yaml_EMIT_STREAM_START_STATE:
		return yaml_emitter_emit_stream_start(emitter, event)

	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
		return yaml_emitter_emit_document_start(emitter, event, true)

	case yaml_EMIT_DOCUMENT_START_STATE:
		return yaml_emitter_emit_document_start(emitter, event, false)

	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
		return yaml_emitter_emit_document_content(emitter, event)

	case yaml_EMIT_DOCUMENT_END_STATE:
		return yaml_emitter_emit_document_end(emitter, event)

	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
		return yaml_emitter_emit_flow_sequence_item(emitter, event, true)

	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
		return yaml_emitter_emit_flow_sequence_item(emitter, event, false)

	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
		return yaml_emitter_emit_flow_mapping_key(emitter, event, true)

	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
		return yaml_emitter_emit_flow_mapping_key(emitter, event, false)

	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)

	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)

	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
		return yaml_emitter_emit_block_sequence_item(emitter, event, true)

	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
		return yaml_emitter_emit_block_sequence_item(emitter, event, false)

	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
		return yaml_emitter_emit_block_mapping_key(emitter, event, true)

	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
		return yaml_emitter_emit_block_mapping_key(emitter, event, false)

	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
		return yaml_emitter_emit_block_mapping_value(emitter, event, true)

	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
		return yaml_emitter_emit_block_mapping_value(emitter, event, false)

	case yaml_EMIT_END_STATE:
		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
	}
	panic("invalid emitter state")
}

// Expect STREAM-START.
// Normalizes encoding, indent, width, and line-break settings, resets
// position counters, and writes a BOM for non-UTF-8 encodings.
func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if event.typ != yaml_STREAM_START_EVENT {
		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
	}
	if emitter.encoding == yaml_ANY_ENCODING {
		emitter.encoding = event.encoding
		if emitter.encoding == yaml_ANY_ENCODING {
			emitter.encoding = yaml_UTF8_ENCODING
		}
	}
	if emitter.best_indent < 2 || emitter.best_indent > 9 {
		emitter.best_indent = 2
	}
	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
		emitter.best_width = 80
	}
	if emitter.best_width < 0 {
		emitter.best_width = 1<<31 - 1
	}
	if emitter.line_break == yaml_ANY_BREAK {
		emitter.line_break = yaml_LN_BREAK
	}
	emitter.indent = -1
	emitter.line = 0
	emitter.column = 0
	emitter.whitespace = true
	emitter.indention = true
	if emitter.encoding != yaml_UTF8_ENCODING {
		if !yaml_emitter_write_bom(emitter) {
			return false
		}
	}
	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
	return true
}

// Expect DOCUMENT-START or STREAM-END.
// Writes any %YAML/%TAG directives, decides whether the "---" marker can
// be left implicit, and transitions to DOCUMENT-CONTENT; on STREAM-END it
// closes an open-ended document with "..." and flushes.
func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {

	if event.typ == yaml_DOCUMENT_START_EVENT {

		if event.version_directive != nil {
			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
				return false
			}
		}

		for i := 0; i < len(event.tag_directives); i++ {
			tag_directive := &event.tag_directives[i]
			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
				return false
			}
			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
				return false
			}
		}

		for i := 0; i < len(default_tag_directives); i++ {
			tag_directive := &default_tag_directives[i]
			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
				return false
			}
		}

		implicit := event.implicit
		if !first || emitter.canonical {
			implicit = false
		}

		// A previous document without an explicit end needs "..." before
		// directives may be written.
		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}

		if event.version_directive != nil {
			implicit = false
			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}

		if len(event.tag_directives) > 0 {
			implicit = false
			for i := 0; i < len(event.tag_directives); i++ {
				tag_directive := &event.tag_directives[i]
				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
					return false
				}
				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
					return false
				}
				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
					return false
				}
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
		}

		if yaml_emitter_check_empty_document(emitter) {
			implicit = false
		}

		if !implicit {
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
				return false
			}
			if emitter.canonical {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
		}

		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
		return true
	}

	if event.typ == yaml_STREAM_END_EVENT {
		if emitter.open_ended {
			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_flush(emitter) {
			return false
		}
		emitter.state = yaml_EMIT_END_STATE
		return true
	}

	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
}

// Expect the root node.
func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
}

// Expect DOCUMENT-END.
// Writes "..." when the end is explicit, flushes, and resets the tag
// directive stack for the next document.
func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if event.typ != yaml_DOCUMENT_END_EVENT {
		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
	}
	if !yaml_emitter_write_indent(emitter) {
		return false
	}
	if !event.implicit {
		// [Go] Allocate the slice elsewhere.
		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
			return false
		}
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
	}
	if !yaml_emitter_flush(emitter) {
		return false
	}
	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
	emitter.tag_directives = emitter.tag_directives[:0]
	return true
}

// Expect a flow item node.
// On the first item it opens "[" and bumps indent/flow level; on
// SEQUENCE-END it pops indent and state and closes "]"; otherwise it
// writes a "," separator, wraps if past best_width, and emits the item.
func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
	if first {
		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
			return false
		}
		if !yaml_emitter_increase_indent(emitter, true, false) {
			return false
		}
		emitter.flow_level++
	}

	if event.typ == yaml_SEQUENCE_END_EVENT {
		emitter.flow_level--
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		if emitter.canonical && !first {
			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
			return false
		}
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]

		return true
	}

	if !first {
		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
			return false
		}
	}

	if emitter.canonical || emitter.column > emitter.best_width {
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
	}
	emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
}

// Expect a flow key node.
// On the first key it opens "{"; on MAPPING-END it closes "}" and pops
// state; otherwise it separates with ",", wraps if needed, and emits the
// key either as a simple key or after an explicit "?" indicator.
func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
	if first {
		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
			return false
		}
		if !yaml_emitter_increase_indent(emitter, true, false) {
			return false
		}
		emitter.flow_level++
	}

	if event.typ == yaml_MAPPING_END_EVENT {
		emitter.flow_level--
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		if emitter.canonical && !first {
			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
			return false
		}
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return true
	}

	if !first {
		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
			return false
		}
	}
	if emitter.canonical || emitter.column > emitter.best_width {
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
	}

	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
	}
	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
		return false
	}
	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
}

// Expect a flow value node.
// Writes the ":" separator (with surrounding space/indent handling for
// non-simple keys) and then emits the value node.
func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
	if simple {
		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
			return false
		}
	} else {
		if emitter.canonical || emitter.column > emitter.best_width {
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
			return false
		}
	}
	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
}

// Expect a block item node.
// Indentless mode applies when a sequence is nested directly in a
// mapping value that has not started a new line.
func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
	if first {
		if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
			return false
		}
	}
	if event.typ == yaml_SEQUENCE_END_EVENT {
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return true
	}
	if !yaml_emitter_write_indent(emitter) {
		return false
	}
	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
		return false
	}
	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
}

// Expect a block key node.
// Emits the key as a simple key when possible, otherwise with an explicit
// "?" indicator; pops indent/state on MAPPING-END.
func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
	if first {
		if !yaml_emitter_increase_indent(emitter, false, false) {
			return false
		}
	}
	if event.typ == yaml_MAPPING_END_EVENT {
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return true
	}
	if !yaml_emitter_write_indent(emitter) {
		return false
	}
	if yaml_emitter_check_simple_key(emitter) {
		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
	}
	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
		return false
	}
	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
}

// Expect a block value node.
// Simple keys get "key: value" on one line; explicit keys put ":" on a
// fresh indented line.
func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
	if simple {
		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
			return false
		}
	} else {
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
			return false
		}
	}
	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
}

// Expect a node.
// Records the current context flags, then dispatches on the event type to
// the alias/scalar/sequence/mapping emitters.
func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
	root bool, sequence bool, mapping bool, simple_key bool) bool {

	emitter.root_context = root
	emitter.sequence_context = sequence
	emitter.mapping_context = mapping
	emitter.simple_key_context = simple_key

	switch event.typ {
	case yaml_ALIAS_EVENT:
		return yaml_emitter_emit_alias(emitter, event)
	case yaml_SCALAR_EVENT:
		return yaml_emitter_emit_scalar(emitter, event)
	case yaml_SEQUENCE_START_EVENT:
		return yaml_emitter_emit_sequence_start(emitter, event)
	case yaml_MAPPING_START_EVENT:
		return yaml_emitter_emit_mapping_start(emitter, event)
	default:
		return yaml_emitter_set_emitter_error(emitter,
			"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
	}
	return false
}

// Expect ALIAS.
func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if !yaml_emitter_process_anchor(emitter) {
		return false
	}
	emitter.state = emitter.states[len(emitter.states)-1]
	emitter.states = emitter.states[:len(emitter.states)-1]
	return true
}

// Expect SCALAR.
// Selects a scalar style, writes anchor/tag, then the scalar itself,
// restoring indent and popping the state stack afterwards.
func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if !yaml_emitter_select_scalar_style(emitter, event) {
		return false
	}
	if !yaml_emitter_process_anchor(emitter) {
		return false
	}
	if !yaml_emitter_process_tag(emitter) {
		return false
	}
	if !yaml_emitter_increase_indent(emitter, true, false) {
		return false
	}
	if !yaml_emitter_process_scalar(emitter) {
		return false
	}
	emitter.indent = emitter.indents[len(emitter.indents)-1]
	emitter.indents = emitter.indents[:len(emitter.indents)-1]
	emitter.state = emitter.states[len(emitter.states)-1]
	emitter.states = emitter.states[:len(emitter.states)-1]
	return true
}

// Expect SEQUENCE-START.
// Flow style is forced when already inside a flow collection, in
// canonical mode, when requested by the event, or when the sequence is
// empty (block style cannot represent an empty sequence).
func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if !yaml_emitter_process_anchor(emitter) {
		return false
	}
	if !yaml_emitter_process_tag(emitter) {
		return false
	}
	if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
		yaml_emitter_check_empty_sequence(emitter) {
		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
	} else {
		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
	}
	return true
}

// Expect MAPPING-START.
// Same flow/block decision as for sequences, applied to mappings.
func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if !yaml_emitter_process_anchor(emitter) {
		return false
	}
	if !yaml_emitter_process_tag(emitter) {
		return false
	}
	if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
		yaml_emitter_check_empty_mapping(emitter) {
		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
	} else {
		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
	}
	return true
}

// Check if the document content is an empty scalar.
func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
	return false // [Go] Huh?
}

// Check if the next events represent an empty sequence.
func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
	if len(emitter.events)-emitter.events_head < 2 {
		return false
	}
	return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
		emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
}

// Check if the next events represent an empty mapping.
func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
	if len(emitter.events)-emitter.events_head < 2 {
		return false
	}
	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
}

// Check if the next node can be expressed as a simple key.
func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { length := 0 switch emitter.events[emitter.events_head].typ { case yaml_ALIAS_EVENT: length += len(emitter.anchor_data.anchor) case yaml_SCALAR_EVENT: if emitter.scalar_data.multiline { return false } length += len(emitter.anchor_data.anchor) + len(emitter.tag_data.handle) + len(emitter.tag_data.suffix) + len(emitter.scalar_data.value) case yaml_SEQUENCE_START_EVENT: if !yaml_emitter_check_empty_sequence(emitter) { return false } length += len(emitter.anchor_data.anchor) + len(emitter.tag_data.handle) + len(emitter.tag_data.suffix) case yaml_MAPPING_START_EVENT: if !yaml_emitter_check_empty_mapping(emitter) { return false } length += len(emitter.anchor_data.anchor) + len(emitter.tag_data.handle) + len(emitter.tag_data.suffix) default: return false } return length <= 128 } // Determine an acceptable scalar style. func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 if no_tag && !event.implicit && !event.quoted_implicit { return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") } style := event.scalar_style() if style == yaml_ANY_SCALAR_STYLE { style = yaml_PLAIN_SCALAR_STYLE } if emitter.canonical { style = yaml_DOUBLE_QUOTED_SCALAR_STYLE } if emitter.simple_key_context && emitter.scalar_data.multiline { style = yaml_DOUBLE_QUOTED_SCALAR_STYLE } if style == yaml_PLAIN_SCALAR_STYLE { if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { style = yaml_SINGLE_QUOTED_SCALAR_STYLE } if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { style = yaml_SINGLE_QUOTED_SCALAR_STYLE } if no_tag && !event.implicit { style = yaml_SINGLE_QUOTED_SCALAR_STYLE } } if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { if 
!emitter.scalar_data.single_quoted_allowed { style = yaml_DOUBLE_QUOTED_SCALAR_STYLE } } if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { style = yaml_DOUBLE_QUOTED_SCALAR_STYLE } } if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { emitter.tag_data.handle = []byte{'!'} } emitter.scalar_data.style = style return true } // Write an achor. func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { if emitter.anchor_data.anchor == nil { return true } c := []byte{'&'} if emitter.anchor_data.alias { c[0] = '*' } if !yaml_emitter_write_indicator(emitter, c, true, false, false) { return false } return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) } // Write a tag. func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { return true } if len(emitter.tag_data.handle) > 0 { if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { return false } if len(emitter.tag_data.suffix) > 0 { if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { return false } } } else { // [Go] Allocate these slices elsewhere. if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { return false } if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { return false } if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { return false } } return true } // Write a scalar. 
func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { switch emitter.scalar_data.style { case yaml_PLAIN_SCALAR_STYLE: return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) case yaml_SINGLE_QUOTED_SCALAR_STYLE: return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) case yaml_DOUBLE_QUOTED_SCALAR_STYLE: return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) case yaml_LITERAL_SCALAR_STYLE: return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) case yaml_FOLDED_SCALAR_STYLE: return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) } panic("unknown scalar style") } // Check if a %YAML directive is valid. func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { if version_directive.major != 1 || version_directive.minor != 1 { return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") } return true } // Check if a %TAG directive is valid. func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { handle := tag_directive.handle prefix := tag_directive.prefix if len(handle) == 0 { return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") } if handle[0] != '!' { return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") } if handle[len(handle)-1] != '!' { return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") } for i := 1; i < len(handle)-1; i += width(handle[i]) { if !is_alpha(handle, i) { return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") } } if len(prefix) == 0 { return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") } return true } // Check if an anchor is valid. 
// yaml_emitter_analyze_anchor validates an anchor (or alias, when alias is
// true) name: it must be non-empty and contain only alphanumerical
// characters.  On success the name and its kind are stored in
// emitter.anchor_data for yaml_emitter_process_anchor to write later.
func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
	if len(anchor) == 0 {
		problem := "anchor value must not be empty"
		if alias {
			problem = "alias value must not be empty"
		}
		return yaml_emitter_set_emitter_error(emitter, problem)
	}
	// Step by UTF-8 width so multi-byte characters are checked once each.
	for i := 0; i < len(anchor); i += width(anchor[i]) {
		if !is_alpha(anchor, i) {
			problem := "anchor value must contain alphanumerical characters only"
			if alias {
				problem = "alias value must contain alphanumerical characters only"
			}
			return yaml_emitter_set_emitter_error(emitter, problem)
		}
	}
	emitter.anchor_data.anchor = anchor
	emitter.anchor_data.alias = alias
	return true
}

// Check if a tag is valid.
//
// If the tag matches a registered %TAG directive prefix, it is split into
// that directive's handle plus the remaining suffix; otherwise the whole tag
// becomes the suffix (emitted verbatim by yaml_emitter_process_tag).
func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
	if len(tag) == 0 {
		return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
	}
	for i := 0; i < len(emitter.tag_directives); i++ {
		tag_directive := &emitter.tag_directives[i]
		// First matching prefix wins.
		if bytes.HasPrefix(tag, tag_directive.prefix) {
			emitter.tag_data.handle = tag_directive.handle
			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
			return true
		}
	}
	emitter.tag_data.suffix = tag
	return true
}

// Check if a scalar is valid.
//
// Scans the scalar once and records in emitter.scalar_data which output
// styles (plain in flow/block context, single-quoted, block) can represent
// it losslessly.  The flag updates after the loop are order-dependent: each
// later condition may further restrict the allowed styles.
// NOTE(review): "preceeded" is a misspelling of "preceded" inherited from
// the original source; kept as-is since this is a documentation-only pass.
func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
	var (
		// Characters that would be read as indicators in block/flow context.
		block_indicators   = false
		flow_indicators    = false
		line_breaks        = false
		special_characters = false

		// Leading/trailing/adjacent whitespace properties of the value.
		leading_space  = false
		leading_break  = false
		trailing_space = false
		trailing_break = false
		break_space    = false
		space_break    = false

		// Rolling state for the scan loop below.
		preceeded_by_whitespace = false
		followed_by_whitespace  = false
		previous_space          = false
		previous_break          = false
	)

	emitter.scalar_data.value = value

	// The empty scalar: representable only quoted (or as a block-context
	// plain scalar), never in flow-plain or block style.
	if len(value) == 0 {
		emitter.scalar_data.multiline = false
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = true
		emitter.scalar_data.single_quoted_allowed = true
		emitter.scalar_data.block_allowed = false
		return true
	}

	// "---" and "..." at the start would be mistaken for document markers.
	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
		block_indicators = true
		flow_indicators = true
	}

	preceeded_by_whitespace = true
	// Walk the value one UTF-8 character at a time.
	for i, w := 0, 0; i < len(value); i += w {
		w = width(value[i])
		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)

		if i == 0 {
			// Indicators that are only special at the start of a scalar.
			switch value[i] {
			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
				flow_indicators = true
				block_indicators = true
			case '?', ':':
				flow_indicators = true
				if followed_by_whitespace {
					block_indicators = true
				}
			case '-':
				if followed_by_whitespace {
					flow_indicators = true
					block_indicators = true
				}
			}
		} else {
			// Indicators that are special in the middle of a scalar.
			switch value[i] {
			case ',', '?', '[', ']', '{', '}':
				flow_indicators = true
			case ':':
				flow_indicators = true
				if followed_by_whitespace {
					block_indicators = true
				}
			case '#':
				if preceeded_by_whitespace {
					flow_indicators = true
					block_indicators = true
				}
			}
		}

		// Non-printable, or non-ASCII when unicode output is disabled,
		// forces double quoting with escapes.
		if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
			special_characters = true
		}
		if is_space(value, i) {
			if i == 0 {
				leading_space = true
			}
			if i+width(value[i]) == len(value) {
				trailing_space = true
			}
			if previous_break {
				break_space = true
			}
			previous_space = true
			previous_break = false
		} else if is_break(value, i) {
			line_breaks = true
			if i == 0 {
				leading_break = true
			}
			if i+width(value[i]) == len(value) {
				trailing_break = true
			}
			if previous_space {
				space_break = true
			}
			previous_space = false
			previous_break = true
		} else {
			previous_space = false
			previous_break = false
		}

		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
		preceeded_by_whitespace = is_blankz(value, i)
	}

	// Start permissive, then restrict.  The order of the checks below
	// matters: later ones may remove styles earlier ones left allowed.
	emitter.scalar_data.multiline = line_breaks
	emitter.scalar_data.flow_plain_allowed = true
	emitter.scalar_data.block_plain_allowed = true
	emitter.scalar_data.single_quoted_allowed = true
	emitter.scalar_data.block_allowed = true
	if leading_space || leading_break || trailing_space || trailing_break {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
	}
	if trailing_space {
		emitter.scalar_data.block_allowed = false
	}
	if break_space {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
		emitter.scalar_data.single_quoted_allowed = false
	}
	if space_break || special_characters {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
		emitter.scalar_data.single_quoted_allowed = false
		emitter.scalar_data.block_allowed = false
	}
	if line_breaks {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
	}
	if flow_indicators {
		emitter.scalar_data.flow_plain_allowed = false
	}
	if block_indicators {
		emitter.scalar_data.block_plain_allowed = false
	}
	return true
}

// Check if the event data is valid.
func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { emitter.anchor_data.anchor = nil emitter.tag_data.handle = nil emitter.tag_data.suffix = nil emitter.scalar_data.value = nil switch event.typ { case yaml_ALIAS_EVENT: if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { return false } case yaml_SCALAR_EVENT: if len(event.anchor) > 0 { if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { return false } } if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { if !yaml_emitter_analyze_tag(emitter, event.tag) { return false } } if !yaml_emitter_analyze_scalar(emitter, event.value) { return false } case yaml_SEQUENCE_START_EVENT: if len(event.anchor) > 0 { if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { return false } } if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { if !yaml_emitter_analyze_tag(emitter, event.tag) { return false } } case yaml_MAPPING_START_EVENT: if len(event.anchor) > 0 { if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { return false } } if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { if !yaml_emitter_analyze_tag(emitter, event.tag) { return false } } } return true } // Write the BOM character. 
func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { if !flush(emitter) { return false } pos := emitter.buffer_pos emitter.buffer[pos+0] = '\xEF' emitter.buffer[pos+1] = '\xBB' emitter.buffer[pos+2] = '\xBF' emitter.buffer_pos += 3 return true } func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { indent := emitter.indent if indent < 0 { indent = 0 } if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { if !put_break(emitter) { return false } } for emitter.column < indent { if !put(emitter, ' ') { return false } } emitter.whitespace = true emitter.indention = true return true } func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { if need_whitespace && !emitter.whitespace { if !put(emitter, ' ') { return false } } if !write_all(emitter, indicator) { return false } emitter.whitespace = is_whitespace emitter.indention = (emitter.indention && is_indention) emitter.open_ended = false return true } func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { if !write_all(emitter, value) { return false } emitter.whitespace = false emitter.indention = false return true } func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { if !emitter.whitespace { if !put(emitter, ' ') { return false } } if !write_all(emitter, value) { return false } emitter.whitespace = false emitter.indention = false return true } func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { if need_whitespace && !emitter.whitespace { if !put(emitter, ' ') { return false } } for i := 0; i < len(value); { var must_write bool switch value[i] { case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': must_write = true default: must_write = is_alpha(value, i) } if must_write { if !write(emitter, value, &i) { return false } } else { w 
:= width(value[i]) for k := 0; k < w; k++ { octet := value[i] i++ if !put(emitter, '%') { return false } c := octet >> 4 if c < 10 { c += '0' } else { c += 'A' - 10 } if !put(emitter, c) { return false } c = octet & 0x0f if c < 10 { c += '0' } else { c += 'A' - 10 } if !put(emitter, c) { return false } } } } emitter.whitespace = false emitter.indention = false return true } func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { if !emitter.whitespace { if !put(emitter, ' ') { return false } } spaces := false breaks := false for i := 0; i < len(value); { if is_space(value, i) { if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { if !yaml_emitter_write_indent(emitter) { return false } i += width(value[i]) } else { if !write(emitter, value, &i) { return false } } spaces = true } else if is_break(value, i) { if !breaks && value[i] == '\n' { if !put_break(emitter) { return false } } if !write_break(emitter, value, &i) { return false } emitter.indention = true breaks = true } else { if breaks { if !yaml_emitter_write_indent(emitter) { return false } } if !write(emitter, value, &i) { return false } emitter.indention = false spaces = false breaks = false } } emitter.whitespace = false emitter.indention = false if emitter.root_context { emitter.open_ended = true } return true } func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { return false } spaces := false breaks := false for i := 0; i < len(value); { if is_space(value, i) { if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { if !yaml_emitter_write_indent(emitter) { return false } i += width(value[i]) } else { if !write(emitter, value, &i) { return false } } spaces = true } else if is_break(value, i) { if !breaks && value[i] == '\n' 
{ if !put_break(emitter) { return false } } if !write_break(emitter, value, &i) { return false } emitter.indention = true breaks = true } else { if breaks { if !yaml_emitter_write_indent(emitter) { return false } } if value[i] == '\'' { if !put(emitter, '\'') { return false } } if !write(emitter, value, &i) { return false } emitter.indention = false spaces = false breaks = false } } if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { return false } emitter.whitespace = false emitter.indention = false return true } func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { spaces := false if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { return false } for i := 0; i < len(value); { if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || is_bom(value, i) || is_break(value, i) || value[i] == '"' || value[i] == '\\' { octet := value[i] var w int var v rune switch { case octet&0x80 == 0x00: w, v = 1, rune(octet&0x7F) case octet&0xE0 == 0xC0: w, v = 2, rune(octet&0x1F) case octet&0xF0 == 0xE0: w, v = 3, rune(octet&0x0F) case octet&0xF8 == 0xF0: w, v = 4, rune(octet&0x07) } for k := 1; k < w; k++ { octet = value[i+k] v = (v << 6) + (rune(octet) & 0x3F) } i += w if !put(emitter, '\\') { return false } var ok bool switch v { case 0x00: ok = put(emitter, '0') case 0x07: ok = put(emitter, 'a') case 0x08: ok = put(emitter, 'b') case 0x09: ok = put(emitter, 't') case 0x0A: ok = put(emitter, 'n') case 0x0b: ok = put(emitter, 'v') case 0x0c: ok = put(emitter, 'f') case 0x0d: ok = put(emitter, 'r') case 0x1b: ok = put(emitter, 'e') case 0x22: ok = put(emitter, '"') case 0x5c: ok = put(emitter, '\\') case 0x85: ok = put(emitter, 'N') case 0xA0: ok = put(emitter, '_') case 0x2028: ok = put(emitter, 'L') case 0x2029: ok = put(emitter, 'P') default: if v <= 0xFF { ok = put(emitter, 'x') w = 2 } else if v <= 0xFFFF { ok = put(emitter, 'u') w = 4 } else { ok = 
put(emitter, 'U') w = 8 } for k := (w - 1) * 4; ok && k >= 0; k -= 4 { digit := byte((v >> uint(k)) & 0x0F) if digit < 10 { ok = put(emitter, digit+'0') } else { ok = put(emitter, digit+'A'-10) } } } if !ok { return false } spaces = false } else if is_space(value, i) { if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { if !yaml_emitter_write_indent(emitter) { return false } if is_space(value, i+1) { if !put(emitter, '\\') { return false } } i += width(value[i]) } else if !write(emitter, value, &i) { return false } spaces = true } else { if !write(emitter, value, &i) { return false } spaces = false } } if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { return false } emitter.whitespace = false emitter.indention = false return true } func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { if is_space(value, 0) || is_break(value, 0) { indent_hint := []byte{'0' + byte(emitter.best_indent)} if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { return false } } emitter.open_ended = false var chomp_hint [1]byte if len(value) == 0 { chomp_hint[0] = '-' } else { i := len(value) - 1 for value[i]&0xC0 == 0x80 { i-- } if !is_break(value, i) { chomp_hint[0] = '-' } else if i == 0 { chomp_hint[0] = '+' emitter.open_ended = true } else { i-- for value[i]&0xC0 == 0x80 { i-- } if is_break(value, i) { chomp_hint[0] = '+' emitter.open_ended = true } } } if chomp_hint[0] != 0 { if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { return false } } return true } func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { return false } if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } if !put_break(emitter) { return false } emitter.indention = true emitter.whitespace = true breaks := true for i := 0; i < 
len(value); { if is_break(value, i) { if !write_break(emitter, value, &i) { return false } emitter.indention = true breaks = true } else { if breaks { if !yaml_emitter_write_indent(emitter) { return false } } if !write(emitter, value, &i) { return false } emitter.indention = false breaks = false } } return true } func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { return false } if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } if !put_break(emitter) { return false } emitter.indention = true emitter.whitespace = true breaks := true leading_spaces := true for i := 0; i < len(value); { if is_break(value, i) { if !breaks && !leading_spaces && value[i] == '\n' { k := 0 for is_break(value, k) { k += width(value[k]) } if !is_blankz(value, k) { if !put_break(emitter) { return false } } } if !write_break(emitter, value, &i) { return false } emitter.indention = true breaks = true } else { if breaks { if !yaml_emitter_write_indent(emitter) { return false } leading_spaces = is_blank(value, i) } if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { if !yaml_emitter_write_indent(emitter) { return false } i += width(value[i]) } else { if !write(emitter, value, &i) { return false } } emitter.indention = false breaks = false } } return true } charm-2.1.1/src/gopkg.in/yaml.v2/writerc.go0000664000175000017500000000460312672604537017413 0ustar marcomarcopackage yaml // Set the writer error and return false. func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { emitter.error = yaml_WRITER_ERROR emitter.problem = problem return false } // Flush the output buffer. func yaml_emitter_flush(emitter *yaml_emitter_t) bool { if emitter.write_handler == nil { panic("write handler not set") } // Check if the buffer is empty. 
if emitter.buffer_pos == 0 { return true } // If the output encoding is UTF-8, we don't need to recode the buffer. if emitter.encoding == yaml_UTF8_ENCODING { if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) } emitter.buffer_pos = 0 return true } // Recode the buffer into the raw buffer. var low, high int if emitter.encoding == yaml_UTF16LE_ENCODING { low, high = 0, 1 } else { high, low = 1, 0 } pos := 0 for pos < emitter.buffer_pos { // See the "reader.c" code for more details on UTF-8 encoding. Note // that we assume that the buffer contains a valid UTF-8 sequence. // Read the next UTF-8 character. octet := emitter.buffer[pos] var w int var value rune switch { case octet&0x80 == 0x00: w, value = 1, rune(octet&0x7F) case octet&0xE0 == 0xC0: w, value = 2, rune(octet&0x1F) case octet&0xF0 == 0xE0: w, value = 3, rune(octet&0x0F) case octet&0xF8 == 0xF0: w, value = 4, rune(octet&0x07) } for k := 1; k < w; k++ { octet = emitter.buffer[pos+k] value = (value << 6) + (rune(octet) & 0x3F) } pos += w // Write the character. if value < 0x10000 { var b [2]byte b[high] = byte(value >> 8) b[low] = byte(value & 0xFF) emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) } else { // Write the character using a surrogate pair (check "reader.c"). var b [4]byte value -= 0x10000 b[high] = byte(0xD8 + (value >> 18)) b[low] = byte((value >> 10) & 0xFF) b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) b[low+2] = byte(value & 0xFF) emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) } } // Write the raw buffer. 
if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) } emitter.buffer_pos = 0 emitter.raw_buffer = emitter.raw_buffer[:0] return true } charm-2.1.1/src/gopkg.in/yaml.v2/yamlh.go0000664000175000017500000006150112672604537017046 0ustar marcomarcopackage yaml import ( "io" ) // The version directive data. type yaml_version_directive_t struct { major int8 // The major version number. minor int8 // The minor version number. } // The tag directive data. type yaml_tag_directive_t struct { handle []byte // The tag handle. prefix []byte // The tag prefix. } type yaml_encoding_t int // The stream encoding. const ( // Let the parser choose the encoding. yaml_ANY_ENCODING yaml_encoding_t = iota yaml_UTF8_ENCODING // The default UTF-8 encoding. yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. ) type yaml_break_t int // Line break types. const ( // Let the parser choose the break type. yaml_ANY_BREAK yaml_break_t = iota yaml_CR_BREAK // Use CR for line breaks (Mac style). yaml_LN_BREAK // Use LN for line breaks (Unix style). yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). ) type yaml_error_type_t int // Many bad things could happen with the parser and emitter. const ( // No error is produced. yaml_NO_ERROR yaml_error_type_t = iota yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. yaml_READER_ERROR // Cannot read or decode the input stream. yaml_SCANNER_ERROR // Cannot scan the input stream. yaml_PARSER_ERROR // Cannot parse the input stream. yaml_COMPOSER_ERROR // Cannot compose a YAML document. yaml_WRITER_ERROR // Cannot write to the output stream. yaml_EMITTER_ERROR // Cannot emit a YAML stream. ) // The pointer position. type yaml_mark_t struct { index int // The position index. line int // The position line. column int // The position column. 
} // Node Styles type yaml_style_t int8 type yaml_scalar_style_t yaml_style_t // Scalar styles. const ( // Let the emitter choose the style. yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota yaml_PLAIN_SCALAR_STYLE // The plain scalar style. yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. yaml_LITERAL_SCALAR_STYLE // The literal scalar style. yaml_FOLDED_SCALAR_STYLE // The folded scalar style. ) type yaml_sequence_style_t yaml_style_t // Sequence styles. const ( // Let the emitter choose the style. yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. ) type yaml_mapping_style_t yaml_style_t // Mapping styles. const ( // Let the emitter choose the style. yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota yaml_BLOCK_MAPPING_STYLE // The block mapping style. yaml_FLOW_MAPPING_STYLE // The flow mapping style. ) // Tokens type yaml_token_type_t int // Token types. const ( // An empty token. yaml_NO_TOKEN yaml_token_type_t = iota yaml_STREAM_START_TOKEN // A STREAM-START token. yaml_STREAM_END_TOKEN // A STREAM-END token. yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. yaml_BLOCK_END_TOKEN // A BLOCK-END token. yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. yaml_KEY_TOKEN // A KEY token. 
yaml_VALUE_TOKEN // A VALUE token. yaml_ALIAS_TOKEN // An ALIAS token. yaml_ANCHOR_TOKEN // An ANCHOR token. yaml_TAG_TOKEN // A TAG token. yaml_SCALAR_TOKEN // A SCALAR token. ) func (tt yaml_token_type_t) String() string { switch tt { case yaml_NO_TOKEN: return "yaml_NO_TOKEN" case yaml_STREAM_START_TOKEN: return "yaml_STREAM_START_TOKEN" case yaml_STREAM_END_TOKEN: return "yaml_STREAM_END_TOKEN" case yaml_VERSION_DIRECTIVE_TOKEN: return "yaml_VERSION_DIRECTIVE_TOKEN" case yaml_TAG_DIRECTIVE_TOKEN: return "yaml_TAG_DIRECTIVE_TOKEN" case yaml_DOCUMENT_START_TOKEN: return "yaml_DOCUMENT_START_TOKEN" case yaml_DOCUMENT_END_TOKEN: return "yaml_DOCUMENT_END_TOKEN" case yaml_BLOCK_SEQUENCE_START_TOKEN: return "yaml_BLOCK_SEQUENCE_START_TOKEN" case yaml_BLOCK_MAPPING_START_TOKEN: return "yaml_BLOCK_MAPPING_START_TOKEN" case yaml_BLOCK_END_TOKEN: return "yaml_BLOCK_END_TOKEN" case yaml_FLOW_SEQUENCE_START_TOKEN: return "yaml_FLOW_SEQUENCE_START_TOKEN" case yaml_FLOW_SEQUENCE_END_TOKEN: return "yaml_FLOW_SEQUENCE_END_TOKEN" case yaml_FLOW_MAPPING_START_TOKEN: return "yaml_FLOW_MAPPING_START_TOKEN" case yaml_FLOW_MAPPING_END_TOKEN: return "yaml_FLOW_MAPPING_END_TOKEN" case yaml_BLOCK_ENTRY_TOKEN: return "yaml_BLOCK_ENTRY_TOKEN" case yaml_FLOW_ENTRY_TOKEN: return "yaml_FLOW_ENTRY_TOKEN" case yaml_KEY_TOKEN: return "yaml_KEY_TOKEN" case yaml_VALUE_TOKEN: return "yaml_VALUE_TOKEN" case yaml_ALIAS_TOKEN: return "yaml_ALIAS_TOKEN" case yaml_ANCHOR_TOKEN: return "yaml_ANCHOR_TOKEN" case yaml_TAG_TOKEN: return "yaml_TAG_TOKEN" case yaml_SCALAR_TOKEN: return "yaml_SCALAR_TOKEN" } return "" } // The token structure. type yaml_token_t struct { // The token type. typ yaml_token_type_t // The start/end of the token. start_mark, end_mark yaml_mark_t // The stream encoding (for yaml_STREAM_START_TOKEN). 
encoding yaml_encoding_t // The alias/anchor/scalar value or tag/tag directive handle // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). value []byte // The tag suffix (for yaml_TAG_TOKEN). suffix []byte // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). prefix []byte // The scalar style (for yaml_SCALAR_TOKEN). style yaml_scalar_style_t // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). major, minor int8 } // Events type yaml_event_type_t int8 // Event types. const ( // An empty event. yaml_NO_EVENT yaml_event_type_t = iota yaml_STREAM_START_EVENT // A STREAM-START event. yaml_STREAM_END_EVENT // A STREAM-END event. yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. yaml_ALIAS_EVENT // An ALIAS event. yaml_SCALAR_EVENT // A SCALAR event. yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. yaml_MAPPING_START_EVENT // A MAPPING-START event. yaml_MAPPING_END_EVENT // A MAPPING-END event. ) // The event structure. type yaml_event_t struct { // The event type. typ yaml_event_type_t // The start and end of the event. start_mark, end_mark yaml_mark_t // The document encoding (for yaml_STREAM_START_EVENT). encoding yaml_encoding_t // The version directive (for yaml_DOCUMENT_START_EVENT). version_directive *yaml_version_directive_t // The list of tag directives (for yaml_DOCUMENT_START_EVENT). tag_directives []yaml_tag_directive_t // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). anchor []byte // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). tag []byte // The scalar value (for yaml_SCALAR_EVENT). value []byte // Is the document start/end indicator implicit, or the tag optional? 
// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). implicit bool // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). quoted_implicit bool // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). style yaml_style_t } func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } // Nodes const ( yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. // Not in original libyaml. yaml_BINARY_TAG = "tag:yaml.org,2002:binary" yaml_MERGE_TAG = "tag:yaml.org,2002:merge" yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. ) type yaml_node_type_t int // Node types. const ( // An empty node. yaml_NO_NODE yaml_node_type_t = iota yaml_SCALAR_NODE // A scalar node. yaml_SEQUENCE_NODE // A sequence node. yaml_MAPPING_NODE // A mapping node. ) // An element of a sequence node. 
type yaml_node_item_t int

// An element of a mapping node.
type yaml_node_pair_t struct {
	key   int // The key of the element.
	value int // The value of the element.
}

// The node structure.
type yaml_node_t struct {
	typ yaml_node_type_t // The node type.
	tag []byte           // The node tag.

	// The node data. Exactly one of the following anonymous structs is
	// meaningful, selected by typ.

	// The scalar parameters (for yaml_SCALAR_NODE).
	scalar struct {
		value  []byte              // The scalar value.
		length int                 // The length of the scalar value.
		style  yaml_scalar_style_t // The scalar style.
	}

	// The sequence parameters (for YAML_SEQUENCE_NODE).
	sequence struct {
		items_data []yaml_node_item_t    // The stack of sequence items.
		style      yaml_sequence_style_t // The sequence style.
	}

	// The mapping parameters (for yaml_MAPPING_NODE).
	mapping struct {
		pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
		// NOTE(review): the three pointer fields below mirror libyaml's C
		// stack layout; presumably retained from the C port — confirm they
		// are actually maintained by the Go code before relying on them.
		pairs_start *yaml_node_pair_t    // The beginning of the stack.
		pairs_end   *yaml_node_pair_t    // The end of the stack.
		pairs_top   *yaml_node_pair_t    // The top of the stack.
		style       yaml_mapping_style_t // The mapping style.
	}

	start_mark yaml_mark_t // The beginning of the node.
	end_mark   yaml_mark_t // The end of the node.
}

// The document structure.
type yaml_document_t struct {
	// The document nodes.
	nodes []yaml_node_t

	// The version directive.
	version_directive *yaml_version_directive_t

	// The list of tag directives.
	tag_directives_data  []yaml_tag_directive_t
	tag_directives_start int // The beginning of the tag directives list.
	tag_directives_end   int // The end of the tag directives list.

	start_implicit int // Is the document start indicator implicit?
	end_implicit   int // Is the document end indicator implicit?

	// The start/end of the document.
	start_mark, end_mark yaml_mark_t
}

// The prototype of a read handler.
//
// The read handler is called when the parser needs to read more bytes from the
// source. The handler should write not more than size bytes to the buffer.
// The number of written bytes should be set to the size_read variable.
// // [in,out] data A pointer to an application data specified by // yaml_parser_set_input(). // [out] buffer The buffer to write the data from the source. // [in] size The size of the buffer. // [out] size_read The actual number of bytes read from the source. // // On success, the handler should return 1. If the handler failed, // the returned value should be 0. On EOF, the handler should set the // size_read to 0 and return 1. type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) // This structure holds information about a potential simple key. type yaml_simple_key_t struct { possible bool // Is a simple key possible? required bool // Is a simple key required? token_number int // The number of the token. mark yaml_mark_t // The position mark. } // The states of the parser. type yaml_parser_state_t int const ( yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. 
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. yaml_PARSE_END_STATE // Expect nothing. ) func (ps yaml_parser_state_t) String() string { switch ps { case yaml_PARSE_STREAM_START_STATE: return "yaml_PARSE_STREAM_START_STATE" case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" case yaml_PARSE_DOCUMENT_START_STATE: return "yaml_PARSE_DOCUMENT_START_STATE" case yaml_PARSE_DOCUMENT_CONTENT_STATE: return "yaml_PARSE_DOCUMENT_CONTENT_STATE" case yaml_PARSE_DOCUMENT_END_STATE: return "yaml_PARSE_DOCUMENT_END_STATE" case yaml_PARSE_BLOCK_NODE_STATE: return "yaml_PARSE_BLOCK_NODE_STATE" case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" case yaml_PARSE_FLOW_NODE_STATE: return "yaml_PARSE_FLOW_NODE_STATE" case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" 
case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" case yaml_PARSE_FLOW_MAPPING_KEY_STATE: return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" case yaml_PARSE_END_STATE: return "yaml_PARSE_END_STATE" } return "" } // This structure holds aliases data. type yaml_alias_data_t struct { anchor []byte // The anchor. index int // The node id. mark yaml_mark_t // The anchor mark. } // The parser structure. // // All members are internal. Manage the structure using the // yaml_parser_ family of functions. type yaml_parser_t struct { // Error handling error yaml_error_type_t // Error type. problem string // Error description. // The byte about which the problem occured. problem_offset int problem_value int problem_mark yaml_mark_t // The error context. context string context_mark yaml_mark_t // Reader stuff read_handler yaml_read_handler_t // Read handler. input_file io.Reader // File input data. input []byte // String input data. input_pos int eof bool // EOF flag buffer []byte // The working buffer. buffer_pos int // The current position of the buffer. unread int // The number of unread characters in the buffer. raw_buffer []byte // The raw buffer. raw_buffer_pos int // The current position of the buffer. 
encoding yaml_encoding_t // The input encoding. offset int // The offset of the current position (in bytes). mark yaml_mark_t // The mark of the current position. // Scanner stuff stream_start_produced bool // Have we started to scan the input stream? stream_end_produced bool // Have we reached the end of the input stream? flow_level int // The number of unclosed '[' and '{' indicators. tokens []yaml_token_t // The tokens queue. tokens_head int // The head of the tokens queue. tokens_parsed int // The number of tokens fetched from the queue. token_available bool // Does the tokens queue contain a token ready for dequeueing. indent int // The current indentation level. indents []int // The indentation levels stack. simple_key_allowed bool // May a simple key occur at the current position? simple_keys []yaml_simple_key_t // The stack of simple keys. // Parser stuff state yaml_parser_state_t // The current parser state. states []yaml_parser_state_t // The parser states stack. marks []yaml_mark_t // The stack of marks. tag_directives []yaml_tag_directive_t // The list of TAG directives. // Dumper stuff aliases []yaml_alias_data_t // The alias data. document *yaml_document_t // The currently parsed document. } // Emitter Definitions // The prototype of a write handler. // // The write handler is called when the emitter needs to flush the accumulated // characters to the output. The handler should write @a size bytes of the // @a buffer to the output. // // @param[in,out] data A pointer to an application data specified by // yaml_emitter_set_output(). // @param[in] buffer The buffer with bytes to be written. // @param[in] size The size of the buffer. // // @returns On success, the handler should return @c 1. If the handler failed, // the returned value should be @c 0. // type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error type yaml_emitter_state_t int // The emitter states. const ( // Expect STREAM-START. 
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. yaml_EMIT_END_STATE // Expect nothing. ) // The emitter structure. // // All members are internal. Manage the structure using the @c yaml_emitter_ // family of functions. type yaml_emitter_t struct { // Error handling error yaml_error_type_t // Error type. problem string // Error description. // Writer stuff write_handler yaml_write_handler_t // Write handler. output_buffer *[]byte // String output data. output_file io.Writer // File output data. buffer []byte // The working buffer. buffer_pos int // The current position of the buffer. raw_buffer []byte // The raw buffer. raw_buffer_pos int // The current position of the buffer. encoding yaml_encoding_t // The stream encoding. 
// Emitter stuff canonical bool // If the output is in the canonical style? best_indent int // The number of indentation spaces. best_width int // The preferred width of the output lines. unicode bool // Allow unescaped non-ASCII characters? line_break yaml_break_t // The preferred line break. state yaml_emitter_state_t // The current emitter state. states []yaml_emitter_state_t // The stack of states. events []yaml_event_t // The event queue. events_head int // The head of the event queue. indents []int // The stack of indentation levels. tag_directives []yaml_tag_directive_t // The list of tag directives. indent int // The current indentation level. flow_level int // The current flow level. root_context bool // Is it the document root context? sequence_context bool // Is it a sequence context? mapping_context bool // Is it a mapping context? simple_key_context bool // Is it a simple mapping key context? line int // The current line. column int // The current column. whitespace bool // If the last character was a whitespace? indention bool // If the last character was an indentation character (' ', '-', '?', ':')? open_ended bool // If an explicit document end is required? // Anchor analysis. anchor_data struct { anchor []byte // The anchor value. alias bool // Is it an alias? } // Tag analysis. tag_data struct { handle []byte // The tag handle. suffix []byte // The tag suffix. } // Scalar analysis. scalar_data struct { value []byte // The scalar value. multiline bool // Does the scalar contain line breaks? flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? block_plain_allowed bool // Can the scalar be expressed in the block plain style? single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? block_allowed bool // Can the scalar be expressed in the literal or folded styles? style yaml_scalar_style_t // The output style. } // Dumper stuff opened bool // If the stream was already opened? 
closed bool // If the stream was already closed? // The information associated with the document nodes. anchors *struct { references int // The number of references. anchor int // The anchor id. serialized bool // If the node has been emitted? } last_anchor_id int // The last assigned anchor id. document *yaml_document_t // The currently emitted document. } charm-2.1.1/src/gopkg.in/yaml.v2/LICENSE0000664000175000017500000002112612672604537016411 0ustar marcomarco Copyright (c) 2011-2014 - Canonical Inc. This software is licensed under the LGPLv3, included below. As a special exception to the GNU Lesser General Public License version 3 ("LGPL3"), the copyright holders of this Library give you permission to convey to a third party a Combined Work that links statically or dynamically to this Library without providing any Minimal Corresponding Source or Minimal Application Code as set out in 4d or providing the installation information set out in section 4e, provided that you comply with the other provisions of LGPL3 and provided that you meet, for the Application the terms and conditions of the license(s) which apply to the Application. Except as stated in this special exception, the provisions of LGPL3 will continue to comply in full to this Library. If you modify this Library, you may apply this exception to your version of this Library, but you are not obliged to do so. If you do not wish to do so, delete this exception statement from your version. This exception does not (and cannot) modify any license terms which apply to the Application, with which you must still comply. GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 
This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. 
If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. 
c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. 
You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. 
charm-2.1.1/src/gopkg.in/yaml.v2/scannerc.go0000664000175000017500000022671712672604537017544 0ustar marcomarcopackage yaml import ( "bytes" "fmt" ) // Introduction // ************ // // The following notes assume that you are familiar with the YAML specification // (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in // some cases we are less restrictive that it requires. // // The process of transforming a YAML stream into a sequence of events is // divided on two steps: Scanning and Parsing. // // The Scanner transforms the input stream into a sequence of tokens, while the // parser transform the sequence of tokens produced by the Scanner into a // sequence of parsing events. // // The Scanner is rather clever and complicated. The Parser, on the contrary, // is a straightforward implementation of a recursive-descendant parser (or, // LL(1) parser, as it is usually called). // // Actually there are two issues of Scanning that might be called "clever", the // rest is quite straightforward. The issues are "block collection start" and // "simple keys". Both issues are explained below in details. // // Here the Scanning step is explained and implemented. We start with the list // of all the tokens produced by the Scanner together with short descriptions. // // Now, tokens: // // STREAM-START(encoding) # The stream start. // STREAM-END # The stream end. // VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. // TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. // DOCUMENT-START # '---' // DOCUMENT-END # '...' // BLOCK-SEQUENCE-START # Indentation increase denoting a block // BLOCK-MAPPING-START # sequence or a block mapping. // BLOCK-END # Indentation decrease. // FLOW-SEQUENCE-START # '[' // FLOW-SEQUENCE-END # ']' // BLOCK-SEQUENCE-START # '{' // BLOCK-SEQUENCE-END # '}' // BLOCK-ENTRY # '-' // FLOW-ENTRY # ',' // KEY # '?' or nothing (simple keys). 
// VALUE # ':' // ALIAS(anchor) # '*anchor' // ANCHOR(anchor) # '&anchor' // TAG(handle,suffix) # '!handle!suffix' // SCALAR(value,style) # A scalar. // // The following two tokens are "virtual" tokens denoting the beginning and the // end of the stream: // // STREAM-START(encoding) // STREAM-END // // We pass the information about the input stream encoding with the // STREAM-START token. // // The next two tokens are responsible for tags: // // VERSION-DIRECTIVE(major,minor) // TAG-DIRECTIVE(handle,prefix) // // Example: // // %YAML 1.1 // %TAG ! !foo // %TAG !yaml! tag:yaml.org,2002: // --- // // The correspoding sequence of tokens: // // STREAM-START(utf-8) // VERSION-DIRECTIVE(1,1) // TAG-DIRECTIVE("!","!foo") // TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") // DOCUMENT-START // STREAM-END // // Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole // line. // // The document start and end indicators are represented by: // // DOCUMENT-START // DOCUMENT-END // // Note that if a YAML stream contains an implicit document (without '---' // and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be // produced. // // In the following examples, we present whole documents together with the // produced tokens. // // 1. An implicit document: // // 'a scalar' // // Tokens: // // STREAM-START(utf-8) // SCALAR("a scalar",single-quoted) // STREAM-END // // 2. An explicit document: // // --- // 'a scalar' // ... // // Tokens: // // STREAM-START(utf-8) // DOCUMENT-START // SCALAR("a scalar",single-quoted) // DOCUMENT-END // STREAM-END // // 3. Several documents in a stream: // // 'a scalar' // --- // 'another scalar' // --- // 'yet another scalar' // // Tokens: // // STREAM-START(utf-8) // SCALAR("a scalar",single-quoted) // DOCUMENT-START // SCALAR("another scalar",single-quoted) // DOCUMENT-START // SCALAR("yet another scalar",single-quoted) // STREAM-END // // We have already introduced the SCALAR token above. 
The following tokens are // used to describe aliases, anchors, tag, and scalars: // // ALIAS(anchor) // ANCHOR(anchor) // TAG(handle,suffix) // SCALAR(value,style) // // The following series of examples illustrate the usage of these tokens: // // 1. A recursive sequence: // // &A [ *A ] // // Tokens: // // STREAM-START(utf-8) // ANCHOR("A") // FLOW-SEQUENCE-START // ALIAS("A") // FLOW-SEQUENCE-END // STREAM-END // // 2. A tagged scalar: // // !!float "3.14" # A good approximation. // // Tokens: // // STREAM-START(utf-8) // TAG("!!","float") // SCALAR("3.14",double-quoted) // STREAM-END // // 3. Various scalar styles: // // --- # Implicit empty plain scalars do not produce tokens. // --- a plain scalar // --- 'a single-quoted scalar' // --- "a double-quoted scalar" // --- |- // a literal scalar // --- >- // a folded // scalar // // Tokens: // // STREAM-START(utf-8) // DOCUMENT-START // DOCUMENT-START // SCALAR("a plain scalar",plain) // DOCUMENT-START // SCALAR("a single-quoted scalar",single-quoted) // DOCUMENT-START // SCALAR("a double-quoted scalar",double-quoted) // DOCUMENT-START // SCALAR("a literal scalar",literal) // DOCUMENT-START // SCALAR("a folded scalar",folded) // STREAM-END // // Now it's time to review collection-related tokens. We will start with // flow collections: // // FLOW-SEQUENCE-START // FLOW-SEQUENCE-END // FLOW-MAPPING-START // FLOW-MAPPING-END // FLOW-ENTRY // KEY // VALUE // // The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and // FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' // correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the // indicators '?' and ':', which are used for denoting mapping keys and values, // are represented by the KEY and VALUE tokens. // // The following examples show flow collections: // // 1. 
A flow sequence: // // [item 1, item 2, item 3] // // Tokens: // // STREAM-START(utf-8) // FLOW-SEQUENCE-START // SCALAR("item 1",plain) // FLOW-ENTRY // SCALAR("item 2",plain) // FLOW-ENTRY // SCALAR("item 3",plain) // FLOW-SEQUENCE-END // STREAM-END // // 2. A flow mapping: // // { // a simple key: a value, # Note that the KEY token is produced. // ? a complex key: another value, // } // // Tokens: // // STREAM-START(utf-8) // FLOW-MAPPING-START // KEY // SCALAR("a simple key",plain) // VALUE // SCALAR("a value",plain) // FLOW-ENTRY // KEY // SCALAR("a complex key",plain) // VALUE // SCALAR("another value",plain) // FLOW-ENTRY // FLOW-MAPPING-END // STREAM-END // // A simple key is a key which is not denoted by the '?' indicator. Note that // the Scanner still produce the KEY token whenever it encounters a simple key. // // For scanning block collections, the following tokens are used (note that we // repeat KEY and VALUE here): // // BLOCK-SEQUENCE-START // BLOCK-MAPPING-START // BLOCK-END // BLOCK-ENTRY // KEY // VALUE // // The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation // increase that precedes a block collection (cf. the INDENT token in Python). // The token BLOCK-END denote indentation decrease that ends a block collection // (cf. the DEDENT token in Python). However YAML has some syntax pecularities // that makes detections of these tokens more complex. // // The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators // '-', '?', and ':' correspondingly. // // The following examples show how the tokens BLOCK-SEQUENCE-START, // BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: // // 1. 
Block sequences: // // - item 1 // - item 2 // - // - item 3.1 // - item 3.2 // - // key 1: value 1 // key 2: value 2 // // Tokens: // // STREAM-START(utf-8) // BLOCK-SEQUENCE-START // BLOCK-ENTRY // SCALAR("item 1",plain) // BLOCK-ENTRY // SCALAR("item 2",plain) // BLOCK-ENTRY // BLOCK-SEQUENCE-START // BLOCK-ENTRY // SCALAR("item 3.1",plain) // BLOCK-ENTRY // SCALAR("item 3.2",plain) // BLOCK-END // BLOCK-ENTRY // BLOCK-MAPPING-START // KEY // SCALAR("key 1",plain) // VALUE // SCALAR("value 1",plain) // KEY // SCALAR("key 2",plain) // VALUE // SCALAR("value 2",plain) // BLOCK-END // BLOCK-END // STREAM-END // // 2. Block mappings: // // a simple key: a value # The KEY token is produced here. // ? a complex key // : another value // a mapping: // key 1: value 1 // key 2: value 2 // a sequence: // - item 1 // - item 2 // // Tokens: // // STREAM-START(utf-8) // BLOCK-MAPPING-START // KEY // SCALAR("a simple key",plain) // VALUE // SCALAR("a value",plain) // KEY // SCALAR("a complex key",plain) // VALUE // SCALAR("another value",plain) // KEY // SCALAR("a mapping",plain) // BLOCK-MAPPING-START // KEY // SCALAR("key 1",plain) // VALUE // SCALAR("value 1",plain) // KEY // SCALAR("key 2",plain) // VALUE // SCALAR("value 2",plain) // BLOCK-END // KEY // SCALAR("a sequence",plain) // VALUE // BLOCK-SEQUENCE-START // BLOCK-ENTRY // SCALAR("item 1",plain) // BLOCK-ENTRY // SCALAR("item 2",plain) // BLOCK-END // BLOCK-END // STREAM-END // // YAML does not always require to start a new block collection from a new // line. If the current line contains only '-', '?', and ':' indicators, a new // block collection may start at the current line. The following examples // illustrate this case: // // 1. Collections in a sequence: // // - - item 1 // - item 2 // - key 1: value 1 // key 2: value 2 // - ? 
complex key // : complex value // // Tokens: // // STREAM-START(utf-8) // BLOCK-SEQUENCE-START // BLOCK-ENTRY // BLOCK-SEQUENCE-START // BLOCK-ENTRY // SCALAR("item 1",plain) // BLOCK-ENTRY // SCALAR("item 2",plain) // BLOCK-END // BLOCK-ENTRY // BLOCK-MAPPING-START // KEY // SCALAR("key 1",plain) // VALUE // SCALAR("value 1",plain) // KEY // SCALAR("key 2",plain) // VALUE // SCALAR("value 2",plain) // BLOCK-END // BLOCK-ENTRY // BLOCK-MAPPING-START // KEY // SCALAR("complex key") // VALUE // SCALAR("complex value") // BLOCK-END // BLOCK-END // STREAM-END // // 2. Collections in a mapping: // // ? a sequence // : - item 1 // - item 2 // ? a mapping // : key 1: value 1 // key 2: value 2 // // Tokens: // // STREAM-START(utf-8) // BLOCK-MAPPING-START // KEY // SCALAR("a sequence",plain) // VALUE // BLOCK-SEQUENCE-START // BLOCK-ENTRY // SCALAR("item 1",plain) // BLOCK-ENTRY // SCALAR("item 2",plain) // BLOCK-END // KEY // SCALAR("a mapping",plain) // VALUE // BLOCK-MAPPING-START // KEY // SCALAR("key 1",plain) // VALUE // SCALAR("value 1",plain) // KEY // SCALAR("key 2",plain) // VALUE // SCALAR("value 2",plain) // BLOCK-END // BLOCK-END // STREAM-END // // YAML also permits non-indented sequences if they are included into a block // mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: // // key: // - item 1 # BLOCK-SEQUENCE-START is NOT produced here. // - item 2 // // Tokens: // // STREAM-START(utf-8) // BLOCK-MAPPING-START // KEY // SCALAR("key",plain) // VALUE // BLOCK-ENTRY // SCALAR("item 1",plain) // BLOCK-ENTRY // SCALAR("item 2",plain) // BLOCK-END // // Ensure that the buffer contains the required number of characters. // Return true on success, false on failure (reader error or memory error). func cache(parser *yaml_parser_t, length int) bool { // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) return parser.unread >= length || yaml_parser_update_buffer(parser, length) } // Advance the buffer pointer. 
// skip consumes one (possibly multi-byte) character: it advances the input
// mark (index/column) and the buffer position by the character's UTF-8 width.
func skip(parser *yaml_parser_t) {
	parser.mark.index++
	parser.mark.column++
	parser.unread--
	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
}

// skip_line consumes a line break (CRLF counts as a single break) and resets
// the column while advancing the line counter.  If the current position is
// not a break at all, nothing is consumed.
func skip_line(parser *yaml_parser_t) {
	if is_crlf(parser.buffer, parser.buffer_pos) {
		// CR LF is two bytes but one logical break.
		parser.mark.index += 2
		parser.mark.column = 0
		parser.mark.line++
		parser.unread -= 2
		parser.buffer_pos += 2
	} else if is_break(parser.buffer, parser.buffer_pos) {
		parser.mark.index++
		parser.mark.column = 0
		parser.mark.line++
		parser.unread--
		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
	}
}

// Copy a character to a string buffer and advance pointers.
// The fast path writes a single ASCII byte in place when capacity allows;
// otherwise the full multi-byte sequence is appended.
func read(parser *yaml_parser_t, s []byte) []byte {
	w := width(parser.buffer[parser.buffer_pos])
	if w == 0 {
		panic("invalid character sequence")
	}
	if len(s) == 0 {
		s = make([]byte, 0, 32)
	}
	if w == 1 && len(s)+w <= cap(s) {
		// Single byte with spare capacity: extend and store directly.
		s = s[:len(s)+1]
		s[len(s)-1] = parser.buffer[parser.buffer_pos]
		parser.buffer_pos++
	} else {
		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
		parser.buffer_pos += w
	}
	parser.mark.index++
	parser.mark.column++
	parser.unread--
	return s
}

// Copy a line break character to a string buffer and advance pointers.
// All break forms (CRLF, CR, LF, NEL) are normalized to '\n'; the Unicode
// LS/PS separators are copied through verbatim.
func read_line(parser *yaml_parser_t, s []byte) []byte {
	buf := parser.buffer
	pos := parser.buffer_pos
	switch {
	case buf[pos] == '\r' && buf[pos+1] == '\n':
		// CR LF . LF
		s = append(s, '\n')
		parser.buffer_pos += 2
		parser.mark.index++
		parser.unread--
	case buf[pos] == '\r' || buf[pos] == '\n':
		// CR|LF . LF
		s = append(s, '\n')
		parser.buffer_pos += 1
	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
		// NEL . LF
		s = append(s, '\n')
		parser.buffer_pos += 2
	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
		// LS|PS . LS|PS
		s = append(s, buf[parser.buffer_pos:pos+3]...)
		parser.buffer_pos += 3
	default:
		// Not a break: consume nothing.
		return s
	}
	parser.mark.index++
	parser.mark.column = 0
	parser.mark.line++
	parser.unread--
	return s
}

// Get the next token.
func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { // Erase the token object. *token = yaml_token_t{} // [Go] Is this necessary? // No tokens after STREAM-END or error. if parser.stream_end_produced || parser.error != yaml_NO_ERROR { return true } // Ensure that the tokens queue contains enough tokens. if !parser.token_available { if !yaml_parser_fetch_more_tokens(parser) { return false } } // Fetch the next token from the queue. *token = parser.tokens[parser.tokens_head] parser.tokens_head++ parser.tokens_parsed++ parser.token_available = false if token.typ == yaml_STREAM_END_TOKEN { parser.stream_end_produced = true } return true } // Set the scanner error and return false. func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { parser.error = yaml_SCANNER_ERROR parser.context = context parser.context_mark = context_mark parser.problem = problem parser.problem_mark = parser.mark return false } func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { context := "while parsing a tag" if directive { context = "while parsing a %TAG directive" } return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") } func trace(args ...interface{}) func() { pargs := append([]interface{}{"+++"}, args...) fmt.Println(pargs...) pargs = append([]interface{}{"---"}, args...) return func() { fmt.Println(pargs...) } } // Ensure that the tokens queue contains at least one token which can be // returned to the Parser. func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { // While we need more tokens to fetch, do it. for { // Check if we really need to fetch more tokens. need_more_tokens := false if parser.tokens_head == len(parser.tokens) { // Queue is empty. need_more_tokens = true } else { // Check if any potential simple key may occupy the head position. 
if !yaml_parser_stale_simple_keys(parser) { return false } for i := range parser.simple_keys { simple_key := &parser.simple_keys[i] if simple_key.possible && simple_key.token_number == parser.tokens_parsed { need_more_tokens = true break } } } // We are finished. if !need_more_tokens { break } // Fetch the next token. if !yaml_parser_fetch_next_token(parser) { return false } } parser.token_available = true return true } // The dispatcher for token fetchers. func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { // Ensure that the buffer is initialized. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } // Check if we just started scanning. Fetch STREAM-START then. if !parser.stream_start_produced { return yaml_parser_fetch_stream_start(parser) } // Eat whitespaces and comments until we reach the next token. if !yaml_parser_scan_to_next_token(parser) { return false } // Remove obsolete potential simple keys. if !yaml_parser_stale_simple_keys(parser) { return false } // Check the indentation level against the current column. if !yaml_parser_unroll_indent(parser, parser.mark.column) { return false } // Ensure that the buffer contains at least 4 characters. 4 is the length // of the longest indicators ('--- ' and '... '). if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { return false } // Is it the end of the stream? if is_z(parser.buffer, parser.buffer_pos) { return yaml_parser_fetch_stream_end(parser) } // Is it a directive? if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { return yaml_parser_fetch_directive(parser) } buf := parser.buffer pos := parser.buffer_pos // Is it the document start indicator? if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) } // Is it the document end indicator? if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' 
&& buf[pos+2] == '.' && is_blankz(buf, pos+3) { return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) } // Is it the flow sequence start indicator? if buf[pos] == '[' { return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) } // Is it the flow mapping start indicator? if parser.buffer[parser.buffer_pos] == '{' { return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) } // Is it the flow sequence end indicator? if parser.buffer[parser.buffer_pos] == ']' { return yaml_parser_fetch_flow_collection_end(parser, yaml_FLOW_SEQUENCE_END_TOKEN) } // Is it the flow mapping end indicator? if parser.buffer[parser.buffer_pos] == '}' { return yaml_parser_fetch_flow_collection_end(parser, yaml_FLOW_MAPPING_END_TOKEN) } // Is it the flow entry indicator? if parser.buffer[parser.buffer_pos] == ',' { return yaml_parser_fetch_flow_entry(parser) } // Is it the block entry indicator? if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { return yaml_parser_fetch_block_entry(parser) } // Is it the key indicator? if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { return yaml_parser_fetch_key(parser) } // Is it the value indicator? if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { return yaml_parser_fetch_value(parser) } // Is it an alias? if parser.buffer[parser.buffer_pos] == '*' { return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) } // Is it an anchor? if parser.buffer[parser.buffer_pos] == '&' { return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) } // Is it a tag? if parser.buffer[parser.buffer_pos] == '!' { return yaml_parser_fetch_tag(parser) } // Is it a literal scalar? 
if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { return yaml_parser_fetch_block_scalar(parser, true) } // Is it a folded scalar? if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { return yaml_parser_fetch_block_scalar(parser, false) } // Is it a single-quoted scalar? if parser.buffer[parser.buffer_pos] == '\'' { return yaml_parser_fetch_flow_scalar(parser, true) } // Is it a double-quoted scalar? if parser.buffer[parser.buffer_pos] == '"' { return yaml_parser_fetch_flow_scalar(parser, false) } // Is it a plain scalar? // // A plain scalar may start with any non-blank characters except // // '-', '?', ':', ',', '[', ']', '{', '}', // '#', '&', '*', '!', '|', '>', '\'', '\"', // '%', '@', '`'. // // In the block context (and, for the '-' indicator, in the flow context // too), it may also start with the characters // // '-', '?', ':' // // if it is followed by a non-space character. // // The last rule is more restrictive than the specification requires. // [Go] Make this logic more reasonable. //switch parser.buffer[parser.buffer_pos] { //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': //} if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '|' || parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || (parser.flow_level == 0 && (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && !is_blankz(parser.buffer, parser.buffer_pos+1)) { return yaml_parser_fetch_plain_scalar(parser) } // If we don't determine the token type so far, it is an error. return yaml_parser_set_scanner_error(parser, "while scanning for the next token", parser.mark, "found character that cannot start any token") } // Check the list of potential simple keys and remove the positions that // cannot contain simple keys anymore. func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { // Check for a potential simple key for each flow level. for i := range parser.simple_keys { simple_key := &parser.simple_keys[i] // The specification requires that a simple key // // - is limited to a single line, // - is shorter than 1024 characters. if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { // Check if the potential simple key to be removed is required. if simple_key.required { return yaml_parser_set_scanner_error(parser, "while scanning a simple key", simple_key.mark, "could not find expected ':'") } simple_key.possible = false } } return true } // Check if a simple key may start at the current position and add it if // needed. func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { // A simple key is required at the current position if the scanner is in // the block context and the current column coincides with the indentation // level. 
required := parser.flow_level == 0 && parser.indent == parser.mark.column // A simple key is required only when it is the first token in the current // line. Therefore it is always allowed. But we add a check anyway. if required && !parser.simple_key_allowed { panic("should not happen") } // // If the current position may start a simple key, save it. // if parser.simple_key_allowed { simple_key := yaml_simple_key_t{ possible: true, required: required, token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), } simple_key.mark = parser.mark if !yaml_parser_remove_simple_key(parser) { return false } parser.simple_keys[len(parser.simple_keys)-1] = simple_key } return true } // Remove a potential simple key at the current flow level. func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { i := len(parser.simple_keys) - 1 if parser.simple_keys[i].possible { // If the key is required, it is an error. if parser.simple_keys[i].required { return yaml_parser_set_scanner_error(parser, "while scanning a simple key", parser.simple_keys[i].mark, "could not find expected ':'") } } // Remove the key from the stack. parser.simple_keys[i].possible = false return true } // Increase the flow level and resize the simple key list if needed. func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { // Reset the simple key on the next level. parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) // Increase the flow level. parser.flow_level++ return true } // Decrease the flow level. func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { if parser.flow_level > 0 { parser.flow_level-- parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] } return true } // Push the current indentation level to the stack and set the new level // the current column is greater than the indentation level. In this case, // append or insert the specified token into the token queue. 
func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
	// In the flow context, do nothing.
	if parser.flow_level > 0 {
		return true
	}

	if parser.indent < column {
		// Push the current indentation level to the stack and set the new
		// indentation level.
		parser.indents = append(parser.indents, parser.indent)
		parser.indent = column

		// Create a token and insert it into the queue.
		token := yaml_token_t{
			typ:        typ,
			start_mark: mark,
			end_mark:   mark,
		}
		// number > -1 means "insert at this absolute token number";
		// convert it to a queue-relative position.
		if number > -1 {
			number -= parser.tokens_parsed
		}
		yaml_insert_token(parser, number, &token)
	}
	return true
}

// Pop indentation levels from the indents stack until the current level
// becomes less or equal to the column.  For each indentation level, append
// the BLOCK-END token.
func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
	// In the flow context, do nothing.
	if parser.flow_level > 0 {
		return true
	}

	// Loop through the indentation levels in the stack.
	for parser.indent > column {
		// Create a token and append it to the queue.
		token := yaml_token_t{
			typ:        yaml_BLOCK_END_TOKEN,
			start_mark: parser.mark,
			end_mark:   parser.mark,
		}
		yaml_insert_token(parser, -1, &token)

		// Pop the indentation level.
		parser.indent = parser.indents[len(parser.indents)-1]
		parser.indents = parser.indents[:len(parser.indents)-1]
	}
	return true
}

// Initialize the scanner and produce the STREAM-START token.
func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {

	// Set the initial indentation.
	parser.indent = -1

	// Initialize the simple key stack.
	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})

	// A simple key is allowed at the beginning of the stream.
	parser.simple_key_allowed = true

	// We have started.
	parser.stream_start_produced = true

	// Create the STREAM-START token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_STREAM_START_TOKEN,
		start_mark: parser.mark,
		end_mark:   parser.mark,
		encoding:   parser.encoding,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the STREAM-END token and shut down the scanner.
func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {

	// Force new line.
	if parser.mark.column != 0 {
		parser.mark.column = 0
		parser.mark.line++
	}

	// Reset the indentation level.
	if !yaml_parser_unroll_indent(parser, -1) {
		return false
	}

	// Reset simple keys.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	parser.simple_key_allowed = false

	// Create the STREAM-END token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_STREAM_END_TOKEN,
		start_mark: parser.mark,
		end_mark:   parser.mark,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
	// Reset the indentation level.
	if !yaml_parser_unroll_indent(parser, -1) {
		return false
	}

	// Reset simple keys.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	parser.simple_key_allowed = false

	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
	token := yaml_token_t{}
	if !yaml_parser_scan_directive(parser, &token) {
		return false
	}
	// Append the token to the queue.
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the DOCUMENT-START or DOCUMENT-END token.
func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
	// Reset the indentation level.
	if !yaml_parser_unroll_indent(parser, -1) {
		return false
	}

	// Reset simple keys.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	parser.simple_key_allowed = false

	// Consume the token ('---' or '...': three characters).
	start_mark := parser.mark

	skip(parser)
	skip(parser)
	skip(parser)

	end_mark := parser.mark

	// Create the DOCUMENT-START or DOCUMENT-END token.
	token := yaml_token_t{
		typ:        typ,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	// Append the token to the queue.
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
	// The indicators '[' and '{' may start a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}

	// Increase the flow level.
	if !yaml_parser_increase_flow_level(parser) {
		return false
	}

	// A simple key may follow the indicators '[' and '{'.
	parser.simple_key_allowed = true

	// Consume the token.
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark

	// Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token.
	token := yaml_token_t{
		typ:        typ,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	// Append the token to the queue.
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
	// Reset any potential simple key on the current flow level.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	// Decrease the flow level.
	if !yaml_parser_decrease_flow_level(parser) {
		return false
	}

	// No simple keys after the indicators ']' and '}'.
	parser.simple_key_allowed = false

	// Consume the token.
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark

	// Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token.
	token := yaml_token_t{
		typ:        typ,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	// Append the token to the queue.
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the FLOW-ENTRY token.
func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
	// Reset any potential simple keys on the current flow level.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	// Simple keys are allowed after ','.
	parser.simple_key_allowed = true

	// Consume the token.
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark

	// Create the FLOW-ENTRY token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_FLOW_ENTRY_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the BLOCK-ENTRY token.
func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
	// Check if the scanner is in the block context.
	if parser.flow_level == 0 {
		// Check if we are allowed to start a new entry.
		if !parser.simple_key_allowed {
			return yaml_parser_set_scanner_error(parser, "", parser.mark,
				"block sequence entries are not allowed in this context")
		}
		// Add the BLOCK-SEQUENCE-START token if needed.
		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
			return false
		}
	} else {
		// It is an error for the '-' indicator to occur in the flow context,
		// but we let the Parser detect and report about it because the Parser
		// is able to point to the context.
	}

	// Reset any potential simple keys on the current flow level.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	// Simple keys are allowed after '-'.
	parser.simple_key_allowed = true

	// Consume the token.
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark

	// Create the BLOCK-ENTRY token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_BLOCK_ENTRY_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the KEY token.
func yaml_parser_fetch_key(parser *yaml_parser_t) bool {

	// In the block context, additional checks are required.
	if parser.flow_level == 0 {
		// Check if we are allowed to start a new key (not necessarily simple).
		if !parser.simple_key_allowed {
			return yaml_parser_set_scanner_error(parser, "", parser.mark,
				"mapping keys are not allowed in this context")
		}
		// Add the BLOCK-MAPPING-START token if needed.
		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
			return false
		}
	}

	// Reset any potential simple keys on the current flow level.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	// Simple keys are allowed after '?' in the block context.
	parser.simple_key_allowed = parser.flow_level == 0

	// Consume the token.
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark

	// Create the KEY token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_KEY_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the VALUE token.
func yaml_parser_fetch_value(parser *yaml_parser_t) bool {

	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]

	// Have we found a simple key?
	if simple_key.possible {
		// Create the KEY token and insert it into the queue.
		token := yaml_token_t{
			typ:        yaml_KEY_TOKEN,
			start_mark: simple_key.mark,
			end_mark:   simple_key.mark,
		}
		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)

		// In the block context, we may need to add the BLOCK-MAPPING-START token.
		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
			simple_key.token_number,
			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
			return false
		}

		// Remove the simple key.
		simple_key.possible = false

		// A simple key cannot follow another simple key.
		parser.simple_key_allowed = false

	} else {
		// The ':' indicator follows a complex key.

		// In the block context, extra checks are required.
		if parser.flow_level == 0 {

			// Check if we are allowed to start a complex value.
			if !parser.simple_key_allowed {
				return yaml_parser_set_scanner_error(parser, "", parser.mark,
					"mapping values are not allowed in this context")
			}

			// Add the BLOCK-MAPPING-START token if needed.
			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
				return false
			}
		}

		// Simple keys after ':' are allowed in the block context.
		parser.simple_key_allowed = parser.flow_level == 0
	}

	// Consume the token.
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark

	// Create the VALUE token and append it to the queue.
	token := yaml_token_t{
		typ:        yaml_VALUE_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the ALIAS or ANCHOR token.
func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {

	// An anchor or an alias could be a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}

	// A simple key cannot follow an anchor or an alias.
	parser.simple_key_allowed = false

	// Create the ALIAS or ANCHOR token and append it to the queue.
	var token yaml_token_t
	if !yaml_parser_scan_anchor(parser, &token, typ) {
		return false
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the TAG token.
func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
	// A tag could be a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}

	// A simple key cannot follow a tag.
	parser.simple_key_allowed = false

	// Create the TAG token and append it to the queue.
	var token yaml_token_t
	if !yaml_parser_scan_tag(parser, &token) {
		return false
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
	// Remove any potential simple keys.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}

	// A simple key may follow a block scalar.
	parser.simple_key_allowed = true

	// Create the SCALAR token and append it to the queue.
	var token yaml_token_t
	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
		return false
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
	// A plain scalar could be a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}

	// A simple key cannot follow a flow scalar.
	parser.simple_key_allowed = false

	// Create the SCALAR token and append it to the queue.
	var token yaml_token_t
	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
		return false
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Produce the SCALAR(...,plain) token.
func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
	// A plain scalar could be a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}

	// A simple key cannot follow a flow scalar.
	parser.simple_key_allowed = false

	// Create the SCALAR token and append it to the queue.
	var token yaml_token_t
	if !yaml_parser_scan_plain_scalar(parser, &token) {
		return false
	}
	yaml_insert_token(parser, -1, &token)
	return true
}

// Eat whitespaces and comments until the next token is found.
func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {

	// Until the next token is not found.
	for {
		// Allow the BOM mark to start a line.
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
			skip(parser)
		}

		// Eat whitespaces.
		// Tabs are allowed:
		//  - in the flow context
		//  - in the block context, but not at the beginning of the line or
		//  after '-', '?', or ':' (complex value).
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}

		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
			skip(parser)
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}

		// Eat a comment until a line break.
		if parser.buffer[parser.buffer_pos] == '#' {
			for !is_breakz(parser.buffer, parser.buffer_pos) {
				skip(parser)
				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
					return false
				}
			}
		}

		// If it is a line break, eat it.
		if is_break(parser.buffer, parser.buffer_pos) {
			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
				return false
			}
			skip_line(parser)

			// In the block context, a new line may start a simple key.
			if parser.flow_level == 0 {
				parser.simple_key_allowed = true
			}
		} else {
			break // We have found a token.
		}
	}

	return true
}

// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
//
// Scope:
//      %YAML    1.1    # a comment \n
//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
//      %TAG    !yaml!  tag:yaml.org,2002:  \n
//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
//
func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
	// Eat '%'.
	start_mark := parser.mark
	skip(parser)

	// Scan the directive name.
	var name []byte
	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
		return false
	}

	// Is it a YAML directive?
	if bytes.Equal(name, []byte("YAML")) {
		// Scan the VERSION directive value.
		var major, minor int8
		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
			return false
		}
		end_mark := parser.mark

		// Create a VERSION-DIRECTIVE token.
		*token = yaml_token_t{
			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
			start_mark: start_mark,
			end_mark:   end_mark,
			major:      major,
			minor:      minor,
		}

		// Is it a TAG directive?
	} else if bytes.Equal(name, []byte("TAG")) {
		// Scan the TAG directive value.
		var handle, prefix []byte
		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
			return false
		}
		end_mark := parser.mark

		// Create a TAG-DIRECTIVE token.
		*token = yaml_token_t{
			typ:        yaml_TAG_DIRECTIVE_TOKEN,
			start_mark: start_mark,
			end_mark:   end_mark,
			value:      handle,
			prefix:     prefix,
		}

		// Unknown directive.
	} else {
		// NOTE(review): "uknown" is a typo in the runtime error string; left
		// byte-identical here to preserve observable behavior.
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "found uknown directive name")
		return false
	}

	// Eat the rest of the line including any comments.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	for is_blank(parser.buffer, parser.buffer_pos) {
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	if parser.buffer[parser.buffer_pos] == '#' {
		for !is_breakz(parser.buffer, parser.buffer_pos) {
			skip(parser)
			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
				return false
			}
		}
	}

	// Check if we are at the end of the line.
	if !is_breakz(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "did not find expected comment or line break")
		return false
	}

	// Eat a line break.
	if is_break(parser.buffer, parser.buffer_pos) {
		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
			return false
		}
		skip_line(parser)
	}

	return true
}

// Scan the directive name.
//
// Scope:
//      %YAML   1.1     # a comment \n
//       ^^^^
//      %TAG    !yaml!  tag:yaml.org,2002:  \n
//       ^^^
//
func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
	// Consume the directive name.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}

	var s []byte
	for is_alpha(parser.buffer, parser.buffer_pos) {
		s = read(parser, s)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Check if the name is empty.
	if len(s) == 0 {
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "could not find expected directive name")
		return false
	}

	// Check for a blank character after the name.
	if !is_blankz(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "found unexpected non-alphabetical character")
		return false
	}
	*name = s
	return true
}

// Scan the value of VERSION-DIRECTIVE.
//
// Scope:
//      %YAML   1.1     # a comment \n
//           ^^^^^^
func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
	// Eat whitespaces.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	for is_blank(parser.buffer, parser.buffer_pos) {
		skip(parser)
		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
			return false
		}
	}

	// Consume the major version number.
	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
		return false
	}

	// Eat '.'.
	if parser.buffer[parser.buffer_pos] != '.' {
		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
			start_mark, "did not find expected digit or '.' character")
	}

	skip(parser)

	// Consume the minor version number.
	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
		return false
	}
	return true
}

const max_number_length = 2

// Scan the version number of VERSION-DIRECTIVE.
//
// Scope:
//      %YAML   1.1     # a comment \n
//              ^
//      %YAML   1.1     # a comment \n
//                ^
func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {

	// Repeat while the next character is digit.
	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
		return false
	}
	var value, length int8
	for is_digit(parser.buffer, parser.buffer_pos) {
		// Check if the number is too long.
length++ if length > max_number_length { return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", start_mark, "found extremely long version number") } value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } // Check if the number was present. if length == 0 { return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", start_mark, "did not find expected version number") } *number = value return true } // Scan the value of a TAG-DIRECTIVE token. // // Scope: // %TAG !yaml! tag:yaml.org,2002: \n // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ // func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { var handle_value, prefix_value []byte // Eat whitespaces. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } for is_blank(parser.buffer, parser.buffer_pos) { skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } // Scan a handle. if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { return false } // Expect a whitespace. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } if !is_blank(parser.buffer, parser.buffer_pos) { yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", start_mark, "did not find expected whitespace") return false } // Eat whitespaces. for is_blank(parser.buffer, parser.buffer_pos) { skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } // Scan a prefix. if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { return false } // Expect a whitespace or line break. 
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } if !is_blankz(parser.buffer, parser.buffer_pos) { yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", start_mark, "did not find expected whitespace or line break") return false } *handle = handle_value *prefix = prefix_value return true } func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { var s []byte // Eat the indicator character. start_mark := parser.mark skip(parser) // Consume the value. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } for is_alpha(parser.buffer, parser.buffer_pos) { s = read(parser, s) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } end_mark := parser.mark /* * Check if length of the anchor is greater than 0 and it is followed by * a whitespace character or one of the indicators: * * '?', ':', ',', ']', '}', '%', '@', '`'. */ if len(s) == 0 || !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') { context := "while scanning an alias" if typ == yaml_ANCHOR_TOKEN { context = "while scanning an anchor" } yaml_parser_set_scanner_error(parser, context, start_mark, "did not find expected alphabetic or numeric character") return false } // Create a token. *token = yaml_token_t{ typ: typ, start_mark: start_mark, end_mark: end_mark, value: s, } return true } /* * Scan a TAG token. */ func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { var handle, suffix []byte start_mark := parser.mark // Check if the tag is in the canonical form. 
if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { return false } if parser.buffer[parser.buffer_pos+1] == '<' { // Keep the handle as '' // Eat '!<' skip(parser) skip(parser) // Consume the tag value. if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { return false } // Check for '>' and eat it. if parser.buffer[parser.buffer_pos] != '>' { yaml_parser_set_scanner_error(parser, "while scanning a tag", start_mark, "did not find the expected '>'") return false } skip(parser) } else { // The tag has either the '!suffix' or the '!handle!suffix' form. // First, try to scan a handle. if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { return false } // Check if it is, indeed, handle. if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { // Scan the suffix now. if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { return false } } else { // It wasn't a handle after all. Scan the rest of the tag. if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { return false } // Set the handle to '!'. handle = []byte{'!'} // A special case: the '!' tag. Set the handle to '' and the // suffix to '!'. if len(suffix) == 0 { handle, suffix = suffix, handle } } } // Check the character which ends the tag. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } if !is_blankz(parser.buffer, parser.buffer_pos) { yaml_parser_set_scanner_error(parser, "while scanning a tag", start_mark, "did not find expected whitespace or line break") return false } end_mark := parser.mark // Create a token. *token = yaml_token_t{ typ: yaml_TAG_TOKEN, start_mark: start_mark, end_mark: end_mark, value: handle, suffix: suffix, } return true } // Scan a tag handle. func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { // Check the initial '!' character. 
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } if parser.buffer[parser.buffer_pos] != '!' { yaml_parser_set_scanner_tag_error(parser, directive, start_mark, "did not find expected '!'") return false } var s []byte // Copy the '!' character. s = read(parser, s) // Copy all subsequent alphabetical and numerical characters. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } for is_alpha(parser.buffer, parser.buffer_pos) { s = read(parser, s) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } // Check if the trailing character is '!' and copy it. if parser.buffer[parser.buffer_pos] == '!' { s = read(parser, s) } else { // It's either the '!' tag or not really a tag handle. If it's a %TAG // directive, it's an error. If it's a tag token, it must be a part of URI. if directive && !(s[0] == '!' && s[1] == 0) { yaml_parser_set_scanner_tag_error(parser, directive, start_mark, "did not find expected '!'") return false } } *handle = s return true } // Scan a tag. func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { //size_t length = head ? strlen((char *)head) : 0 var s []byte // Copy the head if needed. // // Note that we don't copy the leading '!' character. if len(head) > 1 { s = append(s, head[1:]...) } // Scan the tag. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } // The set of characters that may appear in URI is as follows: // // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', // '%'. // [Go] Convert this into more reasonable logic. for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '%' { // Check if it is a URI-escape sequence. if parser.buffer[parser.buffer_pos] == '%' { if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { return false } } else { s = read(parser, s) } if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } // Check if the tag is non-empty. if len(s) == 0 { yaml_parser_set_scanner_tag_error(parser, directive, start_mark, "did not find expected tag URI") return false } *uri = s return true } // Decode an URI-escape sequence corresponding to a single UTF-8 character. func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { // Decode the required number of characters. w := 1024 for w > 0 { // Check for a URI-escaped octet. if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { return false } if !(parser.buffer[parser.buffer_pos] == '%' && is_hex(parser.buffer, parser.buffer_pos+1) && is_hex(parser.buffer, parser.buffer_pos+2)) { return yaml_parser_set_scanner_tag_error(parser, directive, start_mark, "did not find URI escaped octet") } // Get the octet. 
octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) // If it is the leading octet, determine the length of the UTF-8 sequence. if w == 1024 { w = width(octet) if w == 0 { return yaml_parser_set_scanner_tag_error(parser, directive, start_mark, "found an incorrect leading UTF-8 octet") } } else { // Check if the trailing octet is correct. if octet&0xC0 != 0x80 { return yaml_parser_set_scanner_tag_error(parser, directive, start_mark, "found an incorrect trailing UTF-8 octet") } } // Copy the octet and move the pointers. *s = append(*s, octet) skip(parser) skip(parser) skip(parser) w-- } return true } // Scan a block scalar. func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { // Eat the indicator '|' or '>'. start_mark := parser.mark skip(parser) // Scan the additional block scalar indicators. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } // Check for a chomping indicator. var chomping, increment int if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { // Set the chomping method and eat the indicator. if parser.buffer[parser.buffer_pos] == '+' { chomping = +1 } else { chomping = -1 } skip(parser) // Check for an indentation indicator. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } if is_digit(parser.buffer, parser.buffer_pos) { // Check that the indentation is greater than 0. if parser.buffer[parser.buffer_pos] == '0' { yaml_parser_set_scanner_error(parser, "while scanning a block scalar", start_mark, "found an indentation indicator equal to 0") return false } // Get the indentation level and eat the indicator. increment = as_digit(parser.buffer, parser.buffer_pos) skip(parser) } } else if is_digit(parser.buffer, parser.buffer_pos) { // Do the same as above, but in the opposite order. 
if parser.buffer[parser.buffer_pos] == '0' { yaml_parser_set_scanner_error(parser, "while scanning a block scalar", start_mark, "found an indentation indicator equal to 0") return false } increment = as_digit(parser.buffer, parser.buffer_pos) skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { if parser.buffer[parser.buffer_pos] == '+' { chomping = +1 } else { chomping = -1 } skip(parser) } } // Eat whitespaces and comments to the end of the line. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } for is_blank(parser.buffer, parser.buffer_pos) { skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } if parser.buffer[parser.buffer_pos] == '#' { for !is_breakz(parser.buffer, parser.buffer_pos) { skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } } // Check if we are at the end of the line. if !is_breakz(parser.buffer, parser.buffer_pos) { yaml_parser_set_scanner_error(parser, "while scanning a block scalar", start_mark, "did not find expected comment or line break") return false } // Eat a line break. if is_break(parser.buffer, parser.buffer_pos) { if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { return false } skip_line(parser) } end_mark := parser.mark // Set the indentation level if it was specified. var indent int if increment > 0 { if parser.indent >= 0 { indent = parser.indent + increment } else { indent = increment } } // Scan the leading line breaks and determine the indentation level if needed. var s, leading_break, trailing_breaks []byte if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { return false } // Scan the block scalar content. 
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } var leading_blank, trailing_blank bool for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { // We are at the beginning of a non-empty line. // Is it a trailing whitespace? trailing_blank = is_blank(parser.buffer, parser.buffer_pos) // Check if we need to fold the leading line break. if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { // Do we need to join the lines by space? if len(trailing_breaks) == 0 { s = append(s, ' ') } } else { s = append(s, leading_break...) } leading_break = leading_break[:0] // Append the remaining line breaks. s = append(s, trailing_breaks...) trailing_breaks = trailing_breaks[:0] // Is it a leading whitespace? leading_blank = is_blank(parser.buffer, parser.buffer_pos) // Consume the current line. for !is_breakz(parser.buffer, parser.buffer_pos) { s = read(parser, s) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } // Consume the line break. if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { return false } leading_break = read_line(parser, leading_break) // Eat the following indentation spaces and line breaks. if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { return false } } // Chomp the tail. if chomping != -1 { s = append(s, leading_break...) } if chomping == 1 { s = append(s, trailing_breaks...) } // Create a token. *token = yaml_token_t{ typ: yaml_SCALAR_TOKEN, start_mark: start_mark, end_mark: end_mark, value: s, style: yaml_LITERAL_SCALAR_STYLE, } if !literal { token.style = yaml_FOLDED_SCALAR_STYLE } return true } // Scan indentation spaces and line breaks for a block scalar. Determine the // indentation level if needed. 
func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { *end_mark = parser.mark // Eat the indentation spaces and line breaks. max_indent := 0 for { // Eat the indentation spaces. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } if parser.mark.column > max_indent { max_indent = parser.mark.column } // Check for a tab character messing the indentation. if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", start_mark, "found a tab character where an indentation space is expected") } // Have we found a non-empty line? if !is_break(parser.buffer, parser.buffer_pos) { break } // Consume the line break. if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { return false } // [Go] Should really be returning breaks instead. *breaks = read_line(parser, *breaks) *end_mark = parser.mark } // Determine the indentation level if needed. if *indent == 0 { *indent = max_indent if *indent < parser.indent+1 { *indent = parser.indent + 1 } if *indent < 1 { *indent = 1 } } return true } // Scan a quoted scalar. func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { // Eat the left quote. start_mark := parser.mark skip(parser) // Consume the content of the quoted scalar. var s, leading_break, trailing_breaks, whitespaces []byte for { // Check that there are no document indicators at the beginning of the line. 
if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { return false } if parser.mark.column == 0 && ((parser.buffer[parser.buffer_pos+0] == '-' && parser.buffer[parser.buffer_pos+1] == '-' && parser.buffer[parser.buffer_pos+2] == '-') || (parser.buffer[parser.buffer_pos+0] == '.' && parser.buffer[parser.buffer_pos+1] == '.' && parser.buffer[parser.buffer_pos+2] == '.')) && is_blankz(parser.buffer, parser.buffer_pos+3) { yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", start_mark, "found unexpected document indicator") return false } // Check for EOF. if is_z(parser.buffer, parser.buffer_pos) { yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", start_mark, "found unexpected end of stream") return false } // Consume non-blank characters. leading_blanks := false for !is_blankz(parser.buffer, parser.buffer_pos) { if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { // Is is an escaped single quote. s = append(s, '\'') skip(parser) skip(parser) } else if single && parser.buffer[parser.buffer_pos] == '\'' { // It is a right single quote. break } else if !single && parser.buffer[parser.buffer_pos] == '"' { // It is a right double quote. break } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { // It is an escaped line break. if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { return false } skip(parser) skip_line(parser) leading_blanks = true break } else if !single && parser.buffer[parser.buffer_pos] == '\\' { // It is an escape sequence. code_length := 0 // Check the escape character. 
switch parser.buffer[parser.buffer_pos+1] { case '0': s = append(s, 0) case 'a': s = append(s, '\x07') case 'b': s = append(s, '\x08') case 't', '\t': s = append(s, '\x09') case 'n': s = append(s, '\x0A') case 'v': s = append(s, '\x0B') case 'f': s = append(s, '\x0C') case 'r': s = append(s, '\x0D') case 'e': s = append(s, '\x1B') case ' ': s = append(s, '\x20') case '"': s = append(s, '"') case '\'': s = append(s, '\'') case '\\': s = append(s, '\\') case 'N': // NEL (#x85) s = append(s, '\xC2') s = append(s, '\x85') case '_': // #xA0 s = append(s, '\xC2') s = append(s, '\xA0') case 'L': // LS (#x2028) s = append(s, '\xE2') s = append(s, '\x80') s = append(s, '\xA8') case 'P': // PS (#x2029) s = append(s, '\xE2') s = append(s, '\x80') s = append(s, '\xA9') case 'x': code_length = 2 case 'u': code_length = 4 case 'U': code_length = 8 default: yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", start_mark, "found unknown escape character") return false } skip(parser) skip(parser) // Consume an arbitrary escape code. if code_length > 0 { var value int // Scan the character value. if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { return false } for k := 0; k < code_length; k++ { if !is_hex(parser.buffer, parser.buffer_pos+k) { yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", start_mark, "did not find expected hexdecimal number") return false } value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) } // Check the value and write the character. 
if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", start_mark, "found invalid Unicode character escape code") return false } if value <= 0x7F { s = append(s, byte(value)) } else if value <= 0x7FF { s = append(s, byte(0xC0+(value>>6))) s = append(s, byte(0x80+(value&0x3F))) } else if value <= 0xFFFF { s = append(s, byte(0xE0+(value>>12))) s = append(s, byte(0x80+((value>>6)&0x3F))) s = append(s, byte(0x80+(value&0x3F))) } else { s = append(s, byte(0xF0+(value>>18))) s = append(s, byte(0x80+((value>>12)&0x3F))) s = append(s, byte(0x80+((value>>6)&0x3F))) s = append(s, byte(0x80+(value&0x3F))) } // Advance the pointer. for k := 0; k < code_length; k++ { skip(parser) } } } else { // It is a non-escaped non-blank character. s = read(parser, s) } if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { return false } } // Check if we are at the end of the scalar. if single { if parser.buffer[parser.buffer_pos] == '\'' { break } } else { if parser.buffer[parser.buffer_pos] == '"' { break } } // Consume blank characters. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { if is_blank(parser.buffer, parser.buffer_pos) { // Consume a space or a tab character. if !leading_blanks { whitespaces = read(parser, whitespaces) } else { skip(parser) } } else { if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { return false } // Check if it is a first line break. if !leading_blanks { whitespaces = whitespaces[:0] leading_break = read_line(parser, leading_break) leading_blanks = true } else { trailing_breaks = read_line(parser, trailing_breaks) } } if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } // Join the whitespaces or fold line breaks. if leading_blanks { // Do we need to fold line breaks? 
if len(leading_break) > 0 && leading_break[0] == '\n' { if len(trailing_breaks) == 0 { s = append(s, ' ') } else { s = append(s, trailing_breaks...) } } else { s = append(s, leading_break...) s = append(s, trailing_breaks...) } trailing_breaks = trailing_breaks[:0] leading_break = leading_break[:0] } else { s = append(s, whitespaces...) whitespaces = whitespaces[:0] } } // Eat the right quote. skip(parser) end_mark := parser.mark // Create a token. *token = yaml_token_t{ typ: yaml_SCALAR_TOKEN, start_mark: start_mark, end_mark: end_mark, value: s, style: yaml_SINGLE_QUOTED_SCALAR_STYLE, } if !single { token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE } return true } // Scan a plain scalar. func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { var s, leading_break, trailing_breaks, whitespaces []byte var leading_blanks bool var indent = parser.indent + 1 start_mark := parser.mark end_mark := parser.mark // Consume the content of the plain scalar. for { // Check for a document indicator. if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { return false } if parser.mark.column == 0 && ((parser.buffer[parser.buffer_pos+0] == '-' && parser.buffer[parser.buffer_pos+1] == '-' && parser.buffer[parser.buffer_pos+2] == '-') || (parser.buffer[parser.buffer_pos+0] == '.' && parser.buffer[parser.buffer_pos+1] == '.' && parser.buffer[parser.buffer_pos+2] == '.')) && is_blankz(parser.buffer, parser.buffer_pos+3) { break } // Check for a comment. if parser.buffer[parser.buffer_pos] == '#' { break } // Consume non-blank characters. for !is_blankz(parser.buffer, parser.buffer_pos) { // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". if parser.flow_level > 0 && parser.buffer[parser.buffer_pos] == ':' && !is_blankz(parser.buffer, parser.buffer_pos+1) { yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", start_mark, "found unexpected ':'") return false } // Check for indicators that may end a plain scalar. 
if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || (parser.flow_level > 0 && (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || parser.buffer[parser.buffer_pos] == '}')) { break } // Check if we need to join whitespaces and breaks. if leading_blanks || len(whitespaces) > 0 { if leading_blanks { // Do we need to fold line breaks? if leading_break[0] == '\n' { if len(trailing_breaks) == 0 { s = append(s, ' ') } else { s = append(s, trailing_breaks...) } } else { s = append(s, leading_break...) s = append(s, trailing_breaks...) } trailing_breaks = trailing_breaks[:0] leading_break = leading_break[:0] leading_blanks = false } else { s = append(s, whitespaces...) whitespaces = whitespaces[:0] } } // Copy the character. s = read(parser, s) end_mark = parser.mark if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { return false } } // Is it the end? if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { break } // Consume blank characters. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { if is_blank(parser.buffer, parser.buffer_pos) { // Check for tab character that abuse indentation. if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", start_mark, "found a tab character that violate indentation") return false } // Consume a space or a tab character. 
if !leading_blanks { whitespaces = read(parser, whitespaces) } else { skip(parser) } } else { if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { return false } // Check if it is a first line break. if !leading_blanks { whitespaces = whitespaces[:0] leading_break = read_line(parser, leading_break) leading_blanks = true } else { trailing_breaks = read_line(parser, trailing_breaks) } } if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } } // Check indentation level. if parser.flow_level == 0 && parser.mark.column < indent { break } } // Create a token. *token = yaml_token_t{ typ: yaml_SCALAR_TOKEN, start_mark: start_mark, end_mark: end_mark, value: s, style: yaml_PLAIN_SCALAR_STYLE, } // Note that we change the 'simple_key_allowed' flag. if leading_blanks { parser.simple_key_allowed = true } return true } charm-2.1.1/src/gopkg.in/yaml.v2/decode_test.go0000664000175000017500000005147312672604537020225 0ustar marcomarcopackage yaml_test import ( "errors" . "gopkg.in/check.v1" "gopkg.in/yaml.v2" "math" "net" "reflect" "strings" "time" ) var unmarshalIntTest = 123 var unmarshalTests = []struct { data string value interface{} }{ { "", &struct{}{}, }, { "{}", &struct{}{}, }, { "v: hi", map[string]string{"v": "hi"}, }, { "v: hi", map[string]interface{}{"v": "hi"}, }, { "v: true", map[string]string{"v": "true"}, }, { "v: true", map[string]interface{}{"v": true}, }, { "v: 10", map[string]interface{}{"v": 10}, }, { "v: 0b10", map[string]interface{}{"v": 2}, }, { "v: 0xA", map[string]interface{}{"v": 10}, }, { "v: 4294967296", map[string]int64{"v": 4294967296}, }, { "v: 0.1", map[string]interface{}{"v": 0.1}, }, { "v: .1", map[string]interface{}{"v": 0.1}, }, { "v: .Inf", map[string]interface{}{"v": math.Inf(+1)}, }, { "v: -.Inf", map[string]interface{}{"v": math.Inf(-1)}, }, { "v: -10", map[string]interface{}{"v": -10}, }, { "v: -.1", map[string]interface{}{"v": -0.1}, }, // Simple values. 
{ "123", &unmarshalIntTest, }, // Floats from spec { "canonical: 6.8523e+5", map[string]interface{}{"canonical": 6.8523e+5}, }, { "expo: 685.230_15e+03", map[string]interface{}{"expo": 685.23015e+03}, }, { "fixed: 685_230.15", map[string]interface{}{"fixed": 685230.15}, }, { "neginf: -.inf", map[string]interface{}{"neginf": math.Inf(-1)}, }, { "fixed: 685_230.15", map[string]float64{"fixed": 685230.15}, }, //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails. // Bools from spec { "canonical: y", map[string]interface{}{"canonical": true}, }, { "answer: NO", map[string]interface{}{"answer": false}, }, { "logical: True", map[string]interface{}{"logical": true}, }, { "option: on", map[string]interface{}{"option": true}, }, { "option: on", map[string]bool{"option": true}, }, // Ints from spec { "canonical: 685230", map[string]interface{}{"canonical": 685230}, }, { "decimal: +685_230", map[string]interface{}{"decimal": 685230}, }, { "octal: 02472256", map[string]interface{}{"octal": 685230}, }, { "hexa: 0x_0A_74_AE", map[string]interface{}{"hexa": 685230}, }, { "bin: 0b1010_0111_0100_1010_1110", map[string]interface{}{"bin": 685230}, }, { "bin: -0b101010", map[string]interface{}{"bin": -42}, }, { "decimal: +685_230", map[string]int{"decimal": 685230}, }, //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported // Nulls from spec { "empty:", map[string]interface{}{"empty": nil}, }, { "canonical: ~", map[string]interface{}{"canonical": nil}, }, { "english: null", map[string]interface{}{"english": nil}, }, { "~: null key", map[interface{}]string{nil: "null key"}, }, { "empty:", map[string]*bool{"empty": nil}, }, // Flow sequence { "seq: [A,B]", map[string]interface{}{"seq": []interface{}{"A", "B"}}, }, { "seq: [A,B,C,]", map[string][]string{"seq": []string{"A", "B", "C"}}, }, { "seq: [A,1,C]", map[string][]string{"seq": []string{"A", "1", 
"C"}}, }, { "seq: [A,1,C]", map[string][]int{"seq": []int{1}}, }, { "seq: [A,1,C]", map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, }, // Block sequence { "seq:\n - A\n - B", map[string]interface{}{"seq": []interface{}{"A", "B"}}, }, { "seq:\n - A\n - B\n - C", map[string][]string{"seq": []string{"A", "B", "C"}}, }, { "seq:\n - A\n - 1\n - C", map[string][]string{"seq": []string{"A", "1", "C"}}, }, { "seq:\n - A\n - 1\n - C", map[string][]int{"seq": []int{1}}, }, { "seq:\n - A\n - 1\n - C", map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, }, // Literal block scalar { "scalar: | # Comment\n\n literal\n\n \ttext\n\n", map[string]string{"scalar": "\nliteral\n\n\ttext\n"}, }, // Folded block scalar { "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n", map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"}, }, // Map inside interface with no type hints. { "a: {b: c}", map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, }, // Structs and type conversions. 
{ "hello: world", &struct{ Hello string }{"world"}, }, { "a: {b: c}", &struct{ A struct{ B string } }{struct{ B string }{"c"}}, }, { "a: {b: c}", &struct{ A *struct{ B string } }{&struct{ B string }{"c"}}, }, { "a: {b: c}", &struct{ A map[string]string }{map[string]string{"b": "c"}}, }, { "a: {b: c}", &struct{ A *map[string]string }{&map[string]string{"b": "c"}}, }, { "a:", &struct{ A map[string]string }{}, }, { "a: 1", &struct{ A int }{1}, }, { "a: 1", &struct{ A float64 }{1}, }, { "a: 1.0", &struct{ A int }{1}, }, { "a: 1.0", &struct{ A uint }{1}, }, { "a: [1, 2]", &struct{ A []int }{[]int{1, 2}}, }, { "a: 1", &struct{ B int }{0}, }, { "a: 1", &struct { B int "a" }{1}, }, { "a: y", &struct{ A bool }{true}, }, // Some cross type conversions { "v: 42", map[string]uint{"v": 42}, }, { "v: -42", map[string]uint{}, }, { "v: 4294967296", map[string]uint64{"v": 4294967296}, }, { "v: -4294967296", map[string]uint64{}, }, // int { "int_max: 2147483647", map[string]int{"int_max": math.MaxInt32}, }, { "int_min: -2147483648", map[string]int{"int_min": math.MinInt32}, }, { "int_overflow: 9223372036854775808", // math.MaxInt64 + 1 map[string]int{}, }, // int64 { "int64_max: 9223372036854775807", map[string]int64{"int64_max": math.MaxInt64}, }, { "int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111", map[string]int64{"int64_max_base2": math.MaxInt64}, }, { "int64_min: -9223372036854775808", map[string]int64{"int64_min": math.MinInt64}, }, { "int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111", map[string]int64{"int64_neg_base2": -math.MaxInt64}, }, { "int64_overflow: 9223372036854775808", // math.MaxInt64 + 1 map[string]int64{}, }, // uint { "uint_min: 0", map[string]uint{"uint_min": 0}, }, { "uint_max: 4294967295", map[string]uint{"uint_max": math.MaxUint32}, }, { "uint_underflow: -1", map[string]uint{}, }, // uint64 { "uint64_min: 0", map[string]uint{"uint64_min": 0}, }, { "uint64_max: 18446744073709551615", 
map[string]uint64{"uint64_max": math.MaxUint64}, }, { "uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111", map[string]uint64{"uint64_max_base2": math.MaxUint64}, }, { "uint64_maxint64: 9223372036854775807", map[string]uint64{"uint64_maxint64": math.MaxInt64}, }, { "uint64_underflow: -1", map[string]uint64{}, }, // float32 { "float32_max: 3.40282346638528859811704183484516925440e+38", map[string]float32{"float32_max": math.MaxFloat32}, }, { "float32_nonzero: 1.401298464324817070923729583289916131280e-45", map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32}, }, { "float32_maxuint64: 18446744073709551615", map[string]float32{"float32_maxuint64": float32(math.MaxUint64)}, }, { "float32_maxuint64+1: 18446744073709551616", map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)}, }, // float64 { "float64_max: 1.797693134862315708145274237317043567981e+308", map[string]float64{"float64_max": math.MaxFloat64}, }, { "float64_nonzero: 4.940656458412465441765687928682213723651e-324", map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64}, }, { "float64_maxuint64: 18446744073709551615", map[string]float64{"float64_maxuint64": float64(math.MaxUint64)}, }, { "float64_maxuint64+1: 18446744073709551616", map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)}, }, // Overflow cases. { "v: 4294967297", map[string]int32{}, }, { "v: 128", map[string]int8{}, }, // Quoted values. { "'1': '\"2\"'", map[interface{}]interface{}{"1": "\"2\""}, }, { "v:\n- A\n- 'B\n\n C'\n", map[string][]string{"v": []string{"A", "B\nC"}}, }, // Explicit tags. { "v: !!float '1.1'", map[string]interface{}{"v": 1.1}, }, { "v: !!null ''", map[string]interface{}{"v": nil}, }, { "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", map[string]interface{}{"v": 1}, }, // Anchors and aliases. 
{ "a: &x 1\nb: &y 2\nc: *x\nd: *y\n", &struct{ A, B, C, D int }{1, 2, 1, 2}, }, { "a: &a {c: 1}\nb: *a", &struct { A, B struct { C int } }{struct{ C int }{1}, struct{ C int }{1}}, }, { "a: &a [1, 2]\nb: *a", &struct{ B []int }{[]int{1, 2}}, }, { "b: *a\na: &a {c: 1}", &struct { A, B struct { C int } }{struct{ C int }{1}, struct{ C int }{1}}, }, // Bug #1133337 { "foo: ''", map[string]*string{"foo": new(string)}, }, { "foo: null", map[string]string{"foo": ""}, }, { "foo: null", map[string]interface{}{"foo": nil}, }, // Ignored field { "a: 1\nb: 2\n", &struct { A int B int "-" }{1, 0}, }, // Bug #1191981 { "" + "%YAML 1.1\n" + "--- !!str\n" + `"Generic line break (no glyph)\n\` + "\n" + ` Generic line break (glyphed)\n\` + "\n" + ` Line separator\u2028\` + "\n" + ` Paragraph separator\u2029"` + "\n", "" + "Generic line break (no glyph)\n" + "Generic line break (glyphed)\n" + "Line separator\u2028Paragraph separator\u2029", }, // Struct inlining { "a: 1\nb: 2\nc: 3\n", &struct { A int C inlineB `yaml:",inline"` }{1, inlineB{2, inlineC{3}}}, }, // Map inlining { "a: 1\nb: 2\nc: 3\n", &struct { A int C map[string]int `yaml:",inline"` }{1, map[string]int{"b": 2, "c": 3}}, }, // bug 1243827 { "a: -b_c", map[string]interface{}{"a": "-b_c"}, }, { "a: +b_c", map[string]interface{}{"a": "+b_c"}, }, { "a: 50cent_of_dollar", map[string]interface{}{"a": "50cent_of_dollar"}, }, // Duration { "a: 3s", map[string]time.Duration{"a": 3 * time.Second}, }, // Issue #24. { "a: ", map[string]string{"a": ""}, }, // Base 60 floats are obsolete and unsupported. { "a: 1:1\n", map[string]string{"a": "1:1"}, }, // Binary data. { "a: !!binary gIGC\n", map[string]string{"a": "\x80\x81\x82"}, }, { "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", map[string]string{"a": strings.Repeat("\x90", 54)}, }, { "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n", map[string]string{"a": strings.Repeat("\x00", 52)}, }, // Ordered maps. 
{ "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}", &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, }, // Issue #39. { "a:\n b:\n c: d\n", map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}}, }, // Custom map type. { "a: {b: c}", M{"a": M{"b": "c"}}, }, // Support encoding.TextUnmarshaler. { "a: 1.2.3.4\n", map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, }, { "a: 2015-02-24T18:19:39Z\n", map[string]time.Time{"a": time.Unix(1424801979, 0)}, }, // Encode empty lists as zero-length slices. { "a: []", &struct{ A []int }{[]int{}}, }, } type M map[interface{}]interface{} type inlineB struct { B int inlineC `yaml:",inline"` } type inlineC struct { C int } func (s *S) TestUnmarshal(c *C) { for _, item := range unmarshalTests { t := reflect.ValueOf(item.value).Type() var value interface{} switch t.Kind() { case reflect.Map: value = reflect.MakeMap(t).Interface() case reflect.String: value = reflect.New(t).Interface() case reflect.Ptr: value = reflect.New(t.Elem()).Interface() default: c.Fatalf("missing case for %s", t) } err := yaml.Unmarshal([]byte(item.data), value) if _, ok := err.(*yaml.TypeError); !ok { c.Assert(err, IsNil) } if t.Kind() == reflect.String { c.Assert(*value.(*string), Equals, item.value) } else { c.Assert(value, DeepEquals, item.value) } } } func (s *S) TestUnmarshalNaN(c *C) { value := map[string]interface{}{} err := yaml.Unmarshal([]byte("notanum: .NaN"), &value) c.Assert(err, IsNil) c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true) } var unmarshalErrorTests = []struct { data, error string }{ {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"}, {"v: [A,", "yaml: line 1: did not find expected node content"}, {"v:\n- [A,", "yaml: line 2: did not find expected node content"}, {"a: *b\n", "yaml: unknown anchor 'b' referenced"}, {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"}, {"value: -", "yaml: block sequence entries are not allowed in this 
context"}, {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"}, {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`}, {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`}, } func (s *S) TestUnmarshalErrors(c *C) { for _, item := range unmarshalErrorTests { var value interface{} err := yaml.Unmarshal([]byte(item.data), &value) c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) } } var unmarshalerTests = []struct { data, tag string value interface{} }{ {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}}, {"_: [1,A]", "!!seq", []interface{}{1, "A"}}, {"_: 10", "!!int", 10}, {"_: null", "!!null", nil}, {`_: BAR!`, "!!str", "BAR!"}, {`_: "BAR!"`, "!!str", "BAR!"}, {"_: !!foo 'BAR!'", "!!foo", "BAR!"}, } var unmarshalerResult = map[int]error{} type unmarshalerType struct { value interface{} } func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error { if err := unmarshal(&o.value); err != nil { return err } if i, ok := o.value.(int); ok { if result, ok := unmarshalerResult[i]; ok { return result } } return nil } type unmarshalerPointer struct { Field *unmarshalerType "_" } type unmarshalerValue struct { Field unmarshalerType "_" } func (s *S) TestUnmarshalerPointerField(c *C) { for _, item := range unmarshalerTests { obj := &unmarshalerPointer{} err := yaml.Unmarshal([]byte(item.data), obj) c.Assert(err, IsNil) if item.value == nil { c.Assert(obj.Field, IsNil) } else { c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) c.Assert(obj.Field.value, DeepEquals, item.value) } } } func (s *S) TestUnmarshalerValueField(c *C) { for _, item := range unmarshalerTests { obj := &unmarshalerValue{} err := yaml.Unmarshal([]byte(item.data), obj) c.Assert(err, IsNil) c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) c.Assert(obj.Field.value, DeepEquals, item.value) } } 
func (s *S) TestUnmarshalerWholeDocument(c *C) { obj := &unmarshalerType{} err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj) c.Assert(err, IsNil) value, ok := obj.value.(map[interface{}]interface{}) c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value)) c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value) } func (s *S) TestUnmarshalerTypeError(c *C) { unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}} unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}} defer func() { delete(unmarshalerResult, 2) delete(unmarshalerResult, 4) }() type T struct { Before int After int M map[string]*unmarshalerType } var v T data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}` err := yaml.Unmarshal([]byte(data), &v) c.Assert(err, ErrorMatches, ""+ "yaml: unmarshal errors:\n"+ " line 1: cannot unmarshal !!str `A` into int\n"+ " foo\n"+ " bar\n"+ " line 1: cannot unmarshal !!str `B` into int") c.Assert(v.M["abc"], NotNil) c.Assert(v.M["def"], IsNil) c.Assert(v.M["ghi"], NotNil) c.Assert(v.M["jkl"], IsNil) c.Assert(v.M["abc"].value, Equals, 1) c.Assert(v.M["ghi"].value, Equals, 3) } type proxyTypeError struct{} func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error { var s string var a int32 var b int64 if err := unmarshal(&s); err != nil { panic(err) } if s == "a" { if err := unmarshal(&b); err == nil { panic("should have failed") } return unmarshal(&a) } if err := unmarshal(&a); err == nil { panic("should have failed") } return unmarshal(&b) } func (s *S) TestUnmarshalerTypeErrorProxying(c *C) { type T struct { Before int After int M map[string]*proxyTypeError } var v T data := `{before: A, m: {abc: a, def: b}, after: B}` err := yaml.Unmarshal([]byte(data), &v) c.Assert(err, ErrorMatches, ""+ "yaml: unmarshal errors:\n"+ " line 1: cannot unmarshal !!str `A` into int\n"+ " line 1: cannot unmarshal !!str `a` into int32\n"+ " line 1: cannot unmarshal !!str `b` into int64\n"+ " line 1: cannot unmarshal !!str `B` into 
int") } type failingUnmarshaler struct{} var failingErr = errors.New("failingErr") func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { return failingErr } func (s *S) TestUnmarshalerError(c *C) { err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{}) c.Assert(err, Equals, failingErr) } type sliceUnmarshaler []int func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { var slice []int err := unmarshal(&slice) if err == nil { *su = slice return nil } var intVal int err = unmarshal(&intVal) if err == nil { *su = []int{intVal} return nil } return err } func (s *S) TestUnmarshalerRetry(c *C) { var su sliceUnmarshaler err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su) c.Assert(err, IsNil) c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3})) err = yaml.Unmarshal([]byte("1"), &su) c.Assert(err, IsNil) c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1})) } // From http://yaml.org/type/merge.html var mergeTests = ` anchors: list: - &CENTER { "x": 1, "y": 2 } - &LEFT { "x": 0, "y": 2 } - &BIG { "r": 10 } - &SMALL { "r": 1 } # All the following maps are equal: plain: # Explicit keys "x": 1 "y": 2 "r": 10 label: center/big mergeOne: # Merge one map << : *CENTER "r": 10 label: center/big mergeMultiple: # Merge multiple maps << : [ *CENTER, *BIG ] label: center/big override: # Override << : [ *BIG, *LEFT, *SMALL ] "x": 1 label: center/big shortTag: # Explicit short merge tag !!merge "<<" : [ *CENTER, *BIG ] label: center/big longTag: # Explicit merge long tag ! 
"<<" : [ *CENTER, *BIG ] label: center/big inlineMap: # Inlined map << : {"x": 1, "y": 2, "r": 10} label: center/big inlineSequenceMap: # Inlined map in sequence << : [ *CENTER, {"r": 10} ] label: center/big ` func (s *S) TestMerge(c *C) { var want = map[interface{}]interface{}{ "x": 1, "y": 2, "r": 10, "label": "center/big", } var m map[interface{}]interface{} err := yaml.Unmarshal([]byte(mergeTests), &m) c.Assert(err, IsNil) for name, test := range m { if name == "anchors" { continue } c.Assert(test, DeepEquals, want, Commentf("test %q failed", name)) } } func (s *S) TestMergeStruct(c *C) { type Data struct { X, Y, R int Label string } want := Data{1, 2, 10, "center/big"} var m map[string]Data err := yaml.Unmarshal([]byte(mergeTests), &m) c.Assert(err, IsNil) for name, test := range m { if name == "anchors" { continue } c.Assert(test, Equals, want, Commentf("test %q failed", name)) } } var unmarshalNullTests = []func() interface{}{ func() interface{} { var v interface{}; v = "v"; return &v }, func() interface{} { var s = "s"; return &s }, func() interface{} { var s = "s"; sptr := &s; return &sptr }, func() interface{} { var i = 1; return &i }, func() interface{} { var i = 1; iptr := &i; return &iptr }, func() interface{} { m := map[string]int{"s": 1}; return &m }, func() interface{} { m := map[string]int{"s": 1}; return m }, } func (s *S) TestUnmarshalNull(c *C) { for _, test := range unmarshalNullTests { item := test() zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface() err := yaml.Unmarshal([]byte("null"), item) c.Assert(err, IsNil) if reflect.TypeOf(item).Kind() == reflect.Map { c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface()) } else { c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero) } } } func (s *S) TestUnmarshalSliceOnPreset(c *C) { // Issue #48. 
v := struct{ A []int }{[]int{1}} yaml.Unmarshal([]byte("a: [2]"), &v) c.Assert(v.A, DeepEquals, []int{2}) } //var data []byte //func init() { // var err error // data, err = ioutil.ReadFile("/tmp/file.yaml") // if err != nil { // panic(err) // } //} // //func (s *S) BenchmarkUnmarshal(c *C) { // var err error // for i := 0; i < c.N; i++ { // var v map[string]interface{} // err = yaml.Unmarshal(data, &v) // } // if err != nil { // panic(err) // } //} // //func (s *S) BenchmarkMarshal(c *C) { // var v map[string]interface{} // yaml.Unmarshal(data, &v) // c.ResetTimer() // for i := 0; i < c.N; i++ { // yaml.Marshal(&v) // } //} charm-2.1.1/src/gopkg.in/yaml.v2/readerc.go0000664000175000017500000002716512672604537017351 0ustar marcomarcopackage yaml import ( "io" ) // Set the reader error and return 0. func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { parser.error = yaml_READER_ERROR parser.problem = problem parser.problem_offset = offset parser.problem_value = value return false } // Byte order marks. const ( bom_UTF8 = "\xef\xbb\xbf" bom_UTF16LE = "\xff\xfe" bom_UTF16BE = "\xfe\xff" ) // Determine the input stream encoding by checking the BOM symbol. If no BOM is // found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { // Ensure that we had enough bytes in the raw buffer. for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { if !yaml_parser_update_raw_buffer(parser) { return false } } // Determine the encoding. 
buf := parser.raw_buffer pos := parser.raw_buffer_pos avail := len(buf) - pos if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { parser.encoding = yaml_UTF16LE_ENCODING parser.raw_buffer_pos += 2 parser.offset += 2 } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { parser.encoding = yaml_UTF16BE_ENCODING parser.raw_buffer_pos += 2 parser.offset += 2 } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { parser.encoding = yaml_UTF8_ENCODING parser.raw_buffer_pos += 3 parser.offset += 3 } else { parser.encoding = yaml_UTF8_ENCODING } return true } // Update the raw buffer. func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { size_read := 0 // Return if the raw buffer is full. if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { return true } // Return on EOF. if parser.eof { return true } // Move the remaining bytes in the raw buffer to the beginning. if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) } parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] parser.raw_buffer_pos = 0 // Call the read handler to fill the buffer. size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] if err == io.EOF { parser.eof = true } else if err != nil { return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) } return true } // Ensure that the buffer contains at least `length` characters. // Return true on success, false on failure. // // The length is supposed to be significantly less that the buffer size. 
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { if parser.read_handler == nil { panic("read handler must be set") } // If the EOF flag is set and the raw buffer is empty, do nothing. if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { return true } // Return if the buffer contains enough characters. if parser.unread >= length { return true } // Determine the input encoding if it is not known yet. if parser.encoding == yaml_ANY_ENCODING { if !yaml_parser_determine_encoding(parser) { return false } } // Move the unread characters to the beginning of the buffer. buffer_len := len(parser.buffer) if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { copy(parser.buffer, parser.buffer[parser.buffer_pos:]) buffer_len -= parser.buffer_pos parser.buffer_pos = 0 } else if parser.buffer_pos == buffer_len { buffer_len = 0 parser.buffer_pos = 0 } // Open the whole buffer for writing, and cut it before returning. parser.buffer = parser.buffer[:cap(parser.buffer)] // Fill the buffer until it has enough characters. first := true for parser.unread < length { // Fill the raw buffer if necessary. if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { if !yaml_parser_update_raw_buffer(parser) { parser.buffer = parser.buffer[:buffer_len] return false } } first = false // Decode the raw buffer. inner: for parser.raw_buffer_pos != len(parser.raw_buffer) { var value rune var width int raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos // Decode the next character. switch parser.encoding { case yaml_UTF8_ENCODING: // Decode a UTF-8 character. Check RFC 3629 // (http://www.ietf.org/rfc/rfc3629.txt) for more details. // // The following table (taken from the RFC) is used for // decoding. // // Char. 
number range | UTF-8 octet sequence // (hexadecimal) | (binary) // --------------------+------------------------------------ // 0000 0000-0000 007F | 0xxxxxxx // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx // // Additionally, the characters in the range 0xD800-0xDFFF // are prohibited as they are reserved for use with UTF-16 // surrogate pairs. // Determine the length of the UTF-8 sequence. octet := parser.raw_buffer[parser.raw_buffer_pos] switch { case octet&0x80 == 0x00: width = 1 case octet&0xE0 == 0xC0: width = 2 case octet&0xF0 == 0xE0: width = 3 case octet&0xF8 == 0xF0: width = 4 default: // The leading octet is invalid. return yaml_parser_set_reader_error(parser, "invalid leading UTF-8 octet", parser.offset, int(octet)) } // Check if the raw buffer contains an incomplete character. if width > raw_unread { if parser.eof { return yaml_parser_set_reader_error(parser, "incomplete UTF-8 octet sequence", parser.offset, -1) } break inner } // Decode the leading octet. switch { case octet&0x80 == 0x00: value = rune(octet & 0x7F) case octet&0xE0 == 0xC0: value = rune(octet & 0x1F) case octet&0xF0 == 0xE0: value = rune(octet & 0x0F) case octet&0xF8 == 0xF0: value = rune(octet & 0x07) default: value = 0 } // Check and decode the trailing octets. for k := 1; k < width; k++ { octet = parser.raw_buffer[parser.raw_buffer_pos+k] // Check if the octet is valid. if (octet & 0xC0) != 0x80 { return yaml_parser_set_reader_error(parser, "invalid trailing UTF-8 octet", parser.offset+k, int(octet)) } // Decode the octet. value = (value << 6) + rune(octet&0x3F) } // Check the length of the sequence against the value. 
switch { case width == 1: case width == 2 && value >= 0x80: case width == 3 && value >= 0x800: case width == 4 && value >= 0x10000: default: return yaml_parser_set_reader_error(parser, "invalid length of a UTF-8 sequence", parser.offset, -1) } // Check the range of the value. if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { return yaml_parser_set_reader_error(parser, "invalid Unicode character", parser.offset, int(value)) } case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: var low, high int if parser.encoding == yaml_UTF16LE_ENCODING { low, high = 0, 1 } else { high, low = 1, 0 } // The UTF-16 encoding is not as simple as one might // naively think. Check RFC 2781 // (http://www.ietf.org/rfc/rfc2781.txt). // // Normally, two subsequent bytes describe a Unicode // character. However a special technique (called a // surrogate pair) is used for specifying character // values larger than 0xFFFF. // // A surrogate pair consists of two pseudo-characters: // high surrogate area (0xD800-0xDBFF) // low surrogate area (0xDC00-0xDFFF) // // The following formulas are used for decoding // and encoding characters using surrogate pairs: // // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) // W1 = 110110yyyyyyyyyy // W2 = 110111xxxxxxxxxx // // where U is the character value, W1 is the high surrogate // area, W2 is the low surrogate area. // Check for incomplete UTF-16 character. if raw_unread < 2 { if parser.eof { return yaml_parser_set_reader_error(parser, "incomplete UTF-16 character", parser.offset, -1) } break inner } // Get the character. value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) // Check for unexpected low surrogate area. if value&0xFC00 == 0xDC00 { return yaml_parser_set_reader_error(parser, "unexpected low surrogate area", parser.offset, int(value)) } // Check for a high surrogate area. 
if value&0xFC00 == 0xD800 { width = 4 // Check for incomplete surrogate pair. if raw_unread < 4 { if parser.eof { return yaml_parser_set_reader_error(parser, "incomplete UTF-16 surrogate pair", parser.offset, -1) } break inner } // Get the next character. value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) // Check for a low surrogate area. if value2&0xFC00 != 0xDC00 { return yaml_parser_set_reader_error(parser, "expected low surrogate area", parser.offset+2, int(value2)) } // Generate the value of the surrogate pair. value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) } else { width = 2 } default: panic("impossible") } // Check if the character is in the allowed range: // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) // | [#x10000-#x10FFFF] (32 bit) switch { case value == 0x09: case value == 0x0A: case value == 0x0D: case value >= 0x20 && value <= 0x7E: case value == 0x85: case value >= 0xA0 && value <= 0xD7FF: case value >= 0xE000 && value <= 0xFFFD: case value >= 0x10000 && value <= 0x10FFFF: default: return yaml_parser_set_reader_error(parser, "control characters are not allowed", parser.offset, int(value)) } // Move the raw pointers. parser.raw_buffer_pos += width parser.offset += width // Finally put the character into the buffer. if value <= 0x7F { // 0000 0000-0000 007F . 0xxxxxxx parser.buffer[buffer_len+0] = byte(value) } else if value <= 0x7FF { // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) } else if value <= 0xFFFF { // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) } else { // 0001 0000-0010 FFFF . 
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) } buffer_len += width parser.unread++ } // On EOF, put NUL into the buffer and return. if parser.eof { parser.buffer[buffer_len] = 0 buffer_len++ parser.unread++ break } } parser.buffer = parser.buffer[:buffer_len] return true } charm-2.1.1/src/gopkg.in/yaml.v2/yamlprivateh.go0000664000175000017500000001154112672604537020440 0ustar marcomarcopackage yaml const ( // The size of the input raw buffer. input_raw_buffer_size = 512 // The size of the input buffer. // It should be possible to decode the whole raw buffer. input_buffer_size = input_raw_buffer_size * 3 // The size of the output buffer. output_buffer_size = 128 // The size of the output raw buffer. // It should be possible to encode the whole output buffer. output_raw_buffer_size = (output_buffer_size*2 + 2) // The size of other stacks and queues. initial_stack_size = 16 initial_queue_size = 16 initial_string_size = 16 ) // Check if the character at the specified position is an alphabetical // character, a digit, '_', or '-'. func is_alpha(b []byte, i int) bool { return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' } // Check if the character at the specified position is a digit. func is_digit(b []byte, i int) bool { return b[i] >= '0' && b[i] <= '9' } // Get the value of a digit. func as_digit(b []byte, i int) int { return int(b[i]) - '0' } // Check if the character at the specified position is a hex-digit. func is_hex(b []byte, i int) bool { return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' } // Get the value of a hex-digit. 
func as_hex(b []byte, i int) int {
	switch c := b[i]; {
	case c >= 'A' && c <= 'F':
		return int(c-'A') + 10
	case c >= 'a' && c <= 'f':
		return int(c-'a') + 10
	default:
		return int(c - '0')
	}
}

// Check if the character is ASCII.
func is_ascii(b []byte, i int) bool {
	return b[i] < 0x80
}

// Check if the character at the start of the buffer can be printed unescaped.
func is_printable(b []byte, i int) bool {
	switch c := b[i]; {
	case c == 0x0A: // LF
		return true
	case c >= 0x20 && c <= 0x7E: // printable ASCII
		return true
	case c == 0xC2: // first byte of U+0080-U+00BF; printable from U+00A0 up
		return b[i+1] >= 0xA0
	case c > 0xC2 && c < 0xED: // U+00C0-U+CFFF, always printable
		return true
	case c == 0xED: // ED A0..BF would be the surrogate range U+D800-U+DFFF
		return b[i+1] < 0xA0
	case c == 0xEE:
		return true
	case c == 0xEF: // U+E000-U+FFFD, minus the exceptions below
		if b[i+1] == 0xBB && b[i+2] == 0xBF {
			return false // U+FEFF (byte order mark)
		}
		if b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF) {
			return false // U+FFFE and U+FFFF
		}
		return true
	default:
		return false
	}
}

// Check if the character at the specified position is NUL.
func is_z(b []byte, i int) bool {
	return b[i] == 0x00
}

// Check if the beginning of the buffer is a BOM.
func is_bom(b []byte, i int) bool {
	// Always inspects the first three bytes of the buffer; the index
	// argument is ignored (kept for signature uniformity with the other
	// predicates).
	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}

// Check if the character at the specified position is space.
func is_space(b []byte, i int) bool {
	return b[i] == ' '
}

// Check if the character at the specified position is tab.
func is_tab(b []byte, i int) bool {
	return b[i] == '\t'
}

// Check if the character at the specified position is blank (space or tab).
func is_blank(b []byte, i int) bool {
	c := b[i]
	return c == ' ' || c == '\t'
}

// Check if the character at the specified position is a line break.
func is_break(b []byte, i int) bool {
	switch b[i] {
	case '\r', '\n': // CR (#xD), LF (#xA)
		return true
	case 0xC2: // possible NEL (#x85)
		return b[i+1] == 0x85
	case 0xE2: // possible LS (#x2028) or PS (#x2029)
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}

// Check if the two bytes at the specified position are CR followed by LF.
func is_crlf(b []byte, i int) bool {
	return b[i] == '\r' && b[i+1] == '\n'
}

// Check if the character is a line break or NUL.
func is_breakz(b []byte, i int) bool { //return is_break(b, i) || is_z(b, i) return ( // is_break: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) // is_z: b[i] == 0) } // Check if the character is a line break, space, or NUL. func is_spacez(b []byte, i int) bool { //return is_space(b, i) || is_breakz(b, i) return ( // is_space: b[i] == ' ' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) b[i] == 0) } // Check if the character is a line break, space, tab, or NUL. func is_blankz(b []byte, i int) bool { //return is_blank(b, i) || is_breakz(b, i) return ( // is_blank: b[i] == ' ' || b[i] == '\t' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) b[i] == 0) } // Determine the width of the character. func width(b byte) int { // Don't replace these by a switch without first // confirming that it is being inlined. if b&0x80 == 0x00 { return 1 } if b&0xE0 == 0xC0 { return 2 } if b&0xF0 == 0xE0 { return 3 } if b&0xF8 == 0xF0 { return 4 } return 0 } charm-2.1.1/src/gopkg.in/yaml.v2/README.md0000664000175000017500000000512212672604537016661 0ustar marcomarco# YAML support for the Go language Introduction ------------ The yaml package enables Go programs to comfortably encode and decode YAML values. 
It was developed within [Canonical](https://www.canonical.com) as part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to parse and generate YAML data quickly and reliably. Compatibility ------------- The yaml package supports most of YAML 1.1 and 1.2, including support for anchors, tags, map merging, etc. Multi-document unmarshalling is not yet implemented, and base-60 floats from YAML 1.1 are purposefully not supported since they're a poor design and are gone in YAML 1.2. Installation and usage ---------------------- The import path for the package is *gopkg.in/yaml.v2*. To install it, run: go get gopkg.in/yaml.v2 API documentation ----------------- If opened in a browser, the import path itself leads to the API documentation: * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) API stability ------------- The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). License ------- The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. Example ------- ```Go package main import ( "fmt" "log" "gopkg.in/yaml.v2" ) var data = ` a: Easy! b: c: 2 d: [3, 4] ` type T struct { A string B struct { RenamedC int `yaml:"c"` D []int `yaml:",flow"` } } func main() { t := T{} err := yaml.Unmarshal([]byte(data), &t) if err != nil { log.Fatalf("error: %v", err) } fmt.Printf("--- t:\n%v\n\n", t) d, err := yaml.Marshal(&t) if err != nil { log.Fatalf("error: %v", err) } fmt.Printf("--- t dump:\n%s\n\n", string(d)) m := make(map[interface{}]interface{}) err = yaml.Unmarshal([]byte(data), &m) if err != nil { log.Fatalf("error: %v", err) } fmt.Printf("--- m:\n%v\n\n", m) d, err = yaml.Marshal(&m) if err != nil { log.Fatalf("error: %v", err) } fmt.Printf("--- m dump:\n%s\n\n", string(d)) } ``` This example will generate the following output: ``` --- t: {Easy! 
{2 [3 4]}} --- t dump: a: Easy! b: c: 2 d: [3, 4] --- m: map[a:Easy! b:map[c:2 d:[3 4]]] --- m dump: a: Easy! b: c: 2 d: - 3 - 4 ``` charm-2.1.1/src/gopkg.in/yaml.v2/LICENSE.libyaml0000664000175000017500000000244112672604537020040 0ustar marcomarcoThe following files were ported to Go from C files of libyaml, and thus are still covered by their original copyright and license: apic.go emitterc.go parserc.go readerc.go scannerc.go writerc.go yamlh.go yamlprivateh.go Copyright (c) 2006 Kirill Simonov Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. charm-2.1.1/src/gopkg.in/yaml.v2/encode_test.go0000664000175000017500000002164112672604537020231 0ustar marcomarcopackage yaml_test import ( "fmt" "math" "strconv" "strings" "time" . 
"gopkg.in/check.v1" "gopkg.in/yaml.v2" "net" "os" ) var marshalIntTest = 123 var marshalTests = []struct { value interface{} data string }{ { nil, "null\n", }, { &struct{}{}, "{}\n", }, { map[string]string{"v": "hi"}, "v: hi\n", }, { map[string]interface{}{"v": "hi"}, "v: hi\n", }, { map[string]string{"v": "true"}, "v: \"true\"\n", }, { map[string]string{"v": "false"}, "v: \"false\"\n", }, { map[string]interface{}{"v": true}, "v: true\n", }, { map[string]interface{}{"v": false}, "v: false\n", }, { map[string]interface{}{"v": 10}, "v: 10\n", }, { map[string]interface{}{"v": -10}, "v: -10\n", }, { map[string]uint{"v": 42}, "v: 42\n", }, { map[string]interface{}{"v": int64(4294967296)}, "v: 4294967296\n", }, { map[string]int64{"v": int64(4294967296)}, "v: 4294967296\n", }, { map[string]uint64{"v": 4294967296}, "v: 4294967296\n", }, { map[string]interface{}{"v": "10"}, "v: \"10\"\n", }, { map[string]interface{}{"v": 0.1}, "v: 0.1\n", }, { map[string]interface{}{"v": float64(0.1)}, "v: 0.1\n", }, { map[string]interface{}{"v": -0.1}, "v: -0.1\n", }, { map[string]interface{}{"v": math.Inf(+1)}, "v: .inf\n", }, { map[string]interface{}{"v": math.Inf(-1)}, "v: -.inf\n", }, { map[string]interface{}{"v": math.NaN()}, "v: .nan\n", }, { map[string]interface{}{"v": nil}, "v: null\n", }, { map[string]interface{}{"v": ""}, "v: \"\"\n", }, { map[string][]string{"v": []string{"A", "B"}}, "v:\n- A\n- B\n", }, { map[string][]string{"v": []string{"A", "B\nC"}}, "v:\n- A\n- |-\n B\n C\n", }, { map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}}, "v:\n- A\n- 1\n- B:\n - 2\n - 3\n", }, { map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, "a:\n b: c\n", }, { map[string]interface{}{"a": "-"}, "a: '-'\n", }, // Simple values. 
{ &marshalIntTest, "123\n", }, // Structures { &struct{ Hello string }{"world"}, "hello: world\n", }, { &struct { A struct { B string } }{struct{ B string }{"c"}}, "a:\n b: c\n", }, { &struct { A *struct { B string } }{&struct{ B string }{"c"}}, "a:\n b: c\n", }, { &struct { A *struct { B string } }{}, "a: null\n", }, { &struct{ A int }{1}, "a: 1\n", }, { &struct{ A []int }{[]int{1, 2}}, "a:\n- 1\n- 2\n", }, { &struct { B int "a" }{1}, "a: 1\n", }, { &struct{ A bool }{true}, "a: true\n", }, // Conditional flag { &struct { A int "a,omitempty" B int "b,omitempty" }{1, 0}, "a: 1\n", }, { &struct { A int "a,omitempty" B int "b,omitempty" }{0, 0}, "{}\n", }, { &struct { A *struct{ X, y int } "a,omitempty,flow" }{&struct{ X, y int }{1, 2}}, "a: {x: 1}\n", }, { &struct { A *struct{ X, y int } "a,omitempty,flow" }{nil}, "{}\n", }, { &struct { A *struct{ X, y int } "a,omitempty,flow" }{&struct{ X, y int }{}}, "a: {x: 0}\n", }, { &struct { A struct{ X, y int } "a,omitempty,flow" }{struct{ X, y int }{1, 2}}, "a: {x: 1}\n", }, { &struct { A struct{ X, y int } "a,omitempty,flow" }{struct{ X, y int }{0, 1}}, "{}\n", }, { &struct { A float64 "a,omitempty" B float64 "b,omitempty" }{1, 0}, "a: 1\n", }, // Flow flag { &struct { A []int "a,flow" }{[]int{1, 2}}, "a: [1, 2]\n", }, { &struct { A map[string]string "a,flow" }{map[string]string{"b": "c", "d": "e"}}, "a: {b: c, d: e}\n", }, { &struct { A struct { B, D string } "a,flow" }{struct{ B, D string }{"c", "e"}}, "a: {b: c, d: e}\n", }, // Unexported field { &struct { u int A int }{0, 1}, "a: 1\n", }, // Ignored field { &struct { A int B int "-" }{1, 2}, "a: 1\n", }, // Struct inlining { &struct { A int C inlineB `yaml:",inline"` }{1, inlineB{2, inlineC{3}}}, "a: 1\nb: 2\nc: 3\n", }, // Map inlining { &struct { A int C map[string]int `yaml:",inline"` }{1, map[string]int{"b": 2, "c": 3}}, "a: 1\nb: 2\nc: 3\n", }, // Duration { map[string]time.Duration{"a": 3 * time.Second}, "a: 3s\n", }, // Issue #24: bug in map merging logic. 
{ map[string]string{"a": ""}, "a: \n", }, // Issue #34: marshal unsupported base 60 floats quoted for compatibility // with old YAML 1.1 parsers. { map[string]string{"a": "1:1"}, "a: \"1:1\"\n", }, // Binary data. { map[string]string{"a": "\x00"}, "a: \"\\0\"\n", }, { map[string]string{"a": "\x80\x81\x82"}, "a: !!binary gIGC\n", }, { map[string]string{"a": strings.Repeat("\x90", 54)}, "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", }, // Ordered maps. { &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n", }, // Encode unicode as utf-8 rather than in escaped form. { map[string]string{"a": "你好"}, "a: 你好\n", }, // Support encoding.TextMarshaler. { map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, "a: 1.2.3.4\n", }, { map[string]time.Time{"a": time.Unix(1424801979, 0)}, "a: 2015-02-24T18:19:39Z\n", }, // Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible). { map[string]string{"a": "b: c"}, "a: 'b: c'\n", }, // Containing hash mark ('#') in string should be quoted { map[string]string{"a": "Hello #comment"}, "a: 'Hello #comment'\n", }, { map[string]string{"a": "你好 #comment"}, "a: '你好 #comment'\n", }, } func (s *S) TestMarshal(c *C) { defer os.Setenv("TZ", os.Getenv("TZ")) os.Setenv("TZ", "UTC") for _, item := range marshalTests { data, err := yaml.Marshal(item.value) c.Assert(err, IsNil) c.Assert(string(data), Equals, item.data) } } var marshalErrorTests = []struct { value interface{} error string panic string }{{ value: &struct { B int inlineB ",inline" }{1, inlineB{2, inlineC{3}}}, panic: `Duplicated key 'b' in struct struct \{ B int; .*`, }, { value: &struct { A int B map[string]int ",inline" }{1, map[string]int{"a": 2}}, panic: `Can't have key "a" in inlined map; conflicts with struct field`, }} func (s *S) TestMarshalErrors(c *C) { for _, item := range marshalErrorTests { if item.panic != "" { c.Assert(func() { yaml.Marshal(item.value) }, 
PanicMatches, item.panic) } else { _, err := yaml.Marshal(item.value) c.Assert(err, ErrorMatches, item.error) } } } func (s *S) TestMarshalTypeCache(c *C) { var data []byte var err error func() { type T struct{ A int } data, err = yaml.Marshal(&T{}) c.Assert(err, IsNil) }() func() { type T struct{ B int } data, err = yaml.Marshal(&T{}) c.Assert(err, IsNil) }() c.Assert(string(data), Equals, "b: 0\n") } var marshalerTests = []struct { data string value interface{} }{ {"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}}, {"_:\n- 1\n- A\n", []interface{}{1, "A"}}, {"_: 10\n", 10}, {"_: null\n", nil}, {"_: BAR!\n", "BAR!"}, } type marshalerType struct { value interface{} } func (o marshalerType) MarshalText() ([]byte, error) { panic("MarshalText called on type with MarshalYAML") } func (o marshalerType) MarshalYAML() (interface{}, error) { return o.value, nil } type marshalerValue struct { Field marshalerType "_" } func (s *S) TestMarshaler(c *C) { for _, item := range marshalerTests { obj := &marshalerValue{} obj.Field.value = item.value data, err := yaml.Marshal(obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, string(item.data)) } } func (s *S) TestMarshalerWholeDocument(c *C) { obj := &marshalerType{} obj.value = map[string]string{"hello": "world!"} data, err := yaml.Marshal(obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, "hello: world!\n") } type failingMarshaler struct{} func (ft *failingMarshaler) MarshalYAML() (interface{}, error) { return nil, failingErr } func (s *S) TestMarshalerError(c *C) { _, err := yaml.Marshal(&failingMarshaler{}) c.Assert(err, Equals, failingErr) } func (s *S) TestSortedOutput(c *C) { order := []interface{}{ false, true, 1, uint(1), 1.0, 1.1, 1.2, 2, uint(2), 2.0, 2.1, "", ".1", ".2", ".a", "1", "2", "a!10", "a/2", "a/10", "a~10", "ab/1", "b/1", "b/01", "b/2", "b/02", "b/3", "b/03", "b1", "b01", "b3", "c2.10", "c10.2", "d1", "d12", "d12a", } m := make(map[interface{}]int) for _, k := range order { m[k] = 1 } 
data, err := yaml.Marshal(m) c.Assert(err, IsNil) out := "\n" + string(data) last := 0 for i, k := range order { repr := fmt.Sprint(k) if s, ok := k.(string); ok { if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil { repr = `"` + repr + `"` } } index := strings.Index(out, "\n"+repr+":") if index == -1 { c.Fatalf("%#v is not in the output: %#v", k, out) } if index < last { c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out) } last = index } } charm-2.1.1/src/gopkg.in/yaml.v2/resolve.go0000664000175000017500000001174712672604537017422 0ustar marcomarcopackage yaml import ( "encoding/base64" "math" "strconv" "strings" "unicode/utf8" ) type resolveMapItem struct { value interface{} tag string } var resolveTable = make([]byte, 256) var resolveMap = make(map[string]resolveMapItem) func init() { t := resolveTable t[int('+')] = 'S' // Sign t[int('-')] = 'S' for _, c := range "0123456789" { t[int(c)] = 'D' // Digit } for _, c := range "yYnNtTfFoO~" { t[int(c)] = 'M' // In map } t[int('.')] = '.' 
// Float (potentially in map) var resolveMapList = []struct { v interface{} tag string l []string }{ {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, {"<<", yaml_MERGE_TAG, []string{"<<"}}, } m := resolveMap for _, item := range resolveMapList { for _, s := range item.l { m[s] = resolveMapItem{item.v, item.tag} } } } const longTagPrefix = "tag:yaml.org,2002:" func shortTag(tag string) string { // TODO This can easily be made faster and produce less garbage. if strings.HasPrefix(tag, longTagPrefix) { return "!!" + tag[len(longTagPrefix):] } return tag } func longTag(tag string) string { if strings.HasPrefix(tag, "!!") { return longTagPrefix + tag[2:] } return tag } func resolvableTag(tag string) bool { switch tag { case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: return true } return false } func resolve(tag string, in string) (rtag string, out interface{}) { if !resolvableTag(tag) { return tag, in } defer func() { switch tag { case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: return } failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) }() // Any data is accepted as a !!str or !!binary. // Otherwise, the prefix is enough of a hint about what it might be. 
hint := byte('N') if in != "" { hint = resolveTable[in[0]] } if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { // Handle things we can lookup in a map. if item, ok := resolveMap[in]; ok { return item.tag, item.value } // Base 60 floats are a bad idea, were dropped in YAML 1.2, and // are purposefully unsupported here. They're still quoted on // the way out for compatibility with other parser, though. switch hint { case 'M': // We've already checked the map above. case '.': // Not in the map, so maybe a normal float. floatv, err := strconv.ParseFloat(in, 64) if err == nil { return yaml_FLOAT_TAG, floatv } case 'D', 'S': // Int, float, or timestamp. plain := strings.Replace(in, "_", "", -1) intv, err := strconv.ParseInt(plain, 0, 64) if err == nil { if intv == int64(int(intv)) { return yaml_INT_TAG, int(intv) } else { return yaml_INT_TAG, intv } } uintv, err := strconv.ParseUint(plain, 0, 64) if err == nil { return yaml_INT_TAG, uintv } floatv, err := strconv.ParseFloat(plain, 64) if err == nil { return yaml_FLOAT_TAG, floatv } if strings.HasPrefix(plain, "0b") { intv, err := strconv.ParseInt(plain[2:], 2, 64) if err == nil { if intv == int64(int(intv)) { return yaml_INT_TAG, int(intv) } else { return yaml_INT_TAG, intv } } uintv, err := strconv.ParseUint(plain[2:], 2, 64) if err == nil { return yaml_INT_TAG, uintv } } else if strings.HasPrefix(plain, "-0b") { intv, err := strconv.ParseInt(plain[3:], 2, 64) if err == nil { if intv == int64(int(intv)) { return yaml_INT_TAG, -int(intv) } else { return yaml_INT_TAG, -intv } } } // XXX Handle timestamps here. default: panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") } } if tag == yaml_BINARY_TAG { return yaml_BINARY_TAG, in } if utf8.ValidString(in) { return yaml_STR_TAG, in } return yaml_BINARY_TAG, encodeBase64(in) } // encodeBase64 encodes s as base64 that is broken up into multiple lines // as appropriate for the resulting length. 
func encodeBase64(s string) string { const lineLen = 70 encLen := base64.StdEncoding.EncodedLen(len(s)) lines := encLen/lineLen + 1 buf := make([]byte, encLen*2+lines) in := buf[0:encLen] out := buf[encLen:] base64.StdEncoding.Encode(in, []byte(s)) k := 0 for i := 0; i < len(in); i += lineLen { j := i + lineLen if j > len(in) { j = len(in) } k += copy(out[k:], in[i:j]) if lines > 1 { out[k] = '\n' k++ } } return string(out[:k]) } charm-2.1.1/src/gopkg.in/yaml.v2/suite_test.go0000664000175000017500000000021712672604537020121 0ustar marcomarcopackage yaml_test import ( . "gopkg.in/check.v1" "testing" ) func Test(t *testing.T) { TestingT(t) } type S struct{} var _ = Suite(&S{}) charm-2.1.1/src/gopkg.in/yaml.v2/parserc.go0000664000175000017500000010372712672604537017402 0ustar marcomarcopackage yaml import ( "bytes" ) // The parser implements the following grammar: // // stream ::= STREAM-START implicit_document? explicit_document* STREAM-END // implicit_document ::= block_node DOCUMENT-END* // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* // block_node_or_indentless_sequence ::= // ALIAS // | properties (block_content | indentless_block_sequence)? // | block_content // | indentless_block_sequence // block_node ::= ALIAS // | properties block_content? // | block_content // flow_node ::= ALIAS // | properties flow_content? // | flow_content // properties ::= TAG ANCHOR? | ANCHOR TAG? // block_content ::= block_collection | flow_collection | SCALAR // flow_content ::= flow_collection | SCALAR // block_collection ::= block_sequence | block_mapping // flow_collection ::= flow_sequence | flow_mapping // block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END // indentless_sequence ::= (BLOCK-ENTRY block_node?)+ // block_mapping ::= BLOCK-MAPPING_START // ((KEY block_node_or_indentless_sequence?)? 
// (VALUE block_node_or_indentless_sequence?)?)* // BLOCK-END // flow_sequence ::= FLOW-SEQUENCE-START // (flow_sequence_entry FLOW-ENTRY)* // flow_sequence_entry? // FLOW-SEQUENCE-END // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? // flow_mapping ::= FLOW-MAPPING-START // (flow_mapping_entry FLOW-ENTRY)* // flow_mapping_entry? // FLOW-MAPPING-END // flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? // Peek the next token in the token queue. func peek_token(parser *yaml_parser_t) *yaml_token_t { if parser.token_available || yaml_parser_fetch_more_tokens(parser) { return &parser.tokens[parser.tokens_head] } return nil } // Remove the next token from the queue (must be called after peek_token). func skip_token(parser *yaml_parser_t) { parser.token_available = false parser.tokens_parsed++ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN parser.tokens_head++ } // Get the next event. func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { // Erase the event object. *event = yaml_event_t{} // No events after the end of the stream or error. if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { return true } // Generate the next event. return yaml_parser_state_machine(parser, event) } // Set parser error. func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { parser.error = yaml_PARSER_ERROR parser.problem = problem parser.problem_mark = problem_mark return false } func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { parser.error = yaml_PARSER_ERROR parser.context = context parser.context_mark = context_mark parser.problem = problem parser.problem_mark = problem_mark return false } // State dispatcher. 
func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { //trace("yaml_parser_state_machine", "state:", parser.state.String()) switch parser.state { case yaml_PARSE_STREAM_START_STATE: return yaml_parser_parse_stream_start(parser, event) case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: return yaml_parser_parse_document_start(parser, event, true) case yaml_PARSE_DOCUMENT_START_STATE: return yaml_parser_parse_document_start(parser, event, false) case yaml_PARSE_DOCUMENT_CONTENT_STATE: return yaml_parser_parse_document_content(parser, event) case yaml_PARSE_DOCUMENT_END_STATE: return yaml_parser_parse_document_end(parser, event) case yaml_PARSE_BLOCK_NODE_STATE: return yaml_parser_parse_node(parser, event, true, false) case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: return yaml_parser_parse_node(parser, event, true, true) case yaml_PARSE_FLOW_NODE_STATE: return yaml_parser_parse_node(parser, event, false, false) case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: return yaml_parser_parse_block_sequence_entry(parser, event, true) case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: return yaml_parser_parse_block_sequence_entry(parser, event, false) case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: return yaml_parser_parse_indentless_sequence_entry(parser, event) case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: return yaml_parser_parse_block_mapping_key(parser, event, true) case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: return yaml_parser_parse_block_mapping_key(parser, event, false) case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: return yaml_parser_parse_block_mapping_value(parser, event) case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: return yaml_parser_parse_flow_sequence_entry(parser, event, true) case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: return yaml_parser_parse_flow_sequence_entry(parser, event, false) case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) case 
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: return yaml_parser_parse_flow_mapping_key(parser, event, true) case yaml_PARSE_FLOW_MAPPING_KEY_STATE: return yaml_parser_parse_flow_mapping_key(parser, event, false) case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: return yaml_parser_parse_flow_mapping_value(parser, event, false) case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: return yaml_parser_parse_flow_mapping_value(parser, event, true) default: panic("invalid parser state") } return false } // Parse the production: // stream ::= STREAM-START implicit_document? explicit_document* STREAM-END // ************ func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { return false } if token.typ != yaml_STREAM_START_TOKEN { return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) } parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE *event = yaml_event_t{ typ: yaml_STREAM_START_EVENT, start_mark: token.start_mark, end_mark: token.end_mark, encoding: token.encoding, } skip_token(parser) return true } // Parse the productions: // implicit_document ::= block_node DOCUMENT-END* // * // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* // ************************* func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { token := peek_token(parser) if token == nil { return false } // Parse extra document end indicators. 
if !implicit { for token.typ == yaml_DOCUMENT_END_TOKEN { skip_token(parser) token = peek_token(parser) if token == nil { return false } } } if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && token.typ != yaml_TAG_DIRECTIVE_TOKEN && token.typ != yaml_DOCUMENT_START_TOKEN && token.typ != yaml_STREAM_END_TOKEN { // Parse an implicit document. if !yaml_parser_process_directives(parser, nil, nil) { return false } parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) parser.state = yaml_PARSE_BLOCK_NODE_STATE *event = yaml_event_t{ typ: yaml_DOCUMENT_START_EVENT, start_mark: token.start_mark, end_mark: token.end_mark, } } else if token.typ != yaml_STREAM_END_TOKEN { // Parse an explicit document. var version_directive *yaml_version_directive_t var tag_directives []yaml_tag_directive_t start_mark := token.start_mark if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { return false } token = peek_token(parser) if token == nil { return false } if token.typ != yaml_DOCUMENT_START_TOKEN { yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) return false } parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE end_mark := token.end_mark *event = yaml_event_t{ typ: yaml_DOCUMENT_START_EVENT, start_mark: start_mark, end_mark: end_mark, version_directive: version_directive, tag_directives: tag_directives, implicit: false, } skip_token(parser) } else { // Parse the stream end. parser.state = yaml_PARSE_END_STATE *event = yaml_event_t{ typ: yaml_STREAM_END_EVENT, start_mark: token.start_mark, end_mark: token.end_mark, } skip_token(parser) } return true } // Parse the productions: // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* // *********** // func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { return false } if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN || token.typ == yaml_DOCUMENT_START_TOKEN || token.typ == yaml_DOCUMENT_END_TOKEN || token.typ == yaml_STREAM_END_TOKEN { parser.state = parser.states[len(parser.states)-1] parser.states = parser.states[:len(parser.states)-1] return yaml_parser_process_empty_scalar(parser, event, token.start_mark) } return yaml_parser_parse_node(parser, event, true, false) } // Parse the productions: // implicit_document ::= block_node DOCUMENT-END* // ************* // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* // func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { return false } start_mark := token.start_mark end_mark := token.start_mark implicit := true if token.typ == yaml_DOCUMENT_END_TOKEN { end_mark = token.end_mark skip_token(parser) implicit = false } parser.tag_directives = parser.tag_directives[:0] parser.state = yaml_PARSE_DOCUMENT_START_STATE *event = yaml_event_t{ typ: yaml_DOCUMENT_END_EVENT, start_mark: start_mark, end_mark: end_mark, implicit: implicit, } return true } // Parse the productions: // block_node_or_indentless_sequence ::= // ALIAS // ***** // | properties (block_content | indentless_block_sequence)? // ********** * // | block_content | indentless_block_sequence // * // block_node ::= ALIAS // ***** // | properties block_content? // ********** * // | block_content // * // flow_node ::= ALIAS // ***** // | properties flow_content? // ********** * // | flow_content // * // properties ::= TAG ANCHOR? | ANCHOR TAG? 
// *************************
// block_content     ::= block_collection | flow_collection | SCALAR
//                                                            ******
// flow_content      ::= flow_collection | SCALAR
//                                         ******
//
// Parse a single node: first an optional alias, then optional anchor/tag
// properties (in either order), then the content itself, which dispatches
// on the next token to the appropriate collection state or emits a scalar.
func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
	//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()

	token := peek_token(parser)
	if token == nil {
		return false
	}

	// An alias node carries no properties or content of its own.
	if token.typ == yaml_ALIAS_TOKEN {
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]
		*event = yaml_event_t{
			typ:        yaml_ALIAS_EVENT,
			start_mark: token.start_mark,
			end_mark:   token.end_mark,
			anchor:     token.value,
		}
		skip_token(parser)
		return true
	}

	start_mark := token.start_mark
	end_mark := token.start_mark

	var tag_token bool
	var tag_handle, tag_suffix, anchor []byte
	var tag_mark yaml_mark_t
	// Properties may appear as ANCHOR TAG? or TAG ANCHOR?.
	if token.typ == yaml_ANCHOR_TOKEN {
		anchor = token.value
		start_mark = token.start_mark
		end_mark = token.end_mark
		skip_token(parser)
		token = peek_token(parser)
		if token == nil {
			return false
		}
		if token.typ == yaml_TAG_TOKEN {
			tag_token = true
			tag_handle = token.value
			tag_suffix = token.suffix
			tag_mark = token.start_mark
			end_mark = token.end_mark
			skip_token(parser)
			token = peek_token(parser)
			if token == nil {
				return false
			}
		}
	} else if token.typ == yaml_TAG_TOKEN {
		tag_token = true
		tag_handle = token.value
		tag_suffix = token.suffix
		start_mark = token.start_mark
		tag_mark = token.start_mark
		end_mark = token.end_mark
		skip_token(parser)
		token = peek_token(parser)
		if token == nil {
			return false
		}
		if token.typ == yaml_ANCHOR_TOKEN {
			anchor = token.value
			end_mark = token.end_mark
			skip_token(parser)
			token = peek_token(parser)
			if token == nil {
				return false
			}
		}
	}

	var tag []byte
	if tag_token {
		if len(tag_handle) == 0 {
			// Verbatim tag (!<...>): the suffix is already the full tag.
			tag = tag_suffix
			tag_suffix = nil
		} else {
			// Resolve the handle against the in-scope %TAG directives.
			for i := range parser.tag_directives {
				if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
					tag = append([]byte(nil), parser.tag_directives[i].prefix...)
					tag = append(tag, tag_suffix...)
					break
				}
			}
			if len(tag) == 0 {
				yaml_parser_set_parser_error_context(parser,
					"while parsing a node", start_mark,
					"found undefined tag handle", tag_mark)
				return false
			}
		}
	}

	// A node without an explicit tag gets an implicitly resolved one.
	implicit := len(tag) == 0
	if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
		end_mark = token.end_mark
		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
		*event = yaml_event_t{
			typ:        yaml_SEQUENCE_START_EVENT,
			start_mark: start_mark,
			end_mark:   end_mark,
			anchor:     anchor,
			tag:        tag,
			implicit:   implicit,
			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
		}
		return true
	}
	if token.typ == yaml_SCALAR_TOKEN {
		var plain_implicit, quoted_implicit bool
		end_mark = token.end_mark
		// An untagged plain scalar is plain-implicit; an untagged quoted
		// scalar is quoted-implicit; "!" means resolve as if plain.
		if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
			plain_implicit = true
		} else if len(tag) == 0 {
			quoted_implicit = true
		}
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]

		*event = yaml_event_t{
			typ:             yaml_SCALAR_EVENT,
			start_mark:      start_mark,
			end_mark:        end_mark,
			anchor:          anchor,
			tag:             tag,
			value:           token.value,
			implicit:        plain_implicit,
			quoted_implicit: quoted_implicit,
			style:           yaml_style_t(token.style),
		}
		skip_token(parser)
		return true
	}
	if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
		// [Go] Some of the events below can be merged as they differ only on style.
		end_mark = token.end_mark
		parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
		*event = yaml_event_t{
			typ:        yaml_SEQUENCE_START_EVENT,
			start_mark: start_mark,
			end_mark:   end_mark,
			anchor:     anchor,
			tag:        tag,
			implicit:   implicit,
			style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
		}
		return true
	}
	if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
		end_mark = token.end_mark
		parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
		*event = yaml_event_t{
			typ:        yaml_MAPPING_START_EVENT,
			start_mark: start_mark,
			end_mark:   end_mark,
			anchor:     anchor,
			tag:        tag,
			implicit:   implicit,
			style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
		}
		return true
	}
	// Block collections are only valid in block context.
	if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
		end_mark = token.end_mark
		parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
		*event = yaml_event_t{
			typ:        yaml_SEQUENCE_START_EVENT,
			start_mark: start_mark,
			end_mark:   end_mark,
			anchor:     anchor,
			tag:        tag,
			implicit:   implicit,
			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
		}
		return true
	}
	if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
		end_mark = token.end_mark
		parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
		*event = yaml_event_t{
			typ:        yaml_MAPPING_START_EVENT,
			start_mark: start_mark,
			end_mark:   end_mark,
			anchor:     anchor,
			tag:        tag,
			implicit:   implicit,
			style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
		}
		return true
	}
	// Properties with no content: an empty scalar carries them.
	if len(anchor) > 0 || len(tag) > 0 {
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]

		*event = yaml_event_t{
			typ:             yaml_SCALAR_EVENT,
			start_mark:      start_mark,
			end_mark:        end_mark,
			anchor:          anchor,
			tag:             tag,
			implicit:        implicit,
			quoted_implicit: false,
			style:           yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
		}
		return true
	}

	context := "while parsing a flow node"
	if block {
		context = "while parsing a block node"
	}
	yaml_parser_set_parser_error_context(parser, context, start_mark,
		"did not find expected node content", token.start_mark)
	return false
}

// Parse the productions:
// block_sequence ::= BLOCK-SEQUENCE-START
// (BLOCK-ENTRY block_node?)* BLOCK-END
// ********************  *********** *             *********
//
// Parse one entry of a block sequence.  On the first call the sequence
// start mark is pushed and the BLOCK-SEQUENCE-START token consumed.
func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
	if first {
		// NOTE(review): no nil check on peek_token here before use — verify
		// against upstream whether first-call peek can ever fail.
		token := peek_token(parser)
		parser.marks = append(parser.marks, token.start_mark)
		skip_token(parser)
	}

	token := peek_token(parser)
	if token == nil {
		return false
	}

	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
		mark := token.end_mark
		skip_token(parser)
		token = peek_token(parser)
		if token == nil {
			return false
		}
		if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
			return yaml_parser_parse_node(parser, event, true, false)
		} else {
			// "- " followed by another entry or the end: empty item.
			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
			return yaml_parser_process_empty_scalar(parser, event, mark)
		}
	}
	if token.typ == yaml_BLOCK_END_TOKEN {
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]
		parser.marks = parser.marks[:len(parser.marks)-1]

		*event = yaml_event_t{
			typ:        yaml_SEQUENCE_END_EVENT,
			start_mark: token.start_mark,
			end_mark:   token.end_mark,
		}

		skip_token(parser)
		return true
	}

	context_mark := parser.marks[len(parser.marks)-1]
	parser.marks = parser.marks[:len(parser.marks)-1]
	return yaml_parser_set_parser_error_context(parser,
		"while parsing a block collection", context_mark,
		"did not find expected '-' indicator", token.start_mark)
}

// Parse the productions:
// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
//                           *********** *
//
// Like a block sequence, but terminated by a KEY/VALUE/BLOCK-END token
// rather than its own BLOCK-END (it shares the parent's indentation).
func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}

	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
		mark := token.end_mark
		skip_token(parser)
		token = peek_token(parser)
		if token == nil {
			return false
		}

		if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
			token.typ != yaml_KEY_TOKEN &&
			token.typ != yaml_VALUE_TOKEN &&
			token.typ != yaml_BLOCK_END_TOKEN {
			parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
			return yaml_parser_parse_node(parser, event, true, false)
		}
		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
		return yaml_parser_process_empty_scalar(parser, event, mark)
	}
	parser.state = parser.states[len(parser.states)-1]
	parser.states = parser.states[:len(parser.states)-1]

	*event = yaml_event_t{
		typ:        yaml_SEQUENCE_END_EVENT,
		start_mark: token.start_mark,
		end_mark:   token.start_mark, // [Go] Shouldn't this be token.end_mark?
	}
	return true
}

// Parse the productions:
// block_mapping        ::= BLOCK-MAPPING_START
//                          *******************
//                          ((KEY block_node_or_indentless_sequence?)?
//                            *** *
//                          (VALUE block_node_or_indentless_sequence?)?)*
//
//                          BLOCK-END
//                          *********
//
// Parse a key in a block mapping; on the first call the mapping start
// mark is pushed and the BLOCK-MAPPING-START token consumed.
func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
	if first {
		// NOTE(review): peek_token result is not nil-checked here — confirm
		// upstream whether this can dereference nil on a scanner error.
		token := peek_token(parser)
		parser.marks = append(parser.marks, token.start_mark)
		skip_token(parser)
	}

	token := peek_token(parser)
	if token == nil {
		return false
	}

	if token.typ == yaml_KEY_TOKEN {
		mark := token.end_mark
		skip_token(parser)
		token = peek_token(parser)
		if token == nil {
			return false
		}
		if token.typ != yaml_KEY_TOKEN &&
			token.typ != yaml_VALUE_TOKEN &&
			token.typ != yaml_BLOCK_END_TOKEN {
			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
			return yaml_parser_parse_node(parser, event, true, true)
		} else {
			// "?" with no key node: empty scalar key.
			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
			return yaml_parser_process_empty_scalar(parser, event, mark)
		}
	} else if token.typ == yaml_BLOCK_END_TOKEN {
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]
		parser.marks = parser.marks[:len(parser.marks)-1]
		*event = yaml_event_t{
			typ:        yaml_MAPPING_END_EVENT,
			start_mark: token.start_mark,
			end_mark:   token.end_mark,
		}
		skip_token(parser)
		return true
	}

	context_mark := parser.marks[len(parser.marks)-1]
	parser.marks = parser.marks[:len(parser.marks)-1]
	return yaml_parser_set_parser_error_context(parser,
		"while parsing a block mapping", context_mark,
		"did not find expected key", token.start_mark)
}

// Parse the productions:
// block_mapping        ::= BLOCK-MAPPING_START
//
//                          ((KEY block_node_or_indentless_sequence?)?
//
//                          (VALUE block_node_or_indentless_sequence?)?)*
//                           ***** *
//                          BLOCK-END
//
//
// Parse the value half of a block mapping entry; a missing value becomes
// an empty scalar.
func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if token.typ == yaml_VALUE_TOKEN {
		mark := token.end_mark
		skip_token(parser)
		token = peek_token(parser)
		if token == nil {
			return false
		}
		if token.typ != yaml_KEY_TOKEN &&
			token.typ != yaml_VALUE_TOKEN &&
			token.typ != yaml_BLOCK_END_TOKEN {
			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
			return yaml_parser_parse_node(parser, event, true, true)
		}
		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
		return yaml_parser_process_empty_scalar(parser, event, mark)
	}
	parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
}

// Parse the productions:
// flow_sequence        ::= FLOW-SEQUENCE-START
//                          *******************
//                          (flow_sequence_entry FLOW-ENTRY)*
//                           *                   **********
//                          flow_sequence_entry?
//                          *
//                          FLOW-SEQUENCE-END
//                          *****************
// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
//                          *
//
// Parse one entry of a flow sequence ("[a, b, k: v]").  On the first call
// the FLOW-SEQUENCE-START token is consumed; entries after the first must
// be preceded by a ','.  A KEY token starts an implicit single-pair
// mapping inside the sequence.
func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
	if first {
		// NOTE(review): no nil check on peek_token before use here — confirm
		// against upstream whether this path can see a scanner error.
		token := peek_token(parser)
		parser.marks = append(parser.marks, token.start_mark)
		skip_token(parser)
	}
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
		if !first {
			if token.typ == yaml_FLOW_ENTRY_TOKEN {
				skip_token(parser)
				token = peek_token(parser)
				if token == nil {
					return false
				}
			} else {
				context_mark := parser.marks[len(parser.marks)-1]
				parser.marks = parser.marks[:len(parser.marks)-1]
				return yaml_parser_set_parser_error_context(parser,
					"while parsing a flow sequence", context_mark,
					"did not find expected ',' or ']'", token.start_mark)
			}
		}

		if token.typ == yaml_KEY_TOKEN {
			// Single-pair mapping embedded in the sequence.
			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
			*event = yaml_event_t{
				typ:        yaml_MAPPING_START_EVENT,
				start_mark: token.start_mark,
				end_mark:   token.end_mark,
				implicit:   true,
				style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
			}
			skip_token(parser)
			return true
		} else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
			return yaml_parser_parse_node(parser, event, false, false)
		}
	}

	parser.state = parser.states[len(parser.states)-1]
	parser.states = parser.states[:len(parser.states)-1]
	parser.marks = parser.marks[:len(parser.marks)-1]

	*event = yaml_event_t{
		typ:        yaml_SEQUENCE_END_EVENT,
		start_mark: token.start_mark,
		end_mark:   token.end_mark,
	}

	skip_token(parser)
	return true
}

//
// Parse the productions:
// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
//                                      *** *
//
// Parse the key of a single-pair mapping embedded in a flow sequence; a
// missing key becomes an empty scalar.
func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if token.typ != yaml_VALUE_TOKEN &&
		token.typ != yaml_FLOW_ENTRY_TOKEN &&
		token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
		parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
		return yaml_parser_parse_node(parser, event, false, false)
	}
	mark := token.end_mark
	skip_token(parser)
	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
	return yaml_parser_process_empty_scalar(parser, event, mark)
}

// Parse the productions:
// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
//                                                      ***** *
//
// Parse the value of a single-pair mapping embedded in a flow sequence.
func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if token.typ == yaml_VALUE_TOKEN {
		skip_token(parser)
		// Note: this inner token deliberately shadows the outer one; the
		// fall-through below uses the OUTER token (the VALUE token) for
		// the empty scalar's mark.
		token := peek_token(parser)
		if token == nil {
			return false
		}
		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
			return yaml_parser_parse_node(parser, event, false, false)
		}
	}
	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
}

// Parse the productions:
// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
//                                                                      *
//
// Emit the MAPPING-END event that closes a single-pair mapping embedded
// in a flow sequence.
func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
	*event = yaml_event_t{
		typ:        yaml_MAPPING_END_EVENT,
		start_mark: token.start_mark,
		end_mark:   token.start_mark, // [Go] Shouldn't this be end_mark?
	}
	return true
}

// Parse the productions:
// flow_mapping         ::= FLOW-MAPPING-START
//                          ******************
//                          (flow_mapping_entry FLOW-ENTRY)*
//                           *                  **********
//                          flow_mapping_entry?
//                          ******************
//                          FLOW-MAPPING-END
//                          ****************
// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
//                          *           *** *
//
// Parse one key of a flow mapping ("{a: b, c}").  On the first call the
// FLOW-MAPPING-START token is consumed; later entries require a ','.
func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
	if first {
		// NOTE(review): peek_token result is not nil-checked before use here
		// — confirm against upstream behavior on scanner errors.
		token := peek_token(parser)
		parser.marks = append(parser.marks, token.start_mark)
		skip_token(parser)
	}

	token := peek_token(parser)
	if token == nil {
		return false
	}

	if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
		if !first {
			if token.typ == yaml_FLOW_ENTRY_TOKEN {
				skip_token(parser)
				token = peek_token(parser)
				if token == nil {
					return false
				}
			} else {
				context_mark := parser.marks[len(parser.marks)-1]
				parser.marks = parser.marks[:len(parser.marks)-1]
				return yaml_parser_set_parser_error_context(parser,
					"while parsing a flow mapping", context_mark,
					"did not find expected ',' or '}'", token.start_mark)
			}
		}

		if token.typ == yaml_KEY_TOKEN {
			skip_token(parser)
			token = peek_token(parser)
			if token == nil {
				return false
			}
			if token.typ != yaml_VALUE_TOKEN &&
				token.typ != yaml_FLOW_ENTRY_TOKEN &&
				token.typ != yaml_FLOW_MAPPING_END_TOKEN {
				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
				return yaml_parser_parse_node(parser, event, false, false)
			} else {
				// "?" with no key node: empty scalar key.
				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
				return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
			}
		} else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
			// A bare node acts as a key whose value is empty.
			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
			return yaml_parser_parse_node(parser, event, false, false)
		}
	}

	parser.state = parser.states[len(parser.states)-1]
	parser.states = parser.states[:len(parser.states)-1]
	parser.marks = parser.marks[:len(parser.marks)-1]
	*event = yaml_event_t{
		typ:        yaml_MAPPING_END_EVENT,
		start_mark: token.start_mark,
		end_mark:   token.end_mark,
	}
	skip_token(parser)
	return true
}

// Parse the productions:
// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
//                                   *                  ***** *
//
// Parse the value of a flow mapping entry; empty==true forces an empty
// scalar (for a key that had no explicit value).
func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if empty {
		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
	}
	if token.typ == yaml_VALUE_TOKEN {
		skip_token(parser)
		token = peek_token(parser)
		if token == nil {
			return false
		}
		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
			return yaml_parser_parse_node(parser, event, false, false)
		}
	}
	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
}

// Generate an empty scalar event at the given mark.
func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
	*event = yaml_event_t{
		typ:        yaml_SCALAR_EVENT,
		start_mark: mark,
		end_mark:   mark,
		value:      nil, // Empty
		implicit:   true,
		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
	}
	return true
}

// The tag handles that are always in scope, per the YAML 1.1 spec.
var default_tag_directives = []yaml_tag_directive_t{
	{[]byte("!"), []byte("!")},
	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
}

// Parse directives.
func yaml_parser_process_directives(parser *yaml_parser_t, version_directive_ref **yaml_version_directive_t, tag_directives_ref *[]yaml_tag_directive_t) bool { var version_directive *yaml_version_directive_t var tag_directives []yaml_tag_directive_t token := peek_token(parser) if token == nil { return false } for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { if version_directive != nil { yaml_parser_set_parser_error(parser, "found duplicate %YAML directive", token.start_mark) return false } if token.major != 1 || token.minor != 1 { yaml_parser_set_parser_error(parser, "found incompatible YAML document", token.start_mark) return false } version_directive = &yaml_version_directive_t{ major: token.major, minor: token.minor, } } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { value := yaml_tag_directive_t{ handle: token.value, prefix: token.prefix, } if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { return false } tag_directives = append(tag_directives, value) } skip_token(parser) token = peek_token(parser) if token == nil { return false } } for i := range default_tag_directives { if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { return false } } if version_directive_ref != nil { *version_directive_ref = version_directive } if tag_directives_ref != nil { *tag_directives_ref = tag_directives } return true } // Append a tag directive to the directives stack. func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { for i := range parser.tag_directives { if bytes.Equal(value.handle, parser.tag_directives[i].handle) { if allow_duplicates { return true } return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) } } // [Go] I suspect the copy is unnecessary. 
This was likely done // because there was no way to track ownership of the data. value_copy := yaml_tag_directive_t{ handle: make([]byte, len(value.handle)), prefix: make([]byte, len(value.prefix)), } copy(value_copy.handle, value.handle) copy(value_copy.prefix, value.prefix) parser.tag_directives = append(parser.tag_directives, value_copy) return true } charm-2.1.1/src/gopkg.in/yaml.v2/encode.go0000664000175000017500000001644212672604537017175 0ustar marcomarcopackage yaml import ( "encoding" "fmt" "reflect" "regexp" "sort" "strconv" "strings" "time" ) type encoder struct { emitter yaml_emitter_t event yaml_event_t out []byte flow bool } func newEncoder() (e *encoder) { e = &encoder{} e.must(yaml_emitter_initialize(&e.emitter)) yaml_emitter_set_output_string(&e.emitter, &e.out) yaml_emitter_set_unicode(&e.emitter, true) e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) e.emit() e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) e.emit() return e } func (e *encoder) finish() { e.must(yaml_document_end_event_initialize(&e.event, true)) e.emit() e.emitter.open_ended = false e.must(yaml_stream_end_event_initialize(&e.event)) e.emit() } func (e *encoder) destroy() { yaml_emitter_delete(&e.emitter) } func (e *encoder) emit() { // This will internally delete the e.event value. 
if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { e.must(false) } } func (e *encoder) must(ok bool) { if !ok { msg := e.emitter.problem if msg == "" { msg = "unknown problem generating YAML content" } failf("%s", msg) } } func (e *encoder) marshal(tag string, in reflect.Value) { if !in.IsValid() { e.nilv() return } iface := in.Interface() if m, ok := iface.(Marshaler); ok { v, err := m.MarshalYAML() if err != nil { fail(err) } if v == nil { e.nilv() return } in = reflect.ValueOf(v) } else if m, ok := iface.(encoding.TextMarshaler); ok { text, err := m.MarshalText() if err != nil { fail(err) } in = reflect.ValueOf(string(text)) } switch in.Kind() { case reflect.Interface: if in.IsNil() { e.nilv() } else { e.marshal(tag, in.Elem()) } case reflect.Map: e.mapv(tag, in) case reflect.Ptr: if in.IsNil() { e.nilv() } else { e.marshal(tag, in.Elem()) } case reflect.Struct: e.structv(tag, in) case reflect.Slice: if in.Type().Elem() == mapItemType { e.itemsv(tag, in) } else { e.slicev(tag, in) } case reflect.String: e.stringv(tag, in) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: if in.Type() == durationType { e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) } else { e.intv(tag, in) } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: e.uintv(tag, in) case reflect.Float32, reflect.Float64: e.floatv(tag, in) case reflect.Bool: e.boolv(tag, in) default: panic("cannot marshal type: " + in.Type().String()) } } func (e *encoder) mapv(tag string, in reflect.Value) { e.mappingv(tag, func() { keys := keyList(in.MapKeys()) sort.Sort(keys) for _, k := range keys { e.marshal("", k) e.marshal("", in.MapIndex(k)) } }) } func (e *encoder) itemsv(tag string, in reflect.Value) { e.mappingv(tag, func() { slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) for _, item := range slice { e.marshal("", 
reflect.ValueOf(item.Key)) e.marshal("", reflect.ValueOf(item.Value)) } }) } func (e *encoder) structv(tag string, in reflect.Value) { sinfo, err := getStructInfo(in.Type()) if err != nil { panic(err) } e.mappingv(tag, func() { for _, info := range sinfo.FieldsList { var value reflect.Value if info.Inline == nil { value = in.Field(info.Num) } else { value = in.FieldByIndex(info.Inline) } if info.OmitEmpty && isZero(value) { continue } e.marshal("", reflect.ValueOf(info.Key)) e.flow = info.Flow e.marshal("", value) } if sinfo.InlineMap >= 0 { m := in.Field(sinfo.InlineMap) if m.Len() > 0 { e.flow = false keys := keyList(m.MapKeys()) sort.Sort(keys) for _, k := range keys { if _, found := sinfo.FieldsMap[k.String()]; found { panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) } e.marshal("", k) e.flow = false e.marshal("", m.MapIndex(k)) } } } }) } func (e *encoder) mappingv(tag string, f func()) { implicit := tag == "" style := yaml_BLOCK_MAPPING_STYLE if e.flow { e.flow = false style = yaml_FLOW_MAPPING_STYLE } e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) e.emit() f() e.must(yaml_mapping_end_event_initialize(&e.event)) e.emit() } func (e *encoder) slicev(tag string, in reflect.Value) { implicit := tag == "" style := yaml_BLOCK_SEQUENCE_STYLE if e.flow { e.flow = false style = yaml_FLOW_SEQUENCE_STYLE } e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) e.emit() n := in.Len() for i := 0; i < n; i++ { e.marshal("", in.Index(i)) } e.must(yaml_sequence_end_event_initialize(&e.event)) e.emit() } // isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. // // The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported // in YAML 1.2 and by this package, but these should be marshalled quoted for // the time being for compatibility with other parsers. func isBase60Float(s string) (result bool) { // Fast path. 
if s == "" { return false } c := s[0] if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { return false } // Do the full match. return base60float.MatchString(s) } // From http://yaml.org/type/float.html, except the regular expression there // is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) func (e *encoder) stringv(tag string, in reflect.Value) { var style yaml_scalar_style_t s := in.String() rtag, rs := resolve("", s) if rtag == yaml_BINARY_TAG { if tag == "" || tag == yaml_STR_TAG { tag = rtag s = rs.(string) } else if tag == yaml_BINARY_TAG { failf("explicitly tagged !!binary data must be base64-encoded") } else { failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) } } if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { style = yaml_DOUBLE_QUOTED_SCALAR_STYLE } else if strings.Contains(s, "\n") { style = yaml_LITERAL_SCALAR_STYLE } else { style = yaml_PLAIN_SCALAR_STYLE } e.emitScalar(s, "", tag, style) } func (e *encoder) boolv(tag string, in reflect.Value) { var s string if in.Bool() { s = "true" } else { s = "false" } e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) } func (e *encoder) intv(tag string, in reflect.Value) { s := strconv.FormatInt(in.Int(), 10) e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) } func (e *encoder) uintv(tag string, in reflect.Value) { s := strconv.FormatUint(in.Uint(), 10) e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) } func (e *encoder) floatv(tag string, in reflect.Value) { // FIXME: Handle 64 bits here. 
s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) switch s { case "+Inf": s = ".inf" case "-Inf": s = "-.inf" case "NaN": s = ".nan" } e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) } func (e *encoder) nilv() { e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) } func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { implicit := tag == "" e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) e.emit() } charm-2.1.1/src/gopkg.in/yaml.v2/sorter.go0000664000175000017500000000465412672604537017260 0ustar marcomarcopackage yaml import ( "reflect" "unicode" ) type keyList []reflect.Value func (l keyList) Len() int { return len(l) } func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } func (l keyList) Less(i, j int) bool { a := l[i] b := l[j] ak := a.Kind() bk := b.Kind() for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { a = a.Elem() ak = a.Kind() } for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { b = b.Elem() bk = b.Kind() } af, aok := keyFloat(a) bf, bok := keyFloat(b) if aok && bok { if af != bf { return af < bf } if ak != bk { return ak < bk } return numLess(a, b) } if ak != reflect.String || bk != reflect.String { return ak < bk } ar, br := []rune(a.String()), []rune(b.String()) for i := 0; i < len(ar) && i < len(br); i++ { if ar[i] == br[i] { continue } al := unicode.IsLetter(ar[i]) bl := unicode.IsLetter(br[i]) if al && bl { return ar[i] < br[i] } if al || bl { return bl } var ai, bi int var an, bn int64 for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { an = an*10 + int64(ar[ai]-'0') } for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { bn = bn*10 + int64(br[bi]-'0') } if an != bn { return an < bn } if ai != bi { return ai < bi } return ar[i] < br[i] } return len(ar) < len(br) } // keyFloat returns a float value for v if it is a number/bool // and whether it is a number/bool or not. 
func keyFloat(v reflect.Value) (f float64, ok bool) {
	switch v.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return float64(v.Int()), true
	case reflect.Float32, reflect.Float64:
		return v.Float(), true
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return float64(v.Uint()), true
	case reflect.Bool:
		// Booleans order as false (0) < true (1).
		if v.Bool() {
			return 1, true
		}
		return 0, true
	}
	return 0, false
}

// numLess returns whether a < b.
// a and b must necessarily have the same kind.
func numLess(a, b reflect.Value) bool {
	switch a.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return a.Int() < b.Int()
	case reflect.Float32, reflect.Float64:
		return a.Float() < b.Float()
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return a.Uint() < b.Uint()
	case reflect.Bool:
		return !a.Bool() && b.Bool()
	}
	// Callers only reach here via keyFloat-accepted kinds, so any other
	// kind is a programming error.
	panic("not a number")
}
charm-2.1.1/src/gopkg.in/yaml.v2/apic.go0000664000175000017500000005137612672604537016651 0ustar  marcomarcopackage yaml

import (
	"io"
	"os"
)

// yaml_insert_token inserts token at position pos of the parser's token
// queue (or appends it when pos < 0), compacting the queue's backing
// array first if it is full but has consumed space at the front.
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))

	// Check if we can move the queue at the beginning of the buffer.
	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
		if parser.tokens_head != len(parser.tokens) {
			copy(parser.tokens, parser.tokens[parser.tokens_head:])
		}
		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
		parser.tokens_head = 0
	}
	parser.tokens = append(parser.tokens, *token)
	if pos < 0 {
		return
	}
	// Shift the tail right by one and drop the token into the gap.
	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
	parser.tokens[parser.tokens_head+pos] = *token
}

// Create a new parser object.
func yaml_parser_initialize(parser *yaml_parser_t) bool {
	// Reset the parser to a fresh state with pre-allocated input buffers.
	*parser = yaml_parser_t{
		raw_buffer: make([]byte, 0, input_raw_buffer_size),
		buffer:     make([]byte, 0, input_buffer_size),
	}
	return true
}

// Destroy a parser object.
func yaml_parser_delete(parser *yaml_parser_t) {
	*parser = yaml_parser_t{}
}

// String read handler: feeds the parser from the in-memory input slice.
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
	if parser.input_pos == len(parser.input) {
		return 0, io.EOF
	}
	n = copy(buffer, parser.input[parser.input_pos:])
	parser.input_pos += n
	return n, nil
}

// File read handler: feeds the parser from the input file.
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
	return parser.input_file.Read(buffer)
}

// Set a string input.  Panics if an input source was already set.
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
	if parser.read_handler != nil {
		panic("must set the input source only once")
	}
	parser.read_handler = yaml_string_read_handler
	parser.input = input
	parser.input_pos = 0
}

// Set a file input.  Panics if an input source was already set.
func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
	if parser.read_handler != nil {
		panic("must set the input source only once")
	}
	parser.read_handler = yaml_file_read_handler
	parser.input_file = file
}

// Set the source encoding.  Panics if an encoding was already chosen.
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
	if parser.encoding != yaml_ANY_ENCODING {
		panic("must set the encoding only once")
	}
	parser.encoding = encoding
}

// Create a new emitter object with pre-allocated output buffers and
// state/event queues.
func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
	*emitter = yaml_emitter_t{
		buffer:     make([]byte, output_buffer_size),
		raw_buffer: make([]byte, 0, output_raw_buffer_size),
		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
		events:     make([]yaml_event_t, 0, initial_queue_size),
	}
	return true
}

// Destroy an emitter object.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
	*emitter = yaml_emitter_t{}
}

// String write handler.
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { *emitter.output_buffer = append(*emitter.output_buffer, buffer...) return nil } // File write handler. func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { _, err := emitter.output_file.Write(buffer) return err } // Set a string output. func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { if emitter.write_handler != nil { panic("must set the output target only once") } emitter.write_handler = yaml_string_write_handler emitter.output_buffer = output_buffer } // Set a file output. func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { if emitter.write_handler != nil { panic("must set the output target only once") } emitter.write_handler = yaml_file_write_handler emitter.output_file = file } // Set the output encoding. func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { if emitter.encoding != yaml_ANY_ENCODING { panic("must set the output encoding only once") } emitter.encoding = encoding } // Set the canonical output style. func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { emitter.canonical = canonical } //// Set the indentation increment. func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { if indent < 2 || indent > 9 { indent = 2 } emitter.best_indent = indent } // Set the preferred line width. func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { if width < 0 { width = -1 } emitter.best_width = width } // Set if unescaped non-ASCII characters are allowed. func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { emitter.unicode = unicode } // Set the preferred line break character. func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { emitter.line_break = line_break } ///* // * Destroy a token object. 
// */ // //YAML_DECLARE(void) //yaml_token_delete(yaml_token_t *token) //{ // assert(token); // Non-NULL token object expected. // // switch (token.type) // { // case YAML_TAG_DIRECTIVE_TOKEN: // yaml_free(token.data.tag_directive.handle); // yaml_free(token.data.tag_directive.prefix); // break; // // case YAML_ALIAS_TOKEN: // yaml_free(token.data.alias.value); // break; // // case YAML_ANCHOR_TOKEN: // yaml_free(token.data.anchor.value); // break; // // case YAML_TAG_TOKEN: // yaml_free(token.data.tag.handle); // yaml_free(token.data.tag.suffix); // break; // // case YAML_SCALAR_TOKEN: // yaml_free(token.data.scalar.value); // break; // // default: // break; // } // // memset(token, 0, sizeof(yaml_token_t)); //} // ///* // * Check if a string is a valid UTF-8 sequence. // * // * Check 'reader.c' for more details on UTF-8 encoding. // */ // //static int //yaml_check_utf8(yaml_char_t *start, size_t length) //{ // yaml_char_t *end = start+length; // yaml_char_t *pointer = start; // // while (pointer < end) { // unsigned char octet; // unsigned int width; // unsigned int value; // size_t k; // // octet = pointer[0]; // width = (octet & 0x80) == 0x00 ? 1 : // (octet & 0xE0) == 0xC0 ? 2 : // (octet & 0xF0) == 0xE0 ? 3 : // (octet & 0xF8) == 0xF0 ? 4 : 0; // value = (octet & 0x80) == 0x00 ? octet & 0x7F : // (octet & 0xE0) == 0xC0 ? octet & 0x1F : // (octet & 0xF0) == 0xE0 ? octet & 0x0F : // (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; // if (!width) return 0; // if (pointer+width > end) return 0; // for (k = 1; k < width; k ++) { // octet = pointer[k]; // if ((octet & 0xC0) != 0x80) return 0; // value = (value << 6) + (octet & 0x3F); // } // if (!((width == 1) || // (width == 2 && value >= 0x80) || // (width == 3 && value >= 0x800) || // (width == 4 && value >= 0x10000))) return 0; // // pointer += width; // } // // return 1; //} // // Create STREAM-START. 
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { *event = yaml_event_t{ typ: yaml_STREAM_START_EVENT, encoding: encoding, } return true } // Create STREAM-END. func yaml_stream_end_event_initialize(event *yaml_event_t) bool { *event = yaml_event_t{ typ: yaml_STREAM_END_EVENT, } return true } // Create DOCUMENT-START. func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, tag_directives []yaml_tag_directive_t, implicit bool) bool { *event = yaml_event_t{ typ: yaml_DOCUMENT_START_EVENT, version_directive: version_directive, tag_directives: tag_directives, implicit: implicit, } return true } // Create DOCUMENT-END. func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { *event = yaml_event_t{ typ: yaml_DOCUMENT_END_EVENT, implicit: implicit, } return true } ///* // * Create ALIAS. // */ // //YAML_DECLARE(int) //yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) //{ // mark yaml_mark_t = { 0, 0, 0 } // anchor_copy *yaml_char_t = NULL // // assert(event) // Non-NULL event object is expected. // assert(anchor) // Non-NULL anchor is expected. // // if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 // // anchor_copy = yaml_strdup(anchor) // if (!anchor_copy) // return 0 // // ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) // // return 1 //} // Create SCALAR. func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { *event = yaml_event_t{ typ: yaml_SCALAR_EVENT, anchor: anchor, tag: tag, value: value, implicit: plain_implicit, quoted_implicit: quoted_implicit, style: yaml_style_t(style), } return true } // Create SEQUENCE-START. 
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { *event = yaml_event_t{ typ: yaml_SEQUENCE_START_EVENT, anchor: anchor, tag: tag, implicit: implicit, style: yaml_style_t(style), } return true } // Create SEQUENCE-END. func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { *event = yaml_event_t{ typ: yaml_SEQUENCE_END_EVENT, } return true } // Create MAPPING-START. func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { *event = yaml_event_t{ typ: yaml_MAPPING_START_EVENT, anchor: anchor, tag: tag, implicit: implicit, style: yaml_style_t(style), } return true } // Create MAPPING-END. func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { *event = yaml_event_t{ typ: yaml_MAPPING_END_EVENT, } return true } // Destroy an event object. func yaml_event_delete(event *yaml_event_t) { *event = yaml_event_t{} } ///* // * Create a document object. // */ // //YAML_DECLARE(int) //yaml_document_initialize(document *yaml_document_t, // version_directive *yaml_version_directive_t, // tag_directives_start *yaml_tag_directive_t, // tag_directives_end *yaml_tag_directive_t, // start_implicit int, end_implicit int) //{ // struct { // error yaml_error_type_t // } context // struct { // start *yaml_node_t // end *yaml_node_t // top *yaml_node_t // } nodes = { NULL, NULL, NULL } // version_directive_copy *yaml_version_directive_t = NULL // struct { // start *yaml_tag_directive_t // end *yaml_tag_directive_t // top *yaml_tag_directive_t // } tag_directives_copy = { NULL, NULL, NULL } // value yaml_tag_directive_t = { NULL, NULL } // mark yaml_mark_t = { 0, 0, 0 } // // assert(document) // Non-NULL document object is expected. // assert((tag_directives_start && tag_directives_end) || // (tag_directives_start == tag_directives_end)) // // Valid tag directives are expected. 
// // if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error // // if (version_directive) { // version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) // if (!version_directive_copy) goto error // version_directive_copy.major = version_directive.major // version_directive_copy.minor = version_directive.minor // } // // if (tag_directives_start != tag_directives_end) { // tag_directive *yaml_tag_directive_t // if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) // goto error // for (tag_directive = tag_directives_start // tag_directive != tag_directives_end; tag_directive ++) { // assert(tag_directive.handle) // assert(tag_directive.prefix) // if (!yaml_check_utf8(tag_directive.handle, // strlen((char *)tag_directive.handle))) // goto error // if (!yaml_check_utf8(tag_directive.prefix, // strlen((char *)tag_directive.prefix))) // goto error // value.handle = yaml_strdup(tag_directive.handle) // value.prefix = yaml_strdup(tag_directive.prefix) // if (!value.handle || !value.prefix) goto error // if (!PUSH(&context, tag_directives_copy, value)) // goto error // value.handle = NULL // value.prefix = NULL // } // } // // DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, // tag_directives_copy.start, tag_directives_copy.top, // start_implicit, end_implicit, mark, mark) // // return 1 // //error: // STACK_DEL(&context, nodes) // yaml_free(version_directive_copy) // while (!STACK_EMPTY(&context, tag_directives_copy)) { // value yaml_tag_directive_t = POP(&context, tag_directives_copy) // yaml_free(value.handle) // yaml_free(value.prefix) // } // STACK_DEL(&context, tag_directives_copy) // yaml_free(value.handle) // yaml_free(value.prefix) // // return 0 //} // ///* // * Destroy a document object. 
// */ // //YAML_DECLARE(void) //yaml_document_delete(document *yaml_document_t) //{ // struct { // error yaml_error_type_t // } context // tag_directive *yaml_tag_directive_t // // context.error = YAML_NO_ERROR // Eliminate a compliler warning. // // assert(document) // Non-NULL document object is expected. // // while (!STACK_EMPTY(&context, document.nodes)) { // node yaml_node_t = POP(&context, document.nodes) // yaml_free(node.tag) // switch (node.type) { // case YAML_SCALAR_NODE: // yaml_free(node.data.scalar.value) // break // case YAML_SEQUENCE_NODE: // STACK_DEL(&context, node.data.sequence.items) // break // case YAML_MAPPING_NODE: // STACK_DEL(&context, node.data.mapping.pairs) // break // default: // assert(0) // Should not happen. // } // } // STACK_DEL(&context, document.nodes) // // yaml_free(document.version_directive) // for (tag_directive = document.tag_directives.start // tag_directive != document.tag_directives.end // tag_directive++) { // yaml_free(tag_directive.handle) // yaml_free(tag_directive.prefix) // } // yaml_free(document.tag_directives.start) // // memset(document, 0, sizeof(yaml_document_t)) //} // ///** // * Get a document node. // */ // //YAML_DECLARE(yaml_node_t *) //yaml_document_get_node(document *yaml_document_t, index int) //{ // assert(document) // Non-NULL document object is expected. // // if (index > 0 && document.nodes.start + index <= document.nodes.top) { // return document.nodes.start + index - 1 // } // return NULL //} // ///** // * Get the root object. // */ // //YAML_DECLARE(yaml_node_t *) //yaml_document_get_root_node(document *yaml_document_t) //{ // assert(document) // Non-NULL document object is expected. // // if (document.nodes.top != document.nodes.start) { // return document.nodes.start // } // return NULL //} // ///* // * Add a scalar node to a document. 
// */ // //YAML_DECLARE(int) //yaml_document_add_scalar(document *yaml_document_t, // tag *yaml_char_t, value *yaml_char_t, length int, // style yaml_scalar_style_t) //{ // struct { // error yaml_error_type_t // } context // mark yaml_mark_t = { 0, 0, 0 } // tag_copy *yaml_char_t = NULL // value_copy *yaml_char_t = NULL // node yaml_node_t // // assert(document) // Non-NULL document object is expected. // assert(value) // Non-NULL value is expected. // // if (!tag) { // tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG // } // // if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error // tag_copy = yaml_strdup(tag) // if (!tag_copy) goto error // // if (length < 0) { // length = strlen((char *)value) // } // // if (!yaml_check_utf8(value, length)) goto error // value_copy = yaml_malloc(length+1) // if (!value_copy) goto error // memcpy(value_copy, value, length) // value_copy[length] = '\0' // // SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) // if (!PUSH(&context, document.nodes, node)) goto error // // return document.nodes.top - document.nodes.start // //error: // yaml_free(tag_copy) // yaml_free(value_copy) // // return 0 //} // ///* // * Add a sequence node to a document. // */ // //YAML_DECLARE(int) //yaml_document_add_sequence(document *yaml_document_t, // tag *yaml_char_t, style yaml_sequence_style_t) //{ // struct { // error yaml_error_type_t // } context // mark yaml_mark_t = { 0, 0, 0 } // tag_copy *yaml_char_t = NULL // struct { // start *yaml_node_item_t // end *yaml_node_item_t // top *yaml_node_item_t // } items = { NULL, NULL, NULL } // node yaml_node_t // // assert(document) // Non-NULL document object is expected. 
// // if (!tag) { // tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG // } // // if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error // tag_copy = yaml_strdup(tag) // if (!tag_copy) goto error // // if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error // // SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, // style, mark, mark) // if (!PUSH(&context, document.nodes, node)) goto error // // return document.nodes.top - document.nodes.start // //error: // STACK_DEL(&context, items) // yaml_free(tag_copy) // // return 0 //} // ///* // * Add a mapping node to a document. // */ // //YAML_DECLARE(int) //yaml_document_add_mapping(document *yaml_document_t, // tag *yaml_char_t, style yaml_mapping_style_t) //{ // struct { // error yaml_error_type_t // } context // mark yaml_mark_t = { 0, 0, 0 } // tag_copy *yaml_char_t = NULL // struct { // start *yaml_node_pair_t // end *yaml_node_pair_t // top *yaml_node_pair_t // } pairs = { NULL, NULL, NULL } // node yaml_node_t // // assert(document) // Non-NULL document object is expected. // // if (!tag) { // tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG // } // // if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error // tag_copy = yaml_strdup(tag) // if (!tag_copy) goto error // // if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error // // MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, // style, mark, mark) // if (!PUSH(&context, document.nodes, node)) goto error // // return document.nodes.top - document.nodes.start // //error: // STACK_DEL(&context, pairs) // yaml_free(tag_copy) // // return 0 //} // ///* // * Append an item to a sequence node. // */ // //YAML_DECLARE(int) //yaml_document_append_sequence_item(document *yaml_document_t, // sequence int, item int) //{ // struct { // error yaml_error_type_t // } context // // assert(document) // Non-NULL document is required. 
// assert(sequence > 0 // && document.nodes.start + sequence <= document.nodes.top) // // Valid sequence id is required. // assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) // // A sequence node is required. // assert(item > 0 && document.nodes.start + item <= document.nodes.top) // // Valid item id is required. // // if (!PUSH(&context, // document.nodes.start[sequence-1].data.sequence.items, item)) // return 0 // // return 1 //} // ///* // * Append a pair of a key and a value to a mapping node. // */ // //YAML_DECLARE(int) //yaml_document_append_mapping_pair(document *yaml_document_t, // mapping int, key int, value int) //{ // struct { // error yaml_error_type_t // } context // // pair yaml_node_pair_t // // assert(document) // Non-NULL document is required. // assert(mapping > 0 // && document.nodes.start + mapping <= document.nodes.top) // // Valid mapping id is required. // assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) // // A mapping node is required. // assert(key > 0 && document.nodes.start + key <= document.nodes.top) // // Valid key id is required. // assert(value > 0 && document.nodes.start + value <= document.nodes.top) // // Valid value id is required. // // pair.key = key // pair.value = value // // if (!PUSH(&context, // document.nodes.start[mapping-1].data.mapping.pairs, pair)) // return 0 // // return 1 //} // // charm-2.1.1/src/gopkg.in/yaml.v2/yaml.go0000664000175000017500000002320112672604537016671 0ustar marcomarco// Package yaml implements YAML support for the Go language. // // Source code and other details for the project are available at GitHub: // // https://github.com/go-yaml/yaml // package yaml import ( "errors" "fmt" "reflect" "strings" "sync" ) // MapSlice encodes and decodes as a YAML map. // The order of keys is preserved when encoding and decoding. type MapSlice []MapItem // MapItem is an item in a MapSlice. 
// MapItem is a single key/value pair within a MapSlice.
type MapItem struct {
	Key, Value interface{}
}

// The Unmarshaler interface may be implemented by types to customize their
// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
// method receives a function that may be called to unmarshal the original
// YAML value into a field or variable. It is safe to call the unmarshal
// function parameter more than once if necessary.
type Unmarshaler interface {
	UnmarshalYAML(unmarshal func(interface{}) error) error
}

// The Marshaler interface may be implemented by types to customize their
// behavior when being marshaled into a YAML document. The returned value
// is marshaled in place of the original value implementing Marshaler.
//
// If an error is returned by MarshalYAML, the marshaling procedure stops
// and returns with the provided error.
type Marshaler interface {
	MarshalYAML() (interface{}, error)
}

// Unmarshal decodes the first document found within the in byte slice
// and assigns decoded values into the out value.
//
// Maps and pointers (to a struct, string, int, etc) are accepted as out
// values. If an internal pointer within a struct is not initialized,
// the yaml package will initialize it if necessary for unmarshalling
// the provided data. The out parameter must not be nil.
//
// The type of the decoded values should be compatible with the respective
// values in out. If one or more values cannot be decoded due to a type
// mismatches, decoding continues partially until the end of the YAML
// content, and a *yaml.TypeError is returned with details for all
// missed values.
//
// Struct fields are only unmarshalled if they are exported (have an
// upper case first letter), and are unmarshalled using the field name
// lowercased as the default key. Custom keys may be defined via the
// "yaml" name in the field tag: the content preceding the first comma
// is used as the key, and the following comma-separated options are
// used to tweak the marshalling process (see Marshal).
// Conflicting names result in a runtime error. // // For example: // // type T struct { // F int `yaml:"a,omitempty"` // B int // } // var t T // yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) // // See the documentation of Marshal for the format of tags and a list of // supported tag options. // func Unmarshal(in []byte, out interface{}) (err error) { defer handleErr(&err) d := newDecoder() p := newParser(in) defer p.destroy() node := p.parse() if node != nil { v := reflect.ValueOf(out) if v.Kind() == reflect.Ptr && !v.IsNil() { v = v.Elem() } d.unmarshal(node, v) } if len(d.terrors) > 0 { return &TypeError{d.terrors} } return nil } // Marshal serializes the value provided into a YAML document. The structure // of the generated document will reflect the structure of the value itself. // Maps and pointers (to struct, string, int, etc) are accepted as the in value. // // Struct fields are only unmarshalled if they are exported (have an upper case // first letter), and are unmarshalled using the field name lowercased as the // default key. Custom keys may be defined via the "yaml" name in the field // tag: the content preceding the first comma is used as the key, and the // following comma-separated options are used to tweak the marshalling process. // Conflicting names result in a runtime error. // // The field tag format accepted is: // // `(...) yaml:"[][,[,]]" (...)` // // The following flags are currently supported: // // omitempty Only include the field if it's not set to the zero // value for the type or to empty slices or maps. // Does not apply to zero valued structs. // // flow Marshal using a flow style (useful for structs, // sequences and maps). // // inline Inline the field, which must be a struct or a map, // causing all of its fields or keys to be processed as if // they were part of the outer struct. For maps, keys must // not conflict with the yaml keys of other struct fields. // // In addition, if the key is "-", the field is ignored. 
// // For example: // // type T struct { // F int "a,omitempty" // B int // } // yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" // yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" // func Marshal(in interface{}) (out []byte, err error) { defer handleErr(&err) e := newEncoder() defer e.destroy() e.marshal("", reflect.ValueOf(in)) e.finish() out = e.out return } func handleErr(err *error) { if v := recover(); v != nil { if e, ok := v.(yamlError); ok { *err = e.err } else { panic(v) } } } type yamlError struct { err error } func fail(err error) { panic(yamlError{err}) } func failf(format string, args ...interface{}) { panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) } // A TypeError is returned by Unmarshal when one or more fields in // the YAML document cannot be properly decoded into the requested // types. When this error is returned, the value is still // unmarshaled partially. type TypeError struct { Errors []string } func (e *TypeError) Error() string { return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) } // -------------------------------------------------------------------------- // Maintain a mapping of keys to structure field indexes // The code in this section was copied from mgo/bson. // structInfo holds details for the serialization of fields of // a given struct. type structInfo struct { FieldsMap map[string]fieldInfo FieldsList []fieldInfo // InlineMap is the number of the field in the struct that // contains an ,inline map, or -1 if there's none. InlineMap int } type fieldInfo struct { Key string Num int OmitEmpty bool Flow bool // Inline holds the field index if the field is part of an inlined struct. 
Inline []int } var structMap = make(map[reflect.Type]*structInfo) var fieldMapMutex sync.RWMutex func getStructInfo(st reflect.Type) (*structInfo, error) { fieldMapMutex.RLock() sinfo, found := structMap[st] fieldMapMutex.RUnlock() if found { return sinfo, nil } n := st.NumField() fieldsMap := make(map[string]fieldInfo) fieldsList := make([]fieldInfo, 0, n) inlineMap := -1 for i := 0; i != n; i++ { field := st.Field(i) if field.PkgPath != "" { continue // Private field } info := fieldInfo{Num: i} tag := field.Tag.Get("yaml") if tag == "" && strings.Index(string(field.Tag), ":") < 0 { tag = string(field.Tag) } if tag == "-" { continue } inline := false fields := strings.Split(tag, ",") if len(fields) > 1 { for _, flag := range fields[1:] { switch flag { case "omitempty": info.OmitEmpty = true case "flow": info.Flow = true case "inline": inline = true default: return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) } } tag = fields[0] } if inline { switch field.Type.Kind() { case reflect.Map: if inlineMap >= 0 { return nil, errors.New("Multiple ,inline maps in struct " + st.String()) } if field.Type.Key() != reflect.TypeOf("") { return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) } inlineMap = info.Num case reflect.Struct: sinfo, err := getStructInfo(field.Type) if err != nil { return nil, err } for _, finfo := range sinfo.FieldsList { if _, found := fieldsMap[finfo.Key]; found { msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() return nil, errors.New(msg) } if finfo.Inline == nil { finfo.Inline = []int{i, finfo.Num} } else { finfo.Inline = append([]int{i}, finfo.Inline...) 
} fieldsMap[finfo.Key] = finfo fieldsList = append(fieldsList, finfo) } default: //return nil, errors.New("Option ,inline needs a struct value or map field") return nil, errors.New("Option ,inline needs a struct value field") } continue } if tag != "" { info.Key = tag } else { info.Key = strings.ToLower(field.Name) } if _, found = fieldsMap[info.Key]; found { msg := "Duplicated key '" + info.Key + "' in struct " + st.String() return nil, errors.New(msg) } fieldsList = append(fieldsList, info) fieldsMap[info.Key] = info } sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} fieldMapMutex.Lock() structMap[st] = sinfo fieldMapMutex.Unlock() return sinfo, nil } func isZero(v reflect.Value) bool { switch v.Kind() { case reflect.String: return len(v.String()) == 0 case reflect.Interface, reflect.Ptr: return v.IsNil() case reflect.Slice: return v.Len() == 0 case reflect.Map: return v.Len() == 0 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return v.Uint() == 0 case reflect.Bool: return !v.Bool() case reflect.Struct: vt := v.Type() for i := v.NumField() - 1; i >= 0; i-- { if vt.Field(i).PkgPath != "" { continue // Private field } if !isZero(v.Field(i)) { return false } } return true } return false } charm-2.1.1/src/gopkg.in/errgo.v1/0000775000175000017500000000000012672604506015551 5ustar marcomarcocharm-2.1.1/src/gopkg.in/errgo.v1/export_test.go0000664000175000017500000000014212672604506020455 0ustar marcomarco// Copyright 2014 Roger Peppe. // See LICENCE file for details. package errgo var Match = match charm-2.1.1/src/gopkg.in/errgo.v1/LICENSE0000664000175000017500000000274512672604506016566 0ustar marcomarcoCopyright © 2013, Roger Peppe All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of this project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. charm-2.1.1/src/gopkg.in/errgo.v1/errors.go0000664000175000017500000002476712672604506017434 0ustar marcomarco// Copyright 2014 Roger Peppe. // See LICENCE file for details. // The errgo package provides a way to create // and diagnose errors. It is compatible with // the usual Go error idioms but adds a way to wrap errors // so that they record source location information // while retaining a consistent way for code to // inspect errors to find out particular problems. 
// Package errgo provides a way to create and diagnose errors. It is
// compatible with the usual Go error idioms but adds a way to wrap
// errors so that they record source location information while
// retaining a consistent way for code to inspect errors to find out
// particular problems.
package errgo

import (
	"bytes"
	"fmt"
	"log"
	"runtime"
)

// debug enables verbose logging of cause transitions in noteMask and
// the recursive cause expansion in Details. It is compiled out when false.
const debug = false

// Err holds a description of an error along with information about
// where the error was created.
//
// It may be embedded in custom error types to add
// extra information that this errors package can
// understand.
type Err struct {
	// Message_ holds the text of the error message. It may be empty
	// if Underlying is set.
	Message_ string

	// Cause_ holds the cause of the error as returned
	// by the Cause method.
	Cause_ error

	// Underlying holds the underlying error, if any.
	Underlying_ error

	// File and Line identify the source code location where the error was
	// created.
	File string
	Line int
}

// Location implements Locationer.
func (e *Err) Location() (file string, line int) {
	return e.File, e.Line
}

// Underlying returns the underlying error if any.
func (e *Err) Underlying() error {
	return e.Underlying_
}

// Cause implements Causer.
func (e *Err) Cause() error {
	return e.Cause_
}

// Message returns the top level error message.
func (e *Err) Message() string {
	return e.Message_
}

// Error implements error.Error. The result is the message joined to
// the underlying error's message with ": "; either part is omitted
// when empty or nil.
func (e *Err) Error() string {
	switch {
	case e.Message_ == "" && e.Underlying_ == nil:
		return ""
	case e.Message_ == "":
		return e.Underlying_.Error()
	case e.Underlying_ == nil:
		return e.Message_
	}
	return fmt.Sprintf("%s: %v", e.Message_, e.Underlying_)
}

// GoString returns the details of the receiving error
// message, so that printing an error with %#v will
// produce useful information.
func (e *Err) GoString() string {
	return Details(e)
}

// Causer is the type of an error that may provide
// an error cause for error diagnosis. Cause may return
// nil if there is no cause (for example because the
// cause has been masked).
type Causer interface {
	Cause() error
}

// Wrapper is the type of an error that wraps another error. It is
// exposed so that external types may implement it, but should in
// general not be used otherwise.
type Wrapper interface {
	// Message returns the top level error message,
	// not including the message from the underlying
	// error.
	Message() string

	// Underlying returns the underlying error, or nil
	// if there is none.
	Underlying() error
}

// Locationer can be implemented by any error type
// that wants to expose the source location of an error.
type Locationer interface {
	// Location returns the name of the file and the line
	// number associated with an error.
	Location() (file string, line int)
}

// Details returns information about the stack of
// underlying errors wrapped by err, in the format:
//
//	[{filename:99: error one} {otherfile:55: cause of error one}]
//
// The details are found by type-asserting the error to
// the Locationer, Causer and Wrapper interfaces.
// Details of the underlying stack are found by
// recursively calling Underlying when the
// underlying error implements Wrapper.
func Details(err error) string {
	if err == nil {
		return "[]"
	}
	var s []byte
	s = append(s, '[')
	for {
		s = append(s, '{')
		// Prefix the entry with "file:line: " when the error can
		// report its creation site.
		if err, ok := err.(Locationer); ok {
			file, line := err.Location()
			if file != "" {
				s = append(s, fmt.Sprintf("%s:%d", file, line)...)
				s = append(s, ": "...)
			}
		}
		// A Wrapper contributes only its own message and hands the
		// loop its underlying error; anything else terminates the walk.
		if cerr, ok := err.(Wrapper); ok {
			s = append(s, cerr.Message()...)
			err = cerr.Underlying()
		} else {
			s = append(s, err.Error()...)
			err = nil
		}
		if debug {
			// In debug mode, expand the cause chain recursively too.
			if err, ok := err.(Causer); ok {
				if cause := err.Cause(); cause != nil {
					s = append(s, fmt.Sprintf("=%T", cause)...)
					s = append(s, Details(cause)...)
				}
			}
		}
		s = append(s, '}')
		if err == nil {
			break
		}
		s = append(s, ' ')
	}
	s = append(s, ']')
	return string(s)
}

// SetLocation records the source location of the error by setting
// e.File and e.Line, at callDepth stack frames above the call.
func (e *Err) SetLocation(callDepth int) {
	_, file, line, _ := runtime.Caller(callDepth + 1)
	e.File, e.Line = file, line
}

// setLocation is like SetLocation but is a no-op when err is not a *Err
// (including when err is nil).
func setLocation(err error, callDepth int) {
	if e, _ := err.(*Err); e != nil {
		e.SetLocation(callDepth + 1)
	}
}

// New returns a new error with the given error message and no cause. It
// is a drop-in replacement for errors.New from the standard library.
func New(s string) error {
	err := &Err{Message_: s}
	err.SetLocation(1)
	return err
}

// Newf returns a new error with the given printf-formatted error
// message and no cause.
func Newf(f string, a ...interface{}) error {
	err := &Err{Message_: fmt.Sprintf(f, a...)}
	err.SetLocation(1)
	return err
}

// match returns whether any of the given
// functions returns true when called with err as an
// argument.
func match(err error, pass ...func(error) bool) bool {
	for _, f := range pass {
		if f(err) {
			return true
		}
	}
	return false
}

// Is returns a function that returns whether an
// error is equal to the given error.
// It is intended to be used as a "pass" argument
// to Mask and friends; for example:
//
//	return errors.Mask(err, errors.Is(http.ErrNoCookie))
//
// would return an error with an http.ErrNoCookie cause
// only if that was err's diagnosis; otherwise the diagnosis
// would be itself.
func Is(err error) func(error) bool {
	return func(err1 error) bool {
		return err == err1
	}
}

// Any returns true. It can be used as an argument to Mask
// to allow any diagnosis to pass through to the wrapped
// error.
func Any(error) bool {
	return true
}

// NoteMask returns an Err that has the given underlying error,
// with the given message added as context, and allowing
// the cause of the underlying error to pass through into
// the result if allowed by the specific pass functions
// (see Mask for an explanation of the pass parameter).
func NoteMask(underlying error, msg string, pass ...func(error) bool) error {
	err := noteMask(underlying, msg, pass...)
	setLocation(err, 1)
	return err
}

// noteMask is exactly like NoteMask except it doesn't set the location
// of the returned error, so that we can avoid setting it twice
// when it's used in other functions.
func noteMask(underlying error, msg string, pass ...func(error) bool) error {
	newErr := &Err{
		Underlying_: underlying,
		Message_:    msg,
	}
	// The cause is concealed by default; it passes through only when
	// one of the pass functions accepts it.
	if len(pass) > 0 {
		if cause := Cause(underlying); match(cause, pass...) {
			newErr.Cause_ = cause
		}
	}
	if debug {
		if newd, oldd := newErr.Cause_, Cause(underlying); newd != oldd {
			log.Printf("Mask cause %[1]T(%[1]v)->%[2]T(%[2]v)", oldd, newd)
			log.Printf("call stack: %s", callers(0, 20))
			log.Printf("len(allow) == %d", len(pass))
			log.Printf("old error %#v", underlying)
			log.Printf("new error %#v", newErr)
		}
	}
	// NOTE(review): despite the doc comment above, the location is set
	// here too (to noteMask's caller); exported wrappers then overwrite
	// it with their own caller via setLocation.
	newErr.SetLocation(1)
	return newErr
}

// Mask returns an Err that wraps the given underlying error. The error
// message is unchanged, but the error location records the caller of
// Mask.
//
// If err is nil, Mask returns nil.
//
// By default Mask conceals the cause of the wrapped error, but if
// pass(Cause(err)) returns true for any of the provided pass functions,
// the cause of the returned error will be Cause(err).
//
// For example, the following code will return an error whose cause is
// the error from the os.Open call when (and only when) the file does
// not exist.
//
//	f, err := os.Open("non-existent-file")
//	if err != nil {
//		return errors.Mask(err, os.IsNotExist)
//	}
//
// In order to add context to returned errors, it
// is conventional to call Mask when returning any
// error received from elsewhere.
func Mask(underlying error, pass ...func(error) bool) error {
	if underlying == nil {
		return nil
	}
	err := noteMask(underlying, "", pass...)
	setLocation(err, 1)
	return err
}

// Notef returns an Error that wraps the given underlying
// error and adds the given formatted context message.
// The returned error has no cause (use NoteMask
// or WithCausef to add a message while retaining a cause).
func Notef(underlying error, f string, a ...interface{}) error {
	err := noteMask(underlying, fmt.Sprintf(f, a...))
	setLocation(err, 1)
	return err
}

// MaskFunc returns an equivalent of Mask that always allows the
// specified causes in addition to any causes specified when the
// returned function is called.
//
// It is defined for convenience, for example when all calls to Mask in
// a given package wish to allow the same set of causes to be returned.
func MaskFunc(allow ...func(error) bool) func(error, ...func(error) bool) error {
	return func(err error, allow1 ...func(error) bool) error {
		var allowEither []func(error) bool
		if len(allow1) > 0 {
			// This is more efficient than using a function literal,
			// because the compiler knows that it doesn't escape.
			allowEither = make([]func(error) bool, len(allow)+len(allow1))
			copy(allowEither, allow)
			copy(allowEither[len(allow):], allow1)
		} else {
			allowEither = allow
		}
		err = Mask(err, allowEither...)
		setLocation(err, 1)
		return err
	}
}

// WithCausef returns a new Error that wraps the given
// (possibly nil) underlying error and associates it with
// the given cause. The given formatted message context
// will also be added. If f is empty and has no arguments,
// the message will be the same as the cause.
func WithCausef(underlying, cause error, f string, a ...interface{}) error {
	var msg string
	if f == "" && len(a) == 0 && cause != nil {
		msg = cause.Error()
	} else {
		msg = fmt.Sprintf(f, a...)
	}
	err := &Err{
		Underlying_: underlying,
		Cause_:      cause,
		Message_:    msg,
	}
	err.SetLocation(1)
	return err
}

// Cause returns the cause of the given error. If err does not
// implement Causer or its Cause method returns nil, it returns err itself.
//
// Cause is the usual way to diagnose errors that may have
// been wrapped by Mask or NoteMask.
func Cause(err error) error { var diag error if err, ok := err.(Causer); ok { diag = err.Cause() } if diag != nil { return diag } return err } // callers returns the stack trace of the goroutine that called it, // starting n entries above the caller of callers, as a space-separated list // of filename:line-number pairs with no new lines. func callers(n, max int) []byte { var b bytes.Buffer prev := false for i := 0; i < max; i++ { _, file, line, ok := runtime.Caller(n + 1) if !ok { return b.Bytes() } if prev { fmt.Fprintf(&b, " ") } fmt.Fprintf(&b, "%s:%d", file, line) n++ prev = true } return b.Bytes() } charm-2.1.1/src/gopkg.in/errgo.v1/README.md0000664000175000017500000001513312672604506017033 0ustar marcomarco# errgo -- import "gopkg.in/errgo.v1" The errgo package provides a way to create and diagnose errors. It is compatible with the usual Go error idioms but adds a way to wrap errors so that they record source location information while retaining a consistent way for code to inspect errors to find out particular problems. ## Usage #### func Any ```go func Any(error) bool ``` Any returns true. It can be used as an argument to Mask to allow any diagnosis to pass through to the wrapped error. #### func Cause ```go func Cause(err error) error ``` Cause returns the cause of the given error. If err does not implement Causer or its Cause method returns nil, it returns err itself. Cause is the usual way to diagnose errors that may have been wrapped by Mask or NoteMask. #### func Details ```go func Details(err error) string ``` Details returns information about the stack of underlying errors wrapped by err, in the format: [{filename:99: error one} {otherfile:55: cause of error one}] The details are found by type-asserting the error to the Locationer, Causer and Wrapper interfaces. Details of the underlying stack are found by recursively calling Underlying when the underlying error implements Wrapper. 
#### func Is ```go func Is(err error) func(error) bool ``` Is returns a function that returns whether the an error is equal to the given error. It is intended to be used as a "pass" argument to Mask and friends; for example: return errors.Mask(err, errors.Is(http.ErrNoCookie)) would return an error with an http.ErrNoCookie cause only if that was err's diagnosis; otherwise the diagnosis would be itself. #### func Mask ```go func Mask(underlying error, pass ...func(error) bool) error ``` Mask returns an Err that wraps the given underyling error. The error message is unchanged, but the error location records the caller of Mask. If err is nil, Mask returns nil. By default Mask conceals the cause of the wrapped error, but if pass(Cause(err)) returns true for any of the provided pass functions, the cause of the returned error will be Cause(err). For example, the following code will return an error whose cause is the error from the os.Open call when (and only when) the file does not exist. f, err := os.Open("non-existent-file") if err != nil { return errors.Mask(err, os.IsNotExist) } In order to add context to returned errors, it is conventional to call Mask when returning any error received from elsewhere. #### func MaskFunc ```go func MaskFunc(allow ...func(error) bool) func(error, ...func(error) bool) error ``` MaskFunc returns an equivalent of Mask that always allows the specified causes in addition to any causes specified when the returned function is called. It is defined for convenience, for example when all calls to Mask in a given package wish to allow the same set of causes to be returned. #### func New ```go func New(s string) error ``` New returns a new error with the given error message and no cause. It is a drop-in replacement for errors.New from the standard library. #### func Newf ```go func Newf(f string, a ...interface{}) error ``` Newf returns a new error with the given printf-formatted error message and no cause. 
#### func NoteMask ```go func NoteMask(underlying error, msg string, pass ...func(error) bool) error ``` NoteMask returns an Err that has the given underlying error, with the given message added as context, and allowing the cause of the underlying error to pass through into the result if allowed by the specific pass functions (see Mask for an explanation of the pass parameter). #### func Notef ```go func Notef(underlying error, f string, a ...interface{}) error ``` Notef returns an Error that wraps the given underlying error and adds the given formatted context message. The returned error has no cause (use NoteMask or WithCausef to add a message while retaining a cause). #### func WithCausef ```go func WithCausef(underlying, cause error, f string, a ...interface{}) error ``` WithCausef returns a new Error that wraps the given (possibly nil) underlying error and associates it with the given cause. The given formatted message context will also be added. #### type Causer ```go type Causer interface { Cause() error } ``` Causer is the type of an error that may provide an error cause for error diagnosis. Cause may return nil if there is no cause (for example because the cause has been masked). #### type Err ```go type Err struct { // Message_ holds the text of the error message. It may be empty // if Underlying is set. Message_ string // Cause_ holds the cause of the error as returned // by the Cause method. Cause_ error // Underlying holds the underlying error, if any. Underlying_ error // File and Line identify the source code location where the error was // created. File string Line int } ``` Err holds a description of an error along with information about where the error was created. It may be embedded in custom error types to add extra information that this errors package can understand. #### func (*Err) Cause ```go func (e *Err) Cause() error ``` Cause implements Causer. #### func (*Err) Error ```go func (e *Err) Error() string ``` Error implements error.Error. 
#### func (*Err) GoString ```go func (e *Err) GoString() string ``` GoString returns the details of the receiving error message, so that printing an error with %#v will produce useful information. #### func (*Err) Location ```go func (e *Err) Location() (file string, line int) ``` Location implements Locationer. #### func (*Err) Message ```go func (e *Err) Message() string ``` Message returns the top level error message. #### func (*Err) SetLocation ```go func (e *Err) SetLocation(callDepth int) ``` Locate records the source location of the error by setting e.Location, at callDepth stack frames above the call. #### func (*Err) Underlying ```go func (e *Err) Underlying() error ``` Underlying returns the underlying error if any. #### type Locationer ```go type Locationer interface { // Location returns the name of the file and the line // number associated with an error. Location() (file string, line int) } ``` Locationer can be implemented by any error type that wants to expose the source location of an error. #### type Wrapper ```go type Wrapper interface { // Message returns the top level error message, // not including the message from the underlying // error. Message() string // Underlying returns the underlying error, or nil // if there is none. Underlying() error } ``` Wrapper is the type of an error that wraps another error. It is exposed so that external types may implement it, but should in general not be used otherwise. charm-2.1.1/src/gopkg.in/errgo.v1/errors_test.go0000664000175000017500000001622512672604506020461 0ustar marcomarco// Copyright 2014 Roger Peppe. // See LICENCE file for details. 
package errgo_test import ( "fmt" "io/ioutil" "runtime" "strings" "testing" gc "launchpad.net/gocheck" "gopkg.in/errgo.v1" ) var ( _ errgo.Wrapper = (*errgo.Err)(nil) _ errgo.Locationer = (*errgo.Err)(nil) _ errgo.Causer = (*errgo.Err)(nil) ) func Test(t *testing.T) { gc.TestingT(t) } type errorsSuite struct{} var _ = gc.Suite(&errorsSuite{}) func (*errorsSuite) TestNew(c *gc.C) { err := errgo.New("foo") //err TestNew checkErr(c, err, nil, "foo", "[{$TestNew$: foo}]", err) } func (*errorsSuite) TestNewf(c *gc.C) { err := errgo.Newf("foo %d", 5) //err TestNewf checkErr(c, err, nil, "foo 5", "[{$TestNewf$: foo 5}]", err) } var someErr = errgo.New("some error") //err varSomeErr func annotate1() error { err := errgo.Notef(someErr, "annotate1") //err annotate1 return err } func annotate2() error { err := annotate1() err = errgo.Notef(err, "annotate2") //err annotate2 return err } func (*errorsSuite) TestNoteUsage(c *gc.C) { err0 := annotate2() err, ok := err0.(errgo.Wrapper) c.Assert(ok, gc.Equals, true) underlying := err.Underlying() checkErr( c, err0, underlying, "annotate2: annotate1: some error", "[{$annotate2$: annotate2} {$annotate1$: annotate1} {$varSomeErr$: some error}]", err0) } func (*errorsSuite) TestMask(c *gc.C) { err0 := errgo.WithCausef(nil, someErr, "foo") //err TestMask#0 err := errgo.Mask(err0) //err TestMask#1 checkErr(c, err, err0, "foo", "[{$TestMask#1$: } {$TestMask#0$: foo}]", err) err = errgo.Mask(nil) c.Assert(err, gc.IsNil) } func (*errorsSuite) TestNotef(c *gc.C) { err0 := errgo.WithCausef(nil, someErr, "foo") //err TestNotef#0 err := errgo.Notef(err0, "bar") //err TestNotef#1 checkErr(c, err, err0, "bar: foo", "[{$TestNotef#1$: bar} {$TestNotef#0$: foo}]", err) err = errgo.Notef(nil, "bar") //err TestNotef#2 checkErr(c, err, nil, "bar", "[{$TestNotef#2$: bar}]", err) } func (*errorsSuite) TestNoteMask(c *gc.C) { err0 := errgo.WithCausef(nil, someErr, "foo") //err TestNoteMask#0 err := errgo.NoteMask(err0, "bar") //err TestNoteMask#1 
checkErr(c, err, err0, "bar: foo", "[{$TestNoteMask#1$: bar} {$TestNoteMask#0$: foo}]", err) err = errgo.NoteMask(err0, "bar", errgo.Is(someErr)) //err TestNoteMask#2 checkErr(c, err, err0, "bar: foo", "[{$TestNoteMask#2$: bar} {$TestNoteMask#0$: foo}]", someErr) err = errgo.NoteMask(err0, "") //err TestNoteMask#3 checkErr(c, err, err0, "foo", "[{$TestNoteMask#3$: } {$TestNoteMask#0$: foo}]", err) } func (*errorsSuite) TestMaskFunc(c *gc.C) { err0 := errgo.New("zero") err1 := errgo.New("one") allowVals := func(vals ...error) (r []func(error) bool) { for _, val := range vals { r = append(r, errgo.Is(val)) } return } tests := []struct { err error allow0 []func(error) bool allow1 []func(error) bool cause error }{{ err: err0, allow0: allowVals(err0), cause: err0, }, { err: err1, allow0: allowVals(err0), cause: nil, }, { err: err0, allow1: allowVals(err0), cause: err0, }, { err: err0, allow0: allowVals(err1), allow1: allowVals(err0), cause: err0, }, { err: err0, allow0: allowVals(err0, err1), cause: err0, }, { err: err1, allow0: allowVals(err0, err1), cause: err1, }, { err: err0, allow1: allowVals(err0, err1), cause: err0, }, { err: err1, allow1: allowVals(err0, err1), cause: err1, }} for i, test := range tests { c.Logf("test %d", i) wrap := errgo.MaskFunc(test.allow0...) err := wrap(test.err, test.allow1...) 
cause := errgo.Cause(err) wantCause := test.cause if wantCause == nil { wantCause = err } c.Check(cause, gc.Equals, wantCause) } } type embed struct { *errgo.Err } func (*errorsSuite) TestCause(c *gc.C) { c.Assert(errgo.Cause(someErr), gc.Equals, someErr) causeErr := errgo.New("cause error") underlyingErr := errgo.New("underlying error") //err TestCause#1 err := errgo.WithCausef(underlyingErr, causeErr, "foo %d", 99) //err TestCause#2 c.Assert(errgo.Cause(err), gc.Equals, causeErr) checkErr(c, err, underlyingErr, "foo 99: underlying error", "[{$TestCause#2$: foo 99} {$TestCause#1$: underlying error}]", causeErr) err = &embed{err.(*errgo.Err)} c.Assert(errgo.Cause(err), gc.Equals, causeErr) } func (*errorsSuite) TestWithCausefNoMessage(c *gc.C) { cause := errgo.New("cause") err := errgo.WithCausef(nil, cause, "") c.Assert(err, gc.ErrorMatches, "cause") c.Assert(errgo.Cause(err), gc.Equals, cause) } func (*errorsSuite) TestDetails(c *gc.C) { c.Assert(errgo.Details(nil), gc.Equals, "[]") otherErr := fmt.Errorf("other") checkErr(c, otherErr, nil, "other", "[{other}]", otherErr) err0 := &embed{errgo.New("foo").(*errgo.Err)} //err TestStack#0 checkErr(c, err0, nil, "foo", "[{$TestStack#0$: foo}]", err0) err1 := &embed{errgo.Notef(err0, "bar").(*errgo.Err)} //err TestStack#1 checkErr(c, err1, err0, "bar: foo", "[{$TestStack#1$: bar} {$TestStack#0$: foo}]", err1) err2 := errgo.Mask(err1) //err TestStack#2 checkErr(c, err2, err1, "bar: foo", "[{$TestStack#2$: } {$TestStack#1$: bar} {$TestStack#0$: foo}]", err2) } func (*errorsSuite) TestMatch(c *gc.C) { type errTest func(error) bool allow := func(ss ...string) []func(error) bool { fns := make([]func(error) bool, len(ss)) for i, s := range ss { s := s fns[i] = func(err error) bool { return err != nil && err.Error() == s } } return fns } tests := []struct { err error fns []func(error) bool ok bool }{{ err: errgo.New("foo"), fns: allow("foo"), ok: true, }, { err: errgo.New("foo"), fns: allow("bar"), ok: false, }, { err: 
errgo.New("foo"), fns: allow("bar", "foo"), ok: true, }, { err: errgo.New("foo"), fns: nil, ok: false, }, { err: nil, fns: nil, ok: false, }} for i, test := range tests { c.Logf("test %d", i) c.Assert(errgo.Match(test.err, test.fns...), gc.Equals, test.ok) } } func checkErr(c *gc.C, err, underlying error, msg string, details string, cause error) { c.Assert(err, gc.NotNil) c.Assert(err.Error(), gc.Equals, msg) if err, ok := err.(errgo.Wrapper); ok { c.Assert(err.Underlying(), gc.Equals, underlying) } else { c.Assert(underlying, gc.IsNil) } c.Assert(errgo.Cause(err), gc.Equals, cause) wantDetails := replaceLocations(details) c.Assert(errgo.Details(err), gc.Equals, wantDetails) } func replaceLocations(s string) string { t := "" for { i := strings.Index(s, "$") if i == -1 { break } t += s[0:i] s = s[i+1:] i = strings.Index(s, "$") if i == -1 { panic("no second $") } file, line := location(s[0:i]) t += fmt.Sprintf("%s:%d", file, line) s = s[i+1:] } t += s return t } func location(tag string) (string, int) { line, ok := tagToLine[tag] if !ok { panic(fmt.Errorf("tag %q not found", tag)) } return filename, line } var tagToLine = make(map[string]int) var filename string func init() { data, err := ioutil.ReadFile("errors_test.go") if err != nil { panic(err) } lines := strings.Split(string(data), "\n") for i, line := range lines { if j := strings.Index(line, "//err "); j >= 0 { tagToLine[line[j+len("//err "):]] = i + 1 } } _, filename, _, _ = runtime.Caller(0) } charm-2.1.1/src/gopkg.in/mgo.v2/0000775000175000017500000000000012672604565015223 5ustar marcomarcocharm-2.1.1/src/gopkg.in/mgo.v2/doc.go0000664000175000017500000000231712672604565016322 0ustar marcomarco// Package mgo offers a rich MongoDB driver for Go. // // Details about the mgo project (pronounced as "mango") are found // in its web page: // // http://labix.org/mgo // // Usage of the driver revolves around the concept of sessions. 
To // get started, obtain a session using the Dial function: // // session, err := mgo.Dial(url) // // This will establish one or more connections with the cluster of // servers defined by the url parameter. From then on, the cluster // may be queried with multiple consistency rules (see SetMode) and // documents retrieved with statements such as: // // c := session.DB(database).C(collection) // err := c.Find(query).One(&result) // // New sessions are typically created by calling session.Copy on the // initial session obtained at dial time. These new sessions will share // the same cluster information and connection pool, and may be easily // handed into other methods and functions for organizing logic. // Every session created must have its Close method called at the end // of its life time, so its resources may be put back in the pool or // collected, depending on the case. // // For more details, see the documentation for the types and methods. // package mgo charm-2.1.1/src/gopkg.in/mgo.v2/saslimpl.go0000664000175000017500000000034012672604565017373 0ustar marcomarco//+build sasl package mgo import ( "gopkg.in/mgo.v2/internal/sasl" ) func saslNew(cred Credential, host string) (saslStepper, error) { return sasl.New(cred.Username, cred.Password, cred.Mechanism, cred.Service, host) } charm-2.1.1/src/gopkg.in/mgo.v2/bson/0000775000175000017500000000000012672604565016164 5ustar marcomarcocharm-2.1.1/src/gopkg.in/mgo.v2/bson/decode.go0000664000175000017500000004517112672604565017746 0ustar marcomarco// BSON library for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. 
Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // gobson - BSON library for Go. package bson import ( "fmt" "math" "net/url" "reflect" "strconv" "sync" "time" ) type decoder struct { in []byte i int docType reflect.Type } var typeM = reflect.TypeOf(M{}) func newDecoder(in []byte) *decoder { return &decoder{in, 0, typeM} } // -------------------------------------------------------------------------- // Some helper functions. func corrupted() { panic("Document is corrupted") } func settableValueOf(i interface{}) reflect.Value { v := reflect.ValueOf(i) sv := reflect.New(v.Type()).Elem() sv.Set(v) return sv } // -------------------------------------------------------------------------- // Unmarshaling of documents. 
const ( setterUnknown = iota setterNone setterType setterAddr ) var setterStyles map[reflect.Type]int var setterIface reflect.Type var setterMutex sync.RWMutex func init() { var iface Setter setterIface = reflect.TypeOf(&iface).Elem() setterStyles = make(map[reflect.Type]int) } func setterStyle(outt reflect.Type) int { setterMutex.RLock() style := setterStyles[outt] setterMutex.RUnlock() if style == setterUnknown { setterMutex.Lock() defer setterMutex.Unlock() if outt.Implements(setterIface) { setterStyles[outt] = setterType } else if reflect.PtrTo(outt).Implements(setterIface) { setterStyles[outt] = setterAddr } else { setterStyles[outt] = setterNone } style = setterStyles[outt] } return style } func getSetter(outt reflect.Type, out reflect.Value) Setter { style := setterStyle(outt) if style == setterNone { return nil } if style == setterAddr { if !out.CanAddr() { return nil } out = out.Addr() } else if outt.Kind() == reflect.Ptr && out.IsNil() { out.Set(reflect.New(outt.Elem())) } return out.Interface().(Setter) } func clearMap(m reflect.Value) { var none reflect.Value for _, k := range m.MapKeys() { m.SetMapIndex(k, none) } } func (d *decoder) readDocTo(out reflect.Value) { var elemType reflect.Type outt := out.Type() outk := outt.Kind() for { if outk == reflect.Ptr && out.IsNil() { out.Set(reflect.New(outt.Elem())) } if setter := getSetter(outt, out); setter != nil { var raw Raw d.readDocTo(reflect.ValueOf(&raw)) err := setter.SetBSON(raw) if _, ok := err.(*TypeError); err != nil && !ok { panic(err) } return } if outk == reflect.Ptr { out = out.Elem() outt = out.Type() outk = out.Kind() continue } break } var fieldsMap map[string]fieldInfo var inlineMap reflect.Value start := d.i origout := out if outk == reflect.Interface { if d.docType.Kind() == reflect.Map { mv := reflect.MakeMap(d.docType) out.Set(mv) out = mv } else { dv := reflect.New(d.docType).Elem() out.Set(dv) out = dv } outt = out.Type() outk = outt.Kind() } docType := d.docType keyType := typeString 
convertKey := false switch outk { case reflect.Map: keyType = outt.Key() if keyType.Kind() != reflect.String { panic("BSON map must have string keys. Got: " + outt.String()) } if keyType != typeString { convertKey = true } elemType = outt.Elem() if elemType == typeIface { d.docType = outt } if out.IsNil() { out.Set(reflect.MakeMap(out.Type())) } else if out.Len() > 0 { clearMap(out) } case reflect.Struct: if outt != typeRaw { sinfo, err := getStructInfo(out.Type()) if err != nil { panic(err) } fieldsMap = sinfo.FieldsMap out.Set(sinfo.Zero) if sinfo.InlineMap != -1 { inlineMap = out.Field(sinfo.InlineMap) if !inlineMap.IsNil() && inlineMap.Len() > 0 { clearMap(inlineMap) } elemType = inlineMap.Type().Elem() if elemType == typeIface { d.docType = inlineMap.Type() } } } case reflect.Slice: switch outt.Elem() { case typeDocElem: origout.Set(d.readDocElems(outt)) return case typeRawDocElem: origout.Set(d.readRawDocElems(outt)) return } fallthrough default: panic("Unsupported document type for unmarshalling: " + out.Type().String()) } end := int(d.readInt32()) end += d.i - 4 if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { corrupted() } for d.in[d.i] != '\x00' { kind := d.readByte() name := d.readCStr() if d.i >= end { corrupted() } switch outk { case reflect.Map: e := reflect.New(elemType).Elem() if d.readElemTo(e, kind) { k := reflect.ValueOf(name) if convertKey { k = k.Convert(keyType) } out.SetMapIndex(k, e) } case reflect.Struct: if outt == typeRaw { d.dropElem(kind) } else { if info, ok := fieldsMap[name]; ok { if info.Inline == nil { d.readElemTo(out.Field(info.Num), kind) } else { d.readElemTo(out.FieldByIndex(info.Inline), kind) } } else if inlineMap.IsValid() { if inlineMap.IsNil() { inlineMap.Set(reflect.MakeMap(inlineMap.Type())) } e := reflect.New(elemType).Elem() if d.readElemTo(e, kind) { inlineMap.SetMapIndex(reflect.ValueOf(name), e) } } else { d.dropElem(kind) } } case reflect.Slice: } if d.i >= end { corrupted() } } d.i++ // '\x00' if d.i 
!= end { corrupted() } d.docType = docType if outt == typeRaw { out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]})) } } func (d *decoder) readArrayDocTo(out reflect.Value) { end := int(d.readInt32()) end += d.i - 4 if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { corrupted() } i := 0 l := out.Len() for d.in[d.i] != '\x00' { if i >= l { panic("Length mismatch on array field") } kind := d.readByte() for d.i < end && d.in[d.i] != '\x00' { d.i++ } if d.i >= end { corrupted() } d.i++ d.readElemTo(out.Index(i), kind) if d.i >= end { corrupted() } i++ } if i != l { panic("Length mismatch on array field") } d.i++ // '\x00' if d.i != end { corrupted() } } func (d *decoder) readSliceDoc(t reflect.Type) interface{} { tmp := make([]reflect.Value, 0, 8) elemType := t.Elem() if elemType == typeRawDocElem { d.dropElem(0x04) return reflect.Zero(t).Interface() } end := int(d.readInt32()) end += d.i - 4 if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { corrupted() } for d.in[d.i] != '\x00' { kind := d.readByte() for d.i < end && d.in[d.i] != '\x00' { d.i++ } if d.i >= end { corrupted() } d.i++ e := reflect.New(elemType).Elem() if d.readElemTo(e, kind) { tmp = append(tmp, e) } if d.i >= end { corrupted() } } d.i++ // '\x00' if d.i != end { corrupted() } n := len(tmp) slice := reflect.MakeSlice(t, n, n) for i := 0; i != n; i++ { slice.Index(i).Set(tmp[i]) } return slice.Interface() } var typeSlice = reflect.TypeOf([]interface{}{}) var typeIface = typeSlice.Elem() func (d *decoder) readDocElems(typ reflect.Type) reflect.Value { docType := d.docType d.docType = typ slice := make([]DocElem, 0, 8) d.readDocWith(func(kind byte, name string) { e := DocElem{Name: name} v := reflect.ValueOf(&e.Value) if d.readElemTo(v.Elem(), kind) { slice = append(slice, e) } }) slicev := reflect.New(typ).Elem() slicev.Set(reflect.ValueOf(slice)) d.docType = docType return slicev } func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value { docType := d.docType d.docType = typ 
slice := make([]RawDocElem, 0, 8) d.readDocWith(func(kind byte, name string) { e := RawDocElem{Name: name} v := reflect.ValueOf(&e.Value) if d.readElemTo(v.Elem(), kind) { slice = append(slice, e) } }) slicev := reflect.New(typ).Elem() slicev.Set(reflect.ValueOf(slice)) d.docType = docType return slicev } func (d *decoder) readDocWith(f func(kind byte, name string)) { end := int(d.readInt32()) end += d.i - 4 if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { corrupted() } for d.in[d.i] != '\x00' { kind := d.readByte() name := d.readCStr() if d.i >= end { corrupted() } f(kind, name) if d.i >= end { corrupted() } } d.i++ // '\x00' if d.i != end { corrupted() } } // -------------------------------------------------------------------------- // Unmarshaling of individual elements within a document. var blackHole = settableValueOf(struct{}{}) func (d *decoder) dropElem(kind byte) { d.readElemTo(blackHole, kind) } // Attempt to decode an element from the document and put it into out. // If the types are not compatible, the returned ok value will be // false and out will be unchanged. func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { start := d.i if kind == 0x03 { // Delegate unmarshaling of documents. outt := out.Type() outk := out.Kind() switch outk { case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map: d.readDocTo(out) return true } if setterStyle(outt) != setterNone { d.readDocTo(out) return true } if outk == reflect.Slice { switch outt.Elem() { case typeDocElem: out.Set(d.readDocElems(outt)) case typeRawDocElem: out.Set(d.readRawDocElems(outt)) } return true } d.readDocTo(blackHole) return true } var in interface{} switch kind { case 0x01: // Float64 in = d.readFloat64() case 0x02: // UTF-8 string in = d.readStr() case 0x03: // Document panic("Can't happen. Handled above.") case 0x04: // Array outt := out.Type() if setterStyle(outt) != setterNone { // Skip the value so its data is handed to the setter below. 
d.dropElem(kind) break } for outt.Kind() == reflect.Ptr { outt = outt.Elem() } switch outt.Kind() { case reflect.Array: d.readArrayDocTo(out) return true case reflect.Slice: in = d.readSliceDoc(outt) default: in = d.readSliceDoc(typeSlice) } case 0x05: // Binary b := d.readBinary() if b.Kind == 0x00 || b.Kind == 0x02 { in = b.Data } else { in = b } case 0x06: // Undefined (obsolete, but still seen in the wild) in = Undefined case 0x07: // ObjectId in = ObjectId(d.readBytes(12)) case 0x08: // Bool in = d.readBool() case 0x09: // Timestamp // MongoDB handles timestamps as milliseconds. i := d.readInt64() if i == -62135596800000 { in = time.Time{} // In UTC for convenience. } else { in = time.Unix(i/1e3, i%1e3*1e6) } case 0x0A: // Nil in = nil case 0x0B: // RegEx in = d.readRegEx() case 0x0C: in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))} case 0x0D: // JavaScript without scope in = JavaScript{Code: d.readStr()} case 0x0E: // Symbol in = Symbol(d.readStr()) case 0x0F: // JavaScript with scope d.i += 4 // Skip length js := JavaScript{d.readStr(), make(M)} d.readDocTo(reflect.ValueOf(js.Scope)) in = js case 0x10: // Int32 in = int(d.readInt32()) case 0x11: // Mongo-specific timestamp in = MongoTimestamp(d.readInt64()) case 0x12: // Int64 in = d.readInt64() case 0x7F: // Max key in = MaxKey case 0xFF: // Min key in = MinKey default: panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind)) } outt := out.Type() if outt == typeRaw { out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]})) return true } if setter := getSetter(outt, out); setter != nil { err := setter.SetBSON(Raw{kind, d.in[start:d.i]}) if err == SetZero { out.Set(reflect.Zero(outt)) return true } if err == nil { return true } if _, ok := err.(*TypeError); !ok { panic(err) } return false } if in == nil { out.Set(reflect.Zero(outt)) return true } outk := outt.Kind() // Dereference and initialize pointer if necessary. 
first := true for outk == reflect.Ptr { if !out.IsNil() { out = out.Elem() } else { elem := reflect.New(outt.Elem()) if first { // Only set if value is compatible. first = false defer func(out, elem reflect.Value) { if good { out.Set(elem) } }(out, elem) } else { out.Set(elem) } out = elem } outt = out.Type() outk = outt.Kind() } inv := reflect.ValueOf(in) if outt == inv.Type() { out.Set(inv) return true } switch outk { case reflect.Interface: out.Set(inv) return true case reflect.String: switch inv.Kind() { case reflect.String: out.SetString(inv.String()) return true case reflect.Slice: if b, ok := in.([]byte); ok { out.SetString(string(b)) return true } case reflect.Int, reflect.Int64: if outt == typeJSONNumber { out.SetString(strconv.FormatInt(inv.Int(), 10)) return true } case reflect.Float64: if outt == typeJSONNumber { out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64)) return true } } case reflect.Slice, reflect.Array: // Remember, array (0x04) slices are built with the correct // element type. If we are here, must be a cross BSON kind // conversion (e.g. 0x05 unmarshalling on string). 
if outt.Elem().Kind() != reflect.Uint8 { break } switch inv.Kind() { case reflect.String: slice := []byte(inv.String()) out.Set(reflect.ValueOf(slice)) return true case reflect.Slice: switch outt.Kind() { case reflect.Array: reflect.Copy(out, inv) case reflect.Slice: out.SetBytes(inv.Bytes()) } return true } case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: switch inv.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: out.SetInt(inv.Int()) return true case reflect.Float32, reflect.Float64: out.SetInt(int64(inv.Float())) return true case reflect.Bool: if inv.Bool() { out.SetInt(1) } else { out.SetInt(0) } return true case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: panic("can't happen: no uint types in BSON (!?)") } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: switch inv.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: out.SetUint(uint64(inv.Int())) return true case reflect.Float32, reflect.Float64: out.SetUint(uint64(inv.Float())) return true case reflect.Bool: if inv.Bool() { out.SetUint(1) } else { out.SetUint(0) } return true case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: panic("Can't happen. No uint types in BSON.") } case reflect.Float32, reflect.Float64: switch inv.Kind() { case reflect.Float32, reflect.Float64: out.SetFloat(inv.Float()) return true case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: out.SetFloat(float64(inv.Int())) return true case reflect.Bool: if inv.Bool() { out.SetFloat(1) } else { out.SetFloat(0) } return true case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: panic("Can't happen. 
No uint types in BSON?") } case reflect.Bool: switch inv.Kind() { case reflect.Bool: out.SetBool(inv.Bool()) return true case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: out.SetBool(inv.Int() != 0) return true case reflect.Float32, reflect.Float64: out.SetBool(inv.Float() != 0) return true case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: panic("Can't happen. No uint types in BSON?") } case reflect.Struct: if outt == typeURL && inv.Kind() == reflect.String { u, err := url.Parse(inv.String()) if err != nil { panic(err) } out.Set(reflect.ValueOf(u).Elem()) return true } if outt == typeBinary { if b, ok := in.([]byte); ok { out.Set(reflect.ValueOf(Binary{Data: b})) return true } } } return false } // -------------------------------------------------------------------------- // Parsers of basic types. func (d *decoder) readRegEx() RegEx { re := RegEx{} re.Pattern = d.readCStr() re.Options = d.readCStr() return re } func (d *decoder) readBinary() Binary { l := d.readInt32() b := Binary{} b.Kind = d.readByte() b.Data = d.readBytes(l) if b.Kind == 0x02 && len(b.Data) >= 4 { // Weird obsolete format with redundant length. 
b.Data = b.Data[4:] } return b } func (d *decoder) readStr() string { l := d.readInt32() b := d.readBytes(l - 1) if d.readByte() != '\x00' { corrupted() } return string(b) } func (d *decoder) readCStr() string { start := d.i end := start l := len(d.in) for ; end != l; end++ { if d.in[end] == '\x00' { break } } d.i = end + 1 if d.i > l { corrupted() } return string(d.in[start:end]) } func (d *decoder) readBool() bool { b := d.readByte() if b == 0 { return false } if b == 1 { return true } panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b)) } func (d *decoder) readFloat64() float64 { return math.Float64frombits(uint64(d.readInt64())) } func (d *decoder) readInt32() int32 { b := d.readBytes(4) return int32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)) } func (d *decoder) readInt64() int64 { b := d.readBytes(8) return int64((uint64(b[0]) << 0) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) | (uint64(b[4]) << 32) | (uint64(b[5]) << 40) | (uint64(b[6]) << 48) | (uint64(b[7]) << 56)) } func (d *decoder) readByte() byte { i := d.i d.i++ if d.i > len(d.in) { corrupted() } return d.in[i] } func (d *decoder) readBytes(length int32) []byte { if length < 0 { corrupted() } start := d.i d.i += int(length) if d.i < start || d.i > len(d.in) { corrupted() } return d.in[start : start+int(length)] } charm-2.1.1/src/gopkg.in/mgo.v2/bson/specdata/0000775000175000017500000000000012672604565017750 5ustar marcomarcocharm-2.1.1/src/gopkg.in/mgo.v2/bson/specdata/update.sh0000775000175000017500000000057012672604565021573 0ustar marcomarco#!/bin/sh set -e if [ ! 
-d specifications ]; then git clone -b bson git@github.com:jyemin/specifications fi TESTFILE="../specdata_test.go" cat < $TESTFILE package bson_test var specTests = []string{ END for file in specifications/source/bson/tests/*.yml; do ( echo '`' cat $file echo -n '`,' ) >> $TESTFILE done echo '}' >> $TESTFILE gofmt -w $TESTFILE charm-2.1.1/src/gopkg.in/mgo.v2/bson/LICENSE0000664000175000017500000000251412672604565017173 0ustar marcomarcoBSON library for Go Copyright (c) 2010-2012 - Gustavo Niemeyer All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
charm-2.1.1/src/gopkg.in/mgo.v2/bson/specdata_test.go0000664000175000017500000001311012672604565021332 0ustar marcomarcopackage bson_test var specTests = []string{ ` --- description: "Array type" documents: - decoded: a : [] encoded: 0D000000046100050000000000 - decoded: a: [10] encoded: 140000000461000C0000001030000A0000000000 - # Decode an array that uses an empty string as the key decodeOnly : true decoded: a: [10] encoded: 130000000461000B00000010000A0000000000 - # Decode an array that uses a non-numeric string as the key decodeOnly : true decoded: a: [10] encoded: 150000000461000D000000106162000A0000000000 `, ` --- description: "Boolean type" documents: - encoded: "090000000862000100" decoded: { "b" : true } - encoded: "090000000862000000" decoded: { "b" : false } `, ` --- description: "Corrupted BSON" documents: - encoded: "09000000016600" error: "truncated double" - encoded: "09000000026600" error: "truncated string" - encoded: "09000000036600" error: "truncated document" - encoded: "09000000046600" error: "truncated array" - encoded: "09000000056600" error: "truncated binary" - encoded: "09000000076600" error: "truncated objectid" - encoded: "09000000086600" error: "truncated boolean" - encoded: "09000000096600" error: "truncated date" - encoded: "090000000b6600" error: "truncated regex" - encoded: "090000000c6600" error: "truncated db pointer" - encoded: "0C0000000d6600" error: "truncated javascript" - encoded: "0C0000000e6600" error: "truncated symbol" - encoded: "0C0000000f6600" error: "truncated javascript with scope" - encoded: "0C000000106600" error: "truncated int32" - encoded: "0C000000116600" error: "truncated timestamp" - encoded: "0C000000126600" error: "truncated int64" - encoded: "0400000000" error: basic - encoded: "0500000001" error: basic - encoded: "05000000" error: basic - encoded: "0700000002610078563412" error: basic - encoded: "090000001061000500" error: basic - encoded: "00000000000000000000" error: basic - encoded: 
"1300000002666f6f00040000006261720000" error: "basic" - encoded: "1800000003666f6f000f0000001062617200ffffff7f0000" error: basic - encoded: "1500000003666f6f000c0000000862617200010000" error: basic - encoded: "1c00000003666f6f001200000002626172000500000062617a000000" error: basic - encoded: "1000000002610004000000616263ff00" error: string is not null-terminated - encoded: "0c0000000200000000000000" error: bad_string_length - encoded: "120000000200ffffffff666f6f6261720000" error: bad_string_length - encoded: "0c0000000e00000000000000" error: bad_string_length - encoded: "120000000e00ffffffff666f6f6261720000" error: bad_string_length - encoded: "180000000c00fa5bd841d6585d9900" error: "" - encoded: "1e0000000c00ffffffff666f6f626172005259b56afa5bd841d6585d9900" error: bad_string_length - encoded: "0c0000000d00000000000000" error: bad_string_length - encoded: "0c0000000d00ffffffff0000" error: bad_string_length - encoded: "1c0000000f001500000000000000000c000000020001000000000000" error: bad_string_length - encoded: "1c0000000f0015000000ffffffff000c000000020001000000000000" error: bad_string_length - encoded: "1c0000000f001500000001000000000c000000020000000000000000" error: bad_string_length - encoded: "1c0000000f001500000001000000000c0000000200ffffffff000000" error: bad_string_length - encoded: "0E00000008616263646566676869707172737475" error: "Run-on CString" - encoded: "0100000000" error: "An object size that's too small to even include the object size, but is correctly encoded, along with a correct EOO (and no data)" - encoded: "1a0000000e74657374000c00000068656c6c6f20776f726c6400000500000000" error: "One object, but with object size listed smaller than it is in the data" - encoded: "05000000" error: "One object, missing the EOO at the end" - encoded: "0500000001" error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x01" - encoded: "05000000ff" error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0xff" - encoded: 
"0500000070" error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x70" - encoded: "07000000000000" error: "Invalid BSON type low range" - encoded: "07000000800000" error: "Invalid BSON type high range" - encoded: "090000000862000200" error: "Invalid boolean value of 2" - encoded: "09000000086200ff00" error: "Invalid boolean value of -1" `, ` --- description: "Int32 type" documents: - decoded: i: -2147483648 encoded: 0C0000001069000000008000 - decoded: i: 2147483647 encoded: 0C000000106900FFFFFF7F00 - decoded: i: -1 encoded: 0C000000106900FFFFFFFF00 - decoded: i: 0 encoded: 0C0000001069000000000000 - decoded: i: 1 encoded: 0C0000001069000100000000 `, ` --- description: "String type" documents: - decoded: s : "" encoded: 0D000000027300010000000000 - decoded: s: "a" encoded: 0E00000002730002000000610000 - decoded: s: "This is a string" encoded: 1D0000000273001100000054686973206973206120737472696E670000 - decoded: s: "κόσμε" encoded: 180000000273000C000000CEBAE1BDB9CF83CEBCCEB50000 `} charm-2.1.1/src/gopkg.in/mgo.v2/bson/bson.go0000664000175000017500000005410412672604565017460 0ustar marcomarco// BSON library for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Package bson is an implementation of the BSON specification for Go: // // http://bsonspec.org // // It was created as part of the mgo MongoDB driver for Go, but is standalone // and may be used on its own without the driver. package bson import ( "bytes" "crypto/md5" "crypto/rand" "encoding/binary" "encoding/hex" "errors" "fmt" "io" "os" "reflect" "runtime" "strings" "sync" "sync/atomic" "time" ) // -------------------------------------------------------------------------- // The public API. // A value implementing the bson.Getter interface will have its GetBSON // method called when the given value has to be marshalled, and the result // of this method will be marshaled in place of the actual object. // // If GetBSON returns return a non-nil error, the marshalling procedure // will stop and error out with the provided value. type Getter interface { GetBSON() (interface{}, error) } // A value implementing the bson.Setter interface will receive the BSON // value via the SetBSON method during unmarshaling, and the object // itself will not be changed as usual. // // If setting the value works, the method should return nil or alternatively // bson.SetZero to set the respective field to its zero value (nil for // pointer types). If SetBSON returns a value of type bson.TypeError, the // BSON value will be omitted from a map or slice being decoded and the // unmarshalling will continue. 
If it returns any other non-nil error, the // unmarshalling procedure will stop and error out with the provided value. // // This interface is generally useful in pointer receivers, since the method // will want to change the receiver. A type field that implements the Setter // interface doesn't have to be a pointer, though. // // Unlike the usual behavior, unmarshalling onto a value that implements a // Setter interface will NOT reset the value to its zero state. This allows // the value to decide by itself how to be unmarshalled. // // For example: // // type MyString string // // func (s *MyString) SetBSON(raw bson.Raw) error { // return raw.Unmarshal(s) // } // type Setter interface { SetBSON(raw Raw) error } // SetZero may be returned from a SetBSON method to have the value set to // its respective zero value. When used in pointer values, this will set the // field to nil rather than to the pre-allocated value. var SetZero = errors.New("set to zero") // M is a convenient alias for a map[string]interface{} map, useful for // dealing with BSON in a native way. For instance: // // bson.M{"a": 1, "b": true} // // There's no special handling for this type in addition to what's done anyway // for an equivalent map type. Elements in the map will be dumped in an // undefined ordered. See also the bson.D type for an ordered alternative. type M map[string]interface{} // D represents a BSON document containing ordered elements. For example: // // bson.D{{"a", 1}, {"b", true}} // // In some situations, such as when creating indexes for MongoDB, the order in // which the elements are defined is important. If the order is not important, // using a map is generally more comfortable. See bson.M and bson.RawD. type D []DocElem // DocElem is an element of the bson.D document representation. type DocElem struct { Name string Value interface{} } // Map returns a map out of the ordered element name/value pairs in d. 
func (d D) Map() (m M) { m = make(M, len(d)) for _, item := range d { m[item.Name] = item.Value } return m } // The Raw type represents raw unprocessed BSON documents and elements. // Kind is the kind of element as defined per the BSON specification, and // Data is the raw unprocessed data for the respective element. // Using this type it is possible to unmarshal or marshal values partially. // // Relevant documentation: // // http://bsonspec.org/#/specification // type Raw struct { Kind byte Data []byte } // RawD represents a BSON document containing raw unprocessed elements. // This low-level representation may be useful when lazily processing // documents of uncertain content, or when manipulating the raw content // documents in general. type RawD []RawDocElem // See the RawD type. type RawDocElem struct { Name string Value Raw } // ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes // long. MongoDB objects by default have such a property set in their "_id" // property. // // http://www.mongodb.org/display/DOCS/Object+IDs type ObjectId string // ObjectIdHex returns an ObjectId from the provided hex representation. // Calling this function with an invalid hex representation will // cause a runtime panic. See the IsObjectIdHex function. func ObjectIdHex(s string) ObjectId { d, err := hex.DecodeString(s) if err != nil || len(d) != 12 { panic(fmt.Sprintf("Invalid input to ObjectIdHex: %q", s)) } return ObjectId(d) } // IsObjectIdHex returns whether s is a valid hex representation of // an ObjectId. See the ObjectIdHex function. func IsObjectIdHex(s string) bool { if len(s) != 24 { return false } _, err := hex.DecodeString(s) return err == nil } // objectIdCounter is atomically incremented when generating a new ObjectId // using NewObjectId() function. It's used as a counter part of an id. var objectIdCounter uint32 = readRandomUint32() // readRandomUint32 returns a random objectIdCounter. 
func readRandomUint32() uint32 { var b [4]byte _, err := io.ReadFull(rand.Reader, b[:]) if err != nil { panic(fmt.Errorf("cannot read random object id: %v", err)) } return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)) } // machineId stores machine id generated once and used in subsequent calls // to NewObjectId function. var machineId = readMachineId() // readMachineId generates and returns a machine id. // If this function fails to get the hostname it will cause a runtime error. func readMachineId() []byte { var sum [3]byte id := sum[:] hostname, err1 := os.Hostname() if err1 != nil { _, err2 := io.ReadFull(rand.Reader, id) if err2 != nil { panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2)) } return id } hw := md5.New() hw.Write([]byte(hostname)) copy(id, hw.Sum(nil)) return id } // NewObjectId returns a new unique ObjectId. func NewObjectId() ObjectId { var b [12]byte // Timestamp, 4 bytes, big endian binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix())) // Machine, first 3 bytes of md5(hostname) b[4] = machineId[0] b[5] = machineId[1] b[6] = machineId[2] // Pid, 2 bytes, specs don't specify endianness, but we use big endian. pid := os.Getpid() b[7] = byte(pid >> 8) b[8] = byte(pid) // Increment, 3 bytes, big endian i := atomic.AddUint32(&objectIdCounter, 1) b[9] = byte(i >> 16) b[10] = byte(i >> 8) b[11] = byte(i) return ObjectId(b[:]) } // NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled // with the provided number of seconds from epoch UTC, and all other parts // filled with zeroes. It's not safe to insert a document with an id generated // by this method, it is useful only for queries to find documents with ids // generated before or after the specified timestamp. func NewObjectIdWithTime(t time.Time) ObjectId { var b [12]byte binary.BigEndian.PutUint32(b[:4], uint32(t.Unix())) return ObjectId(string(b[:])) } // String returns a hex string representation of the id. 
// Example: ObjectIdHex("4d88e15b60f486e428412dc9"). func (id ObjectId) String() string { return fmt.Sprintf(`ObjectIdHex("%x")`, string(id)) } // Hex returns a hex representation of the ObjectId. func (id ObjectId) Hex() string { return hex.EncodeToString([]byte(id)) } // MarshalJSON turns a bson.ObjectId into a json.Marshaller. func (id ObjectId) MarshalJSON() ([]byte, error) { return []byte(fmt.Sprintf(`"%x"`, string(id))), nil } var nullBytes = []byte("null") // UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller. func (id *ObjectId) UnmarshalJSON(data []byte) error { if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) { *id = "" return nil } if len(data) != 26 || data[0] != '"' || data[25] != '"' { return errors.New(fmt.Sprintf("Invalid ObjectId in JSON: %s", string(data))) } var buf [12]byte _, err := hex.Decode(buf[:], data[1:25]) if err != nil { return errors.New(fmt.Sprintf("Invalid ObjectId in JSON: %s (%s)", string(data), err)) } *id = ObjectId(string(buf[:])) return nil } // Valid returns true if id is valid. A valid id must contain exactly 12 bytes. func (id ObjectId) Valid() bool { return len(id) == 12 } // byteSlice returns byte slice of id from start to end. // Calling this function with an invalid id will cause a runtime panic. func (id ObjectId) byteSlice(start, end int) []byte { if len(id) != 12 { panic(fmt.Sprintf("Invalid ObjectId: %q", string(id))) } return []byte(string(id)[start:end]) } // Time returns the timestamp part of the id. // It's a runtime error to call this method with an invalid id. func (id ObjectId) Time() time.Time { // First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch. secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4))) return time.Unix(secs, 0) } // Machine returns the 3-byte machine id part of the id. // It's a runtime error to call this method with an invalid id. 
func (id ObjectId) Machine() []byte { return id.byteSlice(4, 7) } // Pid returns the process id part of the id. // It's a runtime error to call this method with an invalid id. func (id ObjectId) Pid() uint16 { return binary.BigEndian.Uint16(id.byteSlice(7, 9)) } // Counter returns the incrementing value part of the id. // It's a runtime error to call this method with an invalid id. func (id ObjectId) Counter() int32 { b := id.byteSlice(9, 12) // Counter is stored as big-endian 3-byte value return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])) } // The Symbol type is similar to a string and is used in languages with a // distinct symbol type. type Symbol string // Now returns the current time with millisecond precision. MongoDB stores // timestamps with the same precision, so a Time returned from this method // will not change after a roundtrip to the database. That's the only reason // why this function exists. Using the time.Now function also works fine // otherwise. func Now() time.Time { return time.Unix(0, time.Now().UnixNano()/1e6*1e6) } // MongoTimestamp is a special internal type used by MongoDB that for some // strange reason has its own datatype defined in BSON. type MongoTimestamp int64 type orderKey int64 // MaxKey is a special value that compares higher than all other possible BSON // values in a MongoDB database. var MaxKey = orderKey(1<<63 - 1) // MinKey is a special value that compares lower than all other possible BSON // values in a MongoDB database. var MinKey = orderKey(-1 << 63) type undefined struct{} // Undefined represents the undefined BSON value. var Undefined undefined // Binary is a representation for non-standard binary values. Any kind should // work, but the following are known as of this writing: // // 0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}. // 0x01 - Function (!?) // 0x02 - Obsolete generic. // 0x03 - UUID // 0x05 - MD5 // 0x80 - User defined. 
// type Binary struct { Kind byte Data []byte } // RegEx represents a regular expression. The Options field may contain // individual characters defining the way in which the pattern should be // applied, and must be sorted. Valid options as of this writing are 'i' for // case insensitive matching, 'm' for multi-line matching, 'x' for verbose // mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all // mode (a '.' matches everything), and 'u' to make \w, \W, and similar match // unicode. The value of the Options parameter is not verified before being // marshaled into the BSON format. type RegEx struct { Pattern string Options string } // JavaScript is a type that holds JavaScript code. If Scope is non-nil, it // will be marshaled as a mapping from identifiers to values that may be // used when evaluating the provided Code. type JavaScript struct { Code string Scope interface{} } // DBPointer refers to a document id in a namespace. // // This type is deprecated in the BSON specification and should not be used // except for backwards compatibility with ancient applications. type DBPointer struct { Namespace string Id ObjectId } const initialBufferSize = 64 func handleErr(err *error) { if r := recover(); r != nil { if _, ok := r.(runtime.Error); ok { panic(r) } else if _, ok := r.(externalPanic); ok { panic(r) } else if s, ok := r.(string); ok { *err = errors.New(s) } else if e, ok := r.(error); ok { *err = e } else { panic(r) } } } // Marshal serializes the in value, which may be a map or a struct value. // In the case of struct values, only exported fields will be serialized, // and the order of serialized fields will match that of the struct itself. // The lowercased field name is used as the key for each exported field, // but this behavior may be changed using the respective field tag. // The tag may also contain flags to tweak the marshalling behavior for // the field. The tag formats accepted are: // // "[][,[,]]" // // `(...) 
bson:"[][,[,]]" (...)` // // The following flags are currently supported: // // omitempty Only include the field if it's not set to the zero // value for the type or to empty slices or maps. // // minsize Marshal an int64 value as an int32, if that's feasible // while preserving the numeric value. // // inline Inline the field, which must be a struct or a map, // causing all of its fields or keys to be processed as if // they were part of the outer struct. For maps, keys must // not conflict with the bson keys of other struct fields. // // Some examples: // // type T struct { // A bool // B int "myb" // C string "myc,omitempty" // D string `bson:",omitempty" json:"jsonkey"` // E int64 ",minsize" // F int64 "myf,omitempty,minsize" // } // func Marshal(in interface{}) (out []byte, err error) { defer handleErr(&err) e := &encoder{make([]byte, 0, initialBufferSize)} e.addDoc(reflect.ValueOf(in)) return e.out, nil } // Unmarshal deserializes data from in into the out value. The out value // must be a map, a pointer to a struct, or a pointer to a bson.D value. // The lowercased field name is used as the key for each exported field, // but this behavior may be changed using the respective field tag. // The tag may also contain flags to tweak the marshalling behavior for // the field. The tag formats accepted are: // // "[][,[,]]" // // `(...) bson:"[][,[,]]" (...)` // // The following flags are currently supported during unmarshal (see the // Marshal method for other flags): // // inline Inline the field, which must be a struct or a map. // Inlined structs are handled as if its fields were part // of the outer struct. An inlined map causes keys that do // not match any other struct field to be inserted in the // map rather than being discarded as usual. // // The target field or element types of out may not necessarily match // the BSON values of the provided data. 
The following conversions are // made automatically: // // - Numeric types are converted if at least the integer part of the // value would be preserved correctly // - Bools are converted to numeric types as 1 or 0 // - Numeric types are converted to bools as true if not 0 or false otherwise // - Binary and string BSON data is converted to a string, array or byte slice // // If the value would not fit the type and cannot be converted, it's // silently skipped. // // Pointer values are initialized when necessary. func Unmarshal(in []byte, out interface{}) (err error) { if raw, ok := out.(*Raw); ok { raw.Kind = 3 raw.Data = in return nil } defer handleErr(&err) v := reflect.ValueOf(out) switch v.Kind() { case reflect.Ptr: fallthrough case reflect.Map: d := newDecoder(in) d.readDocTo(v) case reflect.Struct: return errors.New("Unmarshal can't deal with struct values. Use a pointer.") default: return errors.New("Unmarshal needs a map or a pointer to a struct.") } return nil } // Unmarshal deserializes raw into the out value. If the out value type // is not compatible with raw, a *bson.TypeError is returned. // // See the Unmarshal function documentation for more details on the // unmarshalling process. func (raw Raw) Unmarshal(out interface{}) (err error) { defer handleErr(&err) v := reflect.ValueOf(out) switch v.Kind() { case reflect.Ptr: v = v.Elem() fallthrough case reflect.Map: d := newDecoder(raw.Data) good := d.readElemTo(v, raw.Kind) if !good { return &TypeError{v.Type(), raw.Kind} } case reflect.Struct: return errors.New("Raw Unmarshal can't deal with struct values. 
Use a pointer.") default: return errors.New("Raw Unmarshal needs a map or a valid pointer.") } return nil } type TypeError struct { Type reflect.Type Kind byte } func (e *TypeError) Error() string { return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String()) } // -------------------------------------------------------------------------- // Maintain a mapping of keys to structure field indexes type structInfo struct { FieldsMap map[string]fieldInfo FieldsList []fieldInfo InlineMap int Zero reflect.Value } type fieldInfo struct { Key string Num int OmitEmpty bool MinSize bool Inline []int } var structMap = make(map[reflect.Type]*structInfo) var structMapMutex sync.RWMutex type externalPanic string func (e externalPanic) String() string { return string(e) } func getStructInfo(st reflect.Type) (*structInfo, error) { structMapMutex.RLock() sinfo, found := structMap[st] structMapMutex.RUnlock() if found { return sinfo, nil } n := st.NumField() fieldsMap := make(map[string]fieldInfo) fieldsList := make([]fieldInfo, 0, n) inlineMap := -1 for i := 0; i != n; i++ { field := st.Field(i) if field.PkgPath != "" { continue // Private field } info := fieldInfo{Num: i} tag := field.Tag.Get("bson") if tag == "" && strings.Index(string(field.Tag), ":") < 0 { tag = string(field.Tag) } if tag == "-" { continue } // XXX Drop this after a few releases. 
if s := strings.Index(tag, "/"); s >= 0 { recommend := tag[:s] for _, c := range tag[s+1:] { switch c { case 'c': recommend += ",omitempty" case 's': recommend += ",minsize" default: msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", string([]byte{uint8(c)}), tag, st) panic(externalPanic(msg)) } } msg := fmt.Sprintf("Replace tag %q in field %s of type %s by %q", tag, field.Name, st, recommend) panic(externalPanic(msg)) } inline := false fields := strings.Split(tag, ",") if len(fields) > 1 { for _, flag := range fields[1:] { switch flag { case "omitempty": info.OmitEmpty = true case "minsize": info.MinSize = true case "inline": inline = true default: msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st) panic(externalPanic(msg)) } } tag = fields[0] } if inline { switch field.Type.Kind() { case reflect.Map: if inlineMap >= 0 { return nil, errors.New("Multiple ,inline maps in struct " + st.String()) } if field.Type.Key() != reflect.TypeOf("") { return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) } inlineMap = info.Num case reflect.Struct: sinfo, err := getStructInfo(field.Type) if err != nil { return nil, err } for _, finfo := range sinfo.FieldsList { if _, found := fieldsMap[finfo.Key]; found { msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() return nil, errors.New(msg) } if finfo.Inline == nil { finfo.Inline = []int{i, finfo.Num} } else { finfo.Inline = append([]int{i}, finfo.Inline...) 
} fieldsMap[finfo.Key] = finfo fieldsList = append(fieldsList, finfo) } default: panic("Option ,inline needs a struct value or map field") } continue } if tag != "" { info.Key = tag } else { info.Key = strings.ToLower(field.Name) } if _, found = fieldsMap[info.Key]; found { msg := "Duplicated key '" + info.Key + "' in struct " + st.String() return nil, errors.New(msg) } fieldsList = append(fieldsList, info) fieldsMap[info.Key] = info } sinfo = &structInfo{ fieldsMap, fieldsList, inlineMap, reflect.New(st).Elem(), } structMapMutex.Lock() structMap[st] = sinfo structMapMutex.Unlock() return sinfo, nil } charm-2.1.1/src/gopkg.in/mgo.v2/bson/bson_test.go0000664000175000017500000014161012672604565020516 0ustar marcomarco// BSON library for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // gobson - BSON library for Go. package bson_test import ( "encoding/binary" "encoding/hex" "encoding/json" "errors" "net/url" "reflect" "strings" "testing" "time" . "gopkg.in/check.v1" "gopkg.in/mgo.v2/bson" "gopkg.in/yaml.v2" ) func TestAll(t *testing.T) { TestingT(t) } type S struct{} var _ = Suite(&S{}) // Wrap up the document elements contained in data, prepending the int32 // length of the data, and appending the '\x00' value closing the document. func wrapInDoc(data string) string { result := make([]byte, len(data)+5) binary.LittleEndian.PutUint32(result, uint32(len(result))) copy(result[4:], []byte(data)) return string(result) } func makeZeroDoc(value interface{}) (zero interface{}) { v := reflect.ValueOf(value) t := v.Type() switch t.Kind() { case reflect.Map: mv := reflect.MakeMap(t) zero = mv.Interface() case reflect.Ptr: pv := reflect.New(v.Type().Elem()) zero = pv.Interface() case reflect.Slice, reflect.Int: zero = reflect.New(t).Interface() default: panic("unsupported doc type") } return zero } func testUnmarshal(c *C, data string, obj interface{}) { zero := makeZeroDoc(obj) err := bson.Unmarshal([]byte(data), zero) c.Assert(err, IsNil) c.Assert(zero, DeepEquals, obj) } type testItemType struct { obj interface{} data string } // -------------------------------------------------------------------------- // Samples from bsonspec.org: var sampleItems = []testItemType{ {bson.M{"hello": "world"}, 
"\x16\x00\x00\x00\x02hello\x00\x06\x00\x00\x00world\x00\x00"}, {bson.M{"BSON": []interface{}{"awesome", float64(5.05), 1986}}, "1\x00\x00\x00\x04BSON\x00&\x00\x00\x00\x020\x00\x08\x00\x00\x00" + "awesome\x00\x011\x00333333\x14@\x102\x00\xc2\x07\x00\x00\x00\x00"}, } func (s *S) TestMarshalSampleItems(c *C) { for i, item := range sampleItems { data, err := bson.Marshal(item.obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, item.data, Commentf("Failed on item %d", i)) } } func (s *S) TestUnmarshalSampleItems(c *C) { for i, item := range sampleItems { value := bson.M{} err := bson.Unmarshal([]byte(item.data), value) c.Assert(err, IsNil) c.Assert(value, DeepEquals, item.obj, Commentf("Failed on item %d", i)) } } // -------------------------------------------------------------------------- // Every type, ordered by the type flag. These are not wrapped with the // length and last \x00 from the document. wrapInDoc() computes them. // Note that all of them should be supported as two-way conversions. var allItems = []testItemType{ {bson.M{}, ""}, {bson.M{"_": float64(5.05)}, "\x01_\x00333333\x14@"}, {bson.M{"_": "yo"}, "\x02_\x00\x03\x00\x00\x00yo\x00"}, {bson.M{"_": bson.M{"a": true}}, "\x03_\x00\x09\x00\x00\x00\x08a\x00\x01\x00"}, {bson.M{"_": []interface{}{true, false}}, "\x04_\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, {bson.M{"_": []byte("yo")}, "\x05_\x00\x02\x00\x00\x00\x00yo"}, {bson.M{"_": bson.Binary{0x80, []byte("udef")}}, "\x05_\x00\x04\x00\x00\x00\x80udef"}, {bson.M{"_": bson.Undefined}, // Obsolete, but still seen in the wild. "\x06_\x00"}, {bson.M{"_": bson.ObjectId("0123456789ab")}, "\x07_\x000123456789ab"}, {bson.M{"_": bson.DBPointer{"testnamespace", bson.ObjectId("0123456789ab")}}, "\x0C_\x00\x0e\x00\x00\x00testnamespace\x000123456789ab"}, {bson.M{"_": false}, "\x08_\x00\x00"}, {bson.M{"_": true}, "\x08_\x00\x01"}, {bson.M{"_": time.Unix(0, 258e6)}, // Note the NS <=> MS conversion. 
"\x09_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, {bson.M{"_": nil}, "\x0A_\x00"}, {bson.M{"_": bson.RegEx{"ab", "cd"}}, "\x0B_\x00ab\x00cd\x00"}, {bson.M{"_": bson.JavaScript{"code", nil}}, "\x0D_\x00\x05\x00\x00\x00code\x00"}, {bson.M{"_": bson.Symbol("sym")}, "\x0E_\x00\x04\x00\x00\x00sym\x00"}, {bson.M{"_": bson.JavaScript{"code", bson.M{"": nil}}}, "\x0F_\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" + "\x07\x00\x00\x00\x0A\x00\x00"}, {bson.M{"_": 258}, "\x10_\x00\x02\x01\x00\x00"}, {bson.M{"_": bson.MongoTimestamp(258)}, "\x11_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, {bson.M{"_": int64(258)}, "\x12_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, {bson.M{"_": int64(258 << 32)}, "\x12_\x00\x00\x00\x00\x00\x02\x01\x00\x00"}, {bson.M{"_": bson.MaxKey}, "\x7F_\x00"}, {bson.M{"_": bson.MinKey}, "\xFF_\x00"}, } func (s *S) TestMarshalAllItems(c *C) { for i, item := range allItems { data, err := bson.Marshal(item.obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, wrapInDoc(item.data), Commentf("Failed on item %d: %#v", i, item)) } } func (s *S) TestUnmarshalAllItems(c *C) { for i, item := range allItems { value := bson.M{} err := bson.Unmarshal([]byte(wrapInDoc(item.data)), value) c.Assert(err, IsNil) c.Assert(value, DeepEquals, item.obj, Commentf("Failed on item %d: %#v", i, item)) } } func (s *S) TestUnmarshalRawAllItems(c *C) { for i, item := range allItems { if len(item.data) == 0 { continue } value := item.obj.(bson.M)["_"] if value == nil { continue } pv := reflect.New(reflect.ValueOf(value).Type()) raw := bson.Raw{item.data[0], []byte(item.data[3:])} c.Logf("Unmarshal raw: %#v, %#v", raw, pv.Interface()) err := raw.Unmarshal(pv.Interface()) c.Assert(err, IsNil) c.Assert(pv.Elem().Interface(), DeepEquals, value, Commentf("Failed on item %d: %#v", i, item)) } } func (s *S) TestUnmarshalRawIncompatible(c *C) { raw := bson.Raw{0x08, []byte{0x01}} // true err := raw.Unmarshal(&struct{}{}) c.Assert(err, ErrorMatches, "BSON kind 0x08 isn't compatible with type struct 
\\{\\}") } func (s *S) TestUnmarshalZeroesStruct(c *C) { data, err := bson.Marshal(bson.M{"b": 2}) c.Assert(err, IsNil) type T struct{ A, B int } v := T{A: 1} err = bson.Unmarshal(data, &v) c.Assert(err, IsNil) c.Assert(v.A, Equals, 0) c.Assert(v.B, Equals, 2) } func (s *S) TestUnmarshalZeroesMap(c *C) { data, err := bson.Marshal(bson.M{"b": 2}) c.Assert(err, IsNil) m := bson.M{"a": 1} err = bson.Unmarshal(data, &m) c.Assert(err, IsNil) c.Assert(m, DeepEquals, bson.M{"b": 2}) } func (s *S) TestUnmarshalNonNilInterface(c *C) { data, err := bson.Marshal(bson.M{"b": 2}) c.Assert(err, IsNil) m := bson.M{"a": 1} var i interface{} i = m err = bson.Unmarshal(data, &i) c.Assert(err, IsNil) c.Assert(i, DeepEquals, bson.M{"b": 2}) c.Assert(m, DeepEquals, bson.M{"a": 1}) } // -------------------------------------------------------------------------- // Some one way marshaling operations which would unmarshal differently. var oneWayMarshalItems = []testItemType{ // These are being passed as pointers, and will unmarshal as values. {bson.M{"": &bson.Binary{0x02, []byte("old")}}, "\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, {bson.M{"": &bson.Binary{0x80, []byte("udef")}}, "\x05\x00\x04\x00\x00\x00\x80udef"}, {bson.M{"": &bson.RegEx{"ab", "cd"}}, "\x0B\x00ab\x00cd\x00"}, {bson.M{"": &bson.JavaScript{"code", nil}}, "\x0D\x00\x05\x00\x00\x00code\x00"}, {bson.M{"": &bson.JavaScript{"code", bson.M{"": nil}}}, "\x0F\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" + "\x07\x00\x00\x00\x0A\x00\x00"}, // There's no float32 type in BSON. Will encode as a float64. {bson.M{"": float32(5.05)}, "\x01\x00\x00\x00\x00@33\x14@"}, // The array will be unmarshaled as a slice instead. {bson.M{"": [2]bool{true, false}}, "\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, // The typed slice will be unmarshaled as []interface{}. {bson.M{"": []bool{true, false}}, "\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"}, // Will unmarshal as a []byte. 
{bson.M{"": bson.Binary{0x00, []byte("yo")}}, "\x05\x00\x02\x00\x00\x00\x00yo"}, {bson.M{"": bson.Binary{0x02, []byte("old")}}, "\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"}, // No way to preserve the type information here. We might encode as a zero // value, but this would mean that pointer values in structs wouldn't be // able to correctly distinguish between unset and set to the zero value. {bson.M{"": (*byte)(nil)}, "\x0A\x00"}, // No int types smaller than int32 in BSON. Could encode this as a char, // but it would still be ambiguous, take more, and be awkward in Go when // loaded without typing information. {bson.M{"": byte(8)}, "\x10\x00\x08\x00\x00\x00"}, // There are no unsigned types in BSON. Will unmarshal as int32 or int64. {bson.M{"": uint32(258)}, "\x10\x00\x02\x01\x00\x00"}, {bson.M{"": uint64(258)}, "\x12\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, {bson.M{"": uint64(258 << 32)}, "\x12\x00\x00\x00\x00\x00\x02\x01\x00\x00"}, // This will unmarshal as int. {bson.M{"": int32(258)}, "\x10\x00\x02\x01\x00\x00"}, // That's a special case. The unsigned value is too large for an int32, // so an int64 is used instead. {bson.M{"": uint32(1<<32 - 1)}, "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"}, {bson.M{"": uint(1<<32 - 1)}, "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"}, } func (s *S) TestOneWayMarshalItems(c *C) { for i, item := range oneWayMarshalItems { data, err := bson.Marshal(item.obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, wrapInDoc(item.data), Commentf("Failed on item %d", i)) } } // -------------------------------------------------------------------------- // Two-way tests for user-defined structures using the samples // from bsonspec.org. 
// Two structs mirroring the sample documents from bsonspec.org.
type specSample1 struct {
	Hello string
}

type specSample2 struct {
	BSON []interface{} "BSON"
}

// structSampleItems pairs each struct value with the exact raw BSON document
// it must marshal to (and unmarshal from).
var structSampleItems = []testItemType{
	{&specSample1{"world"},
		"\x16\x00\x00\x00\x02hello\x00\x06\x00\x00\x00world\x00\x00"},
	{&specSample2{[]interface{}{"awesome", float64(5.05), 1986}},
		"1\x00\x00\x00\x04BSON\x00&\x00\x00\x00\x020\x00\x08\x00\x00\x00" +
			"awesome\x00\x011\x00333333\x14@\x102\x00\xc2\x07\x00\x00\x00\x00"},
}

func (s *S) TestMarshalStructSampleItems(c *C) {
	for i, item := range structSampleItems {
		data, err := bson.Marshal(item.obj)
		c.Assert(err, IsNil)
		c.Assert(string(data), Equals, item.data,
			Commentf("Failed on item %d", i))
	}
}

func (s *S) TestUnmarshalStructSampleItems(c *C) {
	for _, item := range structSampleItems {
		testUnmarshal(c, item.data, item.obj)
	}
}

// Test64bitInt verifies that an int value above 32 bits marshals as BSON
// int64. The int(i) > 0 guard makes the body a no-op on 32-bit platforms,
// where int cannot hold 1<<31.
func (s *S) Test64bitInt(c *C) {
	var i int64 = (1 << 31)
	if int(i) > 0 {
		data, err := bson.Marshal(bson.M{"i": int(i)})
		c.Assert(err, IsNil)
		c.Assert(string(data), Equals, wrapInDoc("\x12i\x00\x00\x00\x00\x80\x00\x00\x00\x00"))

		var result struct{ I int }
		err = bson.Unmarshal(data, &result)
		c.Assert(err, IsNil)
		c.Assert(int64(result.I), Equals, i)
	}
}

// --------------------------------------------------------------------------
// Generic two-way struct marshaling tests.

// bytevar and byteptr provide addressable values for the pointer-field
// entries in structItems below.
var bytevar = byte(8)
var byteptr = &bytevar

// structItems lists struct values together with their expected BSON element
// bytes (without document framing; wrapInDoc adds the length prefix and the
// trailing '\x00' when the tests run).
var structItems = []testItemType{
	{&struct{ Ptr *byte }{nil},
		"\x0Aptr\x00"},
	{&struct{ Ptr *byte }{&bytevar},
		"\x10ptr\x00\x08\x00\x00\x00"},
	{&struct{ Ptr **byte }{&byteptr},
		"\x10ptr\x00\x08\x00\x00\x00"},
	{&struct{ Byte byte }{8},
		"\x10byte\x00\x08\x00\x00\x00"},
	{&struct{ Byte byte }{0},
		"\x10byte\x00\x00\x00\x00\x00"},
	{&struct {
		V byte "Tag"
	}{8},
		"\x10Tag\x00\x08\x00\x00\x00"},
	{&struct {
		V *struct {
			Byte byte
		}
	}{&struct{ Byte byte }{8}},
		"\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"},
	{&struct{ priv byte }{}, ""},

	// The order of the dumped fields should be the same in the struct.
	{&struct{ A, C, B, D, F, E *byte }{},
		"\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x0Ae\x00"},

	{&struct{ V bson.Raw }{bson.Raw{0x03, []byte("\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00")}},
		"\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"},
	{&struct{ V bson.Raw }{bson.Raw{0x10, []byte("\x00\x00\x00\x00")}},
		"\x10v\x00" + "\x00\x00\x00\x00"},

	// Byte arrays.
	{&struct{ V [2]byte }{[2]byte{'y', 'o'}},
		"\x05v\x00\x02\x00\x00\x00\x00yo"},
}

func (s *S) TestMarshalStructItems(c *C) {
	for i, item := range structItems {
		data, err := bson.Marshal(item.obj)
		c.Assert(err, IsNil)
		c.Assert(string(data), Equals, wrapInDoc(item.data),
			Commentf("Failed on item %d", i))
	}
}

func (s *S) TestUnmarshalStructItems(c *C) {
	for _, item := range structItems {
		testUnmarshal(c, wrapInDoc(item.data), item.obj)
	}
}

func (s *S) TestUnmarshalRawStructItems(c *C) {
	for i, item := range structItems {
		raw := bson.Raw{0x03, []byte(wrapInDoc(item.data))}
		zero := makeZeroDoc(item.obj)
		err := raw.Unmarshal(zero)
		c.Assert(err, IsNil)
		c.Assert(zero, DeepEquals, item.obj, Commentf("Failed on item %d: %#v", i, item))
	}
}

func (s *S) TestUnmarshalRawNil(c *C) {
	// Regression test: shouldn't try to nil out the pointer itself,
	// as it's not settable.
	raw := bson.Raw{0x0A, []byte{}}
	err := raw.Unmarshal(&struct{}{})
	c.Assert(err, IsNil)
}

// --------------------------------------------------------------------------
// One-way marshaling tests.

type dOnIface struct {
	D interface{}
}

type ignoreField struct {
	Before string
	Ignore string `bson:"-"`
	After  string
}

// marshalItems cover values that marshal to the given bytes but would not
// unmarshal back to the same Go value (ordering types, Raw pass-through,
// ignored fields).
var marshalItems = []testItemType{
	// Ordered document dump. Will unmarshal as a dictionary by default.
	{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}},
		"\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"},
	{MyD{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}},
		"\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"},
	{&dOnIface{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}},
		"\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")},

	{bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}},
		"\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"},
	{MyRawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}},
		"\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"},
	{&dOnIface{bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}},
		"\x03d\x00" + wrapInDoc("\x0Aa\x00"+"\x0Ac\x00"+"\x08b\x00\x01")},

	{&ignoreField{"before", "ignore", "after"},
		"\x02before\x00\a\x00\x00\x00before\x00\x02after\x00\x06\x00\x00\x00after\x00"},

	// Marshalling a Raw document does nothing.
	{bson.Raw{0x03, []byte(wrapInDoc("anything"))},
		"anything"},
	{bson.Raw{Data: []byte(wrapInDoc("anything"))},
		"anything"},
}

func (s *S) TestMarshalOneWayItems(c *C) {
	for _, item := range marshalItems {
		data, err := bson.Marshal(item.obj)
		c.Assert(err, IsNil)
		c.Assert(string(data), Equals, wrapInDoc(item.data))
	}
}

// --------------------------------------------------------------------------
// One-way unmarshaling tests.

// unmarshalItems cover byte sequences that decode to the given values but
// would not re-marshal to the same bytes.
var unmarshalItems = []testItemType{
	// Field is private. Should not attempt to unmarshal it.
	{&struct{ priv byte }{},
		"\x10priv\x00\x08\x00\x00\x00"},

	// Wrong casing. Field names are lowercased.
	{&struct{ Byte byte }{},
		"\x10Byte\x00\x08\x00\x00\x00"},

	// Ignore non-existing field.
	{&struct{ Byte byte }{9},
		"\x10boot\x00\x08\x00\x00\x00" + "\x10byte\x00\x09\x00\x00\x00"},

	// Do not unmarshal on ignored field.
	{&ignoreField{"before", "", "after"},
		"\x02before\x00\a\x00\x00\x00before\x00" +
			"\x02-\x00\a\x00\x00\x00ignore\x00" +
			"\x02after\x00\x06\x00\x00\x00after\x00"},

	// Ignore unsuitable types silently.
	{map[string]string{"str": "s"},
		"\x02str\x00\x02\x00\x00\x00s\x00" + "\x10int\x00\x01\x00\x00\x00"},
	{map[string][]int{"array": []int{5, 9}},
		"\x04array\x00" + wrapInDoc("\x100\x00\x05\x00\x00\x00"+"\x021\x00\x02\x00\x00\x00s\x00"+"\x102\x00\x09\x00\x00\x00")},

	// Wrong type. Shouldn't init pointer.
	{&struct{ Str *byte }{},
		"\x02str\x00\x02\x00\x00\x00s\x00"},
	{&struct{ Str *struct{ Str string } }{},
		"\x02str\x00\x02\x00\x00\x00s\x00"},

	// Ordered document.
	{&struct{ bson.D }{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}},
		"\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")},

	// Raw document.
	{&bson.Raw{0x03, []byte(wrapInDoc("\x10byte\x00\x08\x00\x00\x00"))},
		"\x10byte\x00\x08\x00\x00\x00"},

	// RawD document.
	{&struct{ bson.RawD }{bson.RawD{{"a", bson.Raw{0x0A, []byte{}}}, {"c", bson.Raw{0x0A, []byte{}}}, {"b", bson.Raw{0x08, []byte{0x01}}}}},
		"\x03rawd\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x08b\x00\x01")},

	// Decode old binary.
	{bson.M{"_": []byte("old")},
		"\x05_\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"},

	// Decode old binary without length. According to the spec, this shouldn't happen.
	{bson.M{"_": []byte("old")},
		"\x05_\x00\x03\x00\x00\x00\x02old"},
}

func (s *S) TestUnmarshalOneWayItems(c *C) {
	for _, item := range unmarshalItems {
		testUnmarshal(c, wrapInDoc(item.data), item.obj)
	}
}

func (s *S) TestUnmarshalNilInStruct(c *C) {
	// Nil is the default value, so we need to ensure it's indeed being set.
	b := byte(1)
	v := &struct{ Ptr *byte }{&b}
	err := bson.Unmarshal([]byte(wrapInDoc("\x0Aptr\x00")), v)
	c.Assert(err, IsNil)
	c.Assert(v, DeepEquals, &struct{ Ptr *byte }{nil})
}

// --------------------------------------------------------------------------
// Marshalling error cases.
// structWithDupKeys declares two fields that both map to the BSON key "name";
// Marshal and Unmarshal must reject it with a duplicated-key error.
type structWithDupKeys struct {
	Name  byte
	Other byte "name" // Tag should precede.
}

// marshalErrorItems pairs each value with a regexp (the data field) that must
// match the error returned by bson.Marshal for that value.
var marshalErrorItems = []testItemType{
	{bson.M{"": uint64(1 << 63)},
		"BSON has no uint64 type, and value is too large to fit correctly in an int64"},
	{bson.M{"": bson.ObjectId("tooshort")},
		"ObjectIDs must be exactly 12 bytes long \\(got 8\\)"},
	{int64(123),
		"Can't marshal int64 as a BSON document"},
	{bson.M{"": 1i},
		"Can't marshal complex128 in a BSON document"},
	{&structWithDupKeys{},
		"Duplicated key 'name' in struct bson_test.structWithDupKeys"},
	{bson.Raw{0xA, []byte{}},
		"Attempted to marshal Raw kind 10 as a document"},
	{bson.Raw{0x3, []byte{}},
		"Attempted to marshal empty Raw document"},
	{bson.M{"w": bson.Raw{0x3, []byte{}}},
		"Attempted to marshal empty Raw document"},
	{&inlineCantPtr{&struct{ A, B int }{1, 2}},
		"Option ,inline needs a struct value or map field"},
	{&inlineDupName{1, struct{ A, B int }{2, 3}},
		"Duplicated key 'a' in struct bson_test.inlineDupName"},
	{&inlineDupMap{},
		"Multiple ,inline maps in struct bson_test.inlineDupMap"},
	{&inlineBadKeyMap{},
		"Option ,inline needs a map with string keys in struct bson_test.inlineBadKeyMap"},
	{&inlineMap{A: 1, M: map[string]interface{}{"a": 1}},
		`Can't have key "a" in inlined map; conflicts with struct field`},
}

func (s *S) TestMarshalErrorItems(c *C) {
	for _, item := range marshalErrorItems {
		data, err := bson.Marshal(item.obj)
		c.Assert(err, ErrorMatches, item.data)
		c.Assert(data, IsNil)
	}
}

// --------------------------------------------------------------------------
// Unmarshalling error cases.

type unmarshalErrorType struct {
	obj   interface{}
	data  string
	error string
}

// unmarshalErrorItems lists element bytes (wrapped via wrapInDoc by the test)
// and the error regexp bson.Unmarshal must produce for them.
var unmarshalErrorItems = []unmarshalErrorType{
	// Tag name conflicts with existing parameter.
	{&structWithDupKeys{},
		"\x10name\x00\x08\x00\x00\x00",
		"Duplicated key 'name' in struct bson_test.structWithDupKeys"},

	// Non-string map key.
	{map[int]interface{}{},
		"\x10name\x00\x08\x00\x00\x00",
		"BSON map must have string keys. Got: map\\[int\\]interface \\{\\}"},

	{nil,
		"\xEEname\x00",
		"Unknown element kind \\(0xEE\\)"},

	{struct{ Name bool }{},
		"\x10name\x00\x08\x00\x00\x00",
		"Unmarshal can't deal with struct values. Use a pointer."},

	{123,
		"\x10name\x00\x08\x00\x00\x00",
		"Unmarshal needs a map or a pointer to a struct."},

	{nil,
		"\x08\x62\x00\x02",
		"encoded boolean must be 1 or 0, found 2"},
}

func (s *S) TestUnmarshalErrorItems(c *C) {
	for _, item := range unmarshalErrorItems {
		data := []byte(wrapInDoc(item.data))
		var value interface{}
		// Pick a destination matching the item's kind: maps/pointers get a
		// fresh zero document; a nil obj defaults to bson.M.
		switch reflect.ValueOf(item.obj).Kind() {
		case reflect.Map, reflect.Ptr:
			value = makeZeroDoc(item.obj)
		case reflect.Invalid:
			value = bson.M{}
		default:
			value = item.obj
		}
		err := bson.Unmarshal(data, value)
		c.Assert(err, ErrorMatches, item.error)
	}
}

type unmarshalRawErrorType struct {
	obj   interface{}
	raw   bson.Raw
	error string
}

// unmarshalRawErrorItems is the Raw.Unmarshal counterpart of the table above.
var unmarshalRawErrorItems = []unmarshalRawErrorType{
	// Tag name conflicts with existing parameter.
	{&structWithDupKeys{},
		bson.Raw{0x03, []byte("\x10byte\x00\x08\x00\x00\x00")},
		"Duplicated key 'name' in struct bson_test.structWithDupKeys"},

	{&struct{}{},
		bson.Raw{0xEE, []byte{}},
		"Unknown element kind \\(0xEE\\)"},

	{struct{ Name bool }{},
		bson.Raw{0x10, []byte("\x08\x00\x00\x00")},
		"Raw Unmarshal can't deal with struct values. Use a pointer."},

	{123,
		bson.Raw{0x10, []byte("\x08\x00\x00\x00")},
		"Raw Unmarshal needs a map or a valid pointer."},
}

func (s *S) TestUnmarshalRawErrorItems(c *C) {
	for i, item := range unmarshalRawErrorItems {
		err := item.raw.Unmarshal(item.obj)
		c.Assert(err, ErrorMatches, item.error, Commentf("Failed on item %d: %#v\n", i, item))
	}
}

// corruptedData holds malformed BSON documents that must all fail to
// unmarshal with "Document is corrupted".
var corruptedData = []string{
	"\x04\x00\x00\x00\x00",         // Document shorter than minimum
	"\x06\x00\x00\x00\x00",         // Not enough data
	"\x05\x00\x00",                 // Broken length
	"\x05\x00\x00\x00\xff",         // Corrupted termination
	"\x0A\x00\x00\x00\x0Aooop\x00", // Unfinished C string

	// Array end past end of string (s[2]=0x07 is correct)
	wrapInDoc("\x04\x00\x09\x00\x00\x00\x0A\x00\x00"),

	// Array end within string, but past acceptable.
	wrapInDoc("\x04\x00\x08\x00\x00\x00\x0A\x00\x00"),

	// Document end within string, but past acceptable.
	wrapInDoc("\x03\x00\x08\x00\x00\x00\x0A\x00\x00"),

	// String with corrupted end.
	wrapInDoc("\x02\x00\x03\x00\x00\x00yo\xFF"),

	// String with negative length (issue #116).
	"\x0c\x00\x00\x00\x02x\x00\xff\xff\xff\xff\x00",

	// String with zero length (must include trailing '\x00')
	"\x0c\x00\x00\x00\x02x\x00\x00\x00\x00\x00\x00",

	// Binary with negative length.
	"\r\x00\x00\x00\x05x\x00\xff\xff\xff\xff\x00\x00",
}

func (s *S) TestUnmarshalMapDocumentTooShort(c *C) {
	for _, data := range corruptedData {
		err := bson.Unmarshal([]byte(data), bson.M{})
		c.Assert(err, ErrorMatches, "Document is corrupted")

		err = bson.Unmarshal([]byte(data), &struct{}{})
		c.Assert(err, ErrorMatches, "Document is corrupted")
	}
}

// --------------------------------------------------------------------------
// Setter test cases.
var setterResult = map[string]error{} type setterType struct { received interface{} } func (o *setterType) SetBSON(raw bson.Raw) error { err := raw.Unmarshal(&o.received) if err != nil { panic("The panic:" + err.Error()) } if s, ok := o.received.(string); ok { if result, ok := setterResult[s]; ok { return result } } return nil } type ptrSetterDoc struct { Field *setterType "_" } type valSetterDoc struct { Field setterType "_" } func (s *S) TestUnmarshalAllItemsWithPtrSetter(c *C) { for _, item := range allItems { for i := 0; i != 2; i++ { var field *setterType if i == 0 { obj := &ptrSetterDoc{} err := bson.Unmarshal([]byte(wrapInDoc(item.data)), obj) c.Assert(err, IsNil) field = obj.Field } else { obj := &valSetterDoc{} err := bson.Unmarshal([]byte(wrapInDoc(item.data)), obj) c.Assert(err, IsNil) field = &obj.Field } if item.data == "" { // Nothing to unmarshal. Should be untouched. if i == 0 { c.Assert(field, IsNil) } else { c.Assert(field.received, IsNil) } } else { expected := item.obj.(bson.M)["_"] c.Assert(field, NotNil, Commentf("Pointer not initialized (%#v)", expected)) c.Assert(field.received, DeepEquals, expected) } } } } func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) { obj := &setterType{} err := bson.Unmarshal([]byte(sampleItems[0].data), obj) c.Assert(err, IsNil) c.Assert(obj.received, DeepEquals, bson.M{"hello": "world"}) } func (s *S) TestUnmarshalSetterOmits(c *C) { setterResult["2"] = &bson.TypeError{} setterResult["4"] = &bson.TypeError{} defer func() { delete(setterResult, "2") delete(setterResult, "4") }() m := map[string]*setterType{} data := wrapInDoc("\x02abc\x00\x02\x00\x00\x001\x00" + "\x02def\x00\x02\x00\x00\x002\x00" + "\x02ghi\x00\x02\x00\x00\x003\x00" + "\x02jkl\x00\x02\x00\x00\x004\x00") err := bson.Unmarshal([]byte(data), m) c.Assert(err, IsNil) c.Assert(m["abc"], NotNil) c.Assert(m["def"], IsNil) c.Assert(m["ghi"], NotNil) c.Assert(m["jkl"], IsNil) c.Assert(m["abc"].received, Equals, "1") c.Assert(m["ghi"].received, Equals, 
"3") } func (s *S) TestUnmarshalSetterErrors(c *C) { boom := errors.New("BOOM") setterResult["2"] = boom defer delete(setterResult, "2") m := map[string]*setterType{} data := wrapInDoc("\x02abc\x00\x02\x00\x00\x001\x00" + "\x02def\x00\x02\x00\x00\x002\x00" + "\x02ghi\x00\x02\x00\x00\x003\x00") err := bson.Unmarshal([]byte(data), m) c.Assert(err, Equals, boom) c.Assert(m["abc"], NotNil) c.Assert(m["def"], IsNil) c.Assert(m["ghi"], IsNil) c.Assert(m["abc"].received, Equals, "1") } func (s *S) TestDMap(c *C) { d := bson.D{{"a", 1}, {"b", 2}} c.Assert(d.Map(), DeepEquals, bson.M{"a": 1, "b": 2}) } func (s *S) TestUnmarshalSetterSetZero(c *C) { setterResult["foo"] = bson.SetZero defer delete(setterResult, "field") data, err := bson.Marshal(bson.M{"field": "foo"}) c.Assert(err, IsNil) m := map[string]*setterType{} err = bson.Unmarshal([]byte(data), m) c.Assert(err, IsNil) value, ok := m["field"] c.Assert(ok, Equals, true) c.Assert(value, IsNil) } // -------------------------------------------------------------------------- // Getter test cases. 
type typeWithGetter struct { result interface{} err error } func (t *typeWithGetter) GetBSON() (interface{}, error) { if t == nil { return "", nil } return t.result, t.err } type docWithGetterField struct { Field *typeWithGetter "_" } func (s *S) TestMarshalAllItemsWithGetter(c *C) { for i, item := range allItems { if item.data == "" { continue } obj := &docWithGetterField{} obj.Field = &typeWithGetter{result: item.obj.(bson.M)["_"]} data, err := bson.Marshal(obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, wrapInDoc(item.data), Commentf("Failed on item #%d", i)) } } func (s *S) TestMarshalWholeDocumentWithGetter(c *C) { obj := &typeWithGetter{result: sampleItems[0].obj} data, err := bson.Marshal(obj) c.Assert(err, IsNil) c.Assert(string(data), Equals, sampleItems[0].data) } func (s *S) TestGetterErrors(c *C) { e := errors.New("oops") obj1 := &docWithGetterField{} obj1.Field = &typeWithGetter{sampleItems[0].obj, e} data, err := bson.Marshal(obj1) c.Assert(err, ErrorMatches, "oops") c.Assert(data, IsNil) obj2 := &typeWithGetter{sampleItems[0].obj, e} data, err = bson.Marshal(obj2) c.Assert(err, ErrorMatches, "oops") c.Assert(data, IsNil) } type intGetter int64 func (t intGetter) GetBSON() (interface{}, error) { return int64(t), nil } type typeWithIntGetter struct { V intGetter ",minsize" } func (s *S) TestMarshalShortWithGetter(c *C) { obj := typeWithIntGetter{42} data, err := bson.Marshal(obj) c.Assert(err, IsNil) m := bson.M{} err = bson.Unmarshal(data, m) c.Assert(err, IsNil) c.Assert(m["v"], Equals, 42) } func (s *S) TestMarshalWithGetterNil(c *C) { obj := docWithGetterField{} data, err := bson.Marshal(obj) c.Assert(err, IsNil) m := bson.M{} err = bson.Unmarshal(data, m) c.Assert(err, IsNil) c.Assert(m, DeepEquals, bson.M{"_": ""}) } // -------------------------------------------------------------------------- // Cross-type conversion tests. 
// crossTypeItem pairs two representations of the same logical document;
// the cross-pair tests marshal one and unmarshal into the other.
type crossTypeItem struct {
	obj1 interface{}
	obj2 interface{}
}

// cond* types exercise the ",omitempty" tag option for each field kind.
type condStr struct {
	V string ",omitempty"
}
type condStrNS struct {
	V string `a:"A" bson:",omitempty" b:"B"`
}
type condBool struct {
	V bool ",omitempty"
}
type condInt struct {
	V int ",omitempty"
}
type condUInt struct {
	V uint ",omitempty"
}
type condFloat struct {
	V float64 ",omitempty"
}
type condIface struct {
	V interface{} ",omitempty"
}
type condPtr struct {
	V *bool ",omitempty"
}
type condSlice struct {
	V []string ",omitempty"
}
type condMap struct {
	V map[string]int ",omitempty"
}
type namedCondStr struct {
	V string "myv,omitempty"
}
type condTime struct {
	V time.Time ",omitempty"
}
type condStruct struct {
	V struct{ A []int } ",omitempty"
}
type condRaw struct {
	V bson.Raw ",omitempty"
}

// short* types exercise the ",minsize" tag option (int32 when it fits).
type shortInt struct {
	V int64 ",minsize"
}
type shortUint struct {
	V uint64 ",minsize"
}
type shortIface struct {
	V interface{} ",minsize"
}
type shortPtr struct {
	V *int64 ",minsize"
}
type shortNonEmptyInt struct {
	V int64 ",minsize,omitempty"
}

// inline* types exercise the ",inline" tag option for structs and maps.
type inlineInt struct {
	V struct{ A, B int } ",inline"
}
type inlineCantPtr struct {
	V *struct{ A, B int } ",inline"
}
type inlineDupName struct {
	A int
	V struct{ A, B int } ",inline"
}
type inlineMap struct {
	A int
	M map[string]interface{} ",inline"
}
type inlineMapInt struct {
	A int
	M map[string]int ",inline"
}
type inlineMapMyM struct {
	A int
	M MyM ",inline"
}
type inlineDupMap struct {
	M1 map[string]interface{} ",inline"
	M2 map[string]interface{} ",inline"
}
type inlineBadKeyMap struct {
	M map[int]int ",inline"
}

// getterSetterD is a non-struct type with both hooks: GetBSON drops the
// last element, SetBSON appends a {"suffix", true} element.
type getterSetterD bson.D

func (s getterSetterD) GetBSON() (interface{}, error) {
	if len(s) == 0 {
		return bson.D{}, nil
	}
	return bson.D(s[:len(s)-1]), nil
}

func (s *getterSetterD) SetBSON(raw bson.Raw) error {
	var doc bson.D
	err := raw.Unmarshal(&doc)
	doc = append(doc, bson.DocElem{"suffix", true})
	*s = getterSetterD(doc)
	return err
}

// getterSetterInt round-trips an int through the document {"a": i}.
type getterSetterInt int

func (i getterSetterInt) GetBSON() (interface{}, error) {
	return bson.D{{"a", int(i)}}, nil
}

func (i *getterSetterInt) SetBSON(raw bson.Raw) error {
	var doc struct{ A int }
	err := raw.Unmarshal(&doc)
	*i = getterSetterInt(doc.A)
	return err
}

type ifaceType interface {
	Hello()
}

// ifaceSlice marshals as its length and unmarshals back into a slice of
// that length, exercising SetBSON on an interface-element slice type.
type ifaceSlice []ifaceType

func (s *ifaceSlice) SetBSON(raw bson.Raw) error {
	var ns []int
	if err := raw.Unmarshal(&ns); err != nil {
		return err
	}
	*s = make(ifaceSlice, ns[0])
	return nil
}

func (s ifaceSlice) GetBSON() (interface{}, error) {
	return []int{len(s)}, nil
}

// Named aliases for cross-type conversion coverage.
type (
	MyString string
	MyBytes  []byte
	MyBool   bool
	MyD      []bson.DocElem
	MyRawD   []bson.RawDocElem
	MyM      map[string]interface{}
)

// Addressable fixture values used by the tables below.
var (
	truevar  = true
	falsevar = false

	int64var = int64(42)
	int64ptr = &int64var
	intvar   = int(42)
	intptr   = &intvar

	gsintvar = getterSetterInt(42)
)

// parseURL is a test helper; it panics on bad input since the inputs are
// compile-time constants in the tables below.
func parseURL(s string) *url.URL {
	u, err := url.Parse(s)
	if err != nil {
		panic(err)
	}
	return u
}

// That's a pretty fun test. It will dump the first item, generate a zero
// value equivalent to the second one, load the dumped data onto it, and then
// verify that the resulting value is deep-equal to the untouched second value.
// Then, it will do the same in the *opposite* direction!
// twoWayCrossItems lists pairs that must round-trip in both directions:
// marshal obj1 / unmarshal into obj2's zero value must equal obj2, and
// vice versa.
var twoWayCrossItems = []crossTypeItem{
	// int<=>int
	{&struct{ I int }{42}, &struct{ I int8 }{42}},
	{&struct{ I int }{42}, &struct{ I int32 }{42}},
	{&struct{ I int }{42}, &struct{ I int64 }{42}},
	{&struct{ I int8 }{42}, &struct{ I int32 }{42}},
	{&struct{ I int8 }{42}, &struct{ I int64 }{42}},
	{&struct{ I int32 }{42}, &struct{ I int64 }{42}},

	// uint<=>uint
	{&struct{ I uint }{42}, &struct{ I uint8 }{42}},
	{&struct{ I uint }{42}, &struct{ I uint32 }{42}},
	{&struct{ I uint }{42}, &struct{ I uint64 }{42}},
	{&struct{ I uint8 }{42}, &struct{ I uint32 }{42}},
	{&struct{ I uint8 }{42}, &struct{ I uint64 }{42}},
	{&struct{ I uint32 }{42}, &struct{ I uint64 }{42}},

	// float32<=>float64
	{&struct{ I float32 }{42}, &struct{ I float64 }{42}},

	// int<=>uint
	{&struct{ I uint }{42}, &struct{ I int }{42}},
	{&struct{ I uint }{42}, &struct{ I int8 }{42}},
	{&struct{ I uint }{42}, &struct{ I int32 }{42}},
	{&struct{ I uint }{42}, &struct{ I int64 }{42}},
	{&struct{ I uint8 }{42}, &struct{ I int }{42}},
	{&struct{ I uint8 }{42}, &struct{ I int8 }{42}},
	{&struct{ I uint8 }{42}, &struct{ I int32 }{42}},
	{&struct{ I uint8 }{42}, &struct{ I int64 }{42}},
	{&struct{ I uint32 }{42}, &struct{ I int }{42}},
	{&struct{ I uint32 }{42}, &struct{ I int8 }{42}},
	{&struct{ I uint32 }{42}, &struct{ I int32 }{42}},
	{&struct{ I uint32 }{42}, &struct{ I int64 }{42}},
	{&struct{ I uint64 }{42}, &struct{ I int }{42}},
	{&struct{ I uint64 }{42}, &struct{ I int8 }{42}},
	{&struct{ I uint64 }{42}, &struct{ I int32 }{42}},
	{&struct{ I uint64 }{42}, &struct{ I int64 }{42}},

	// int <=> float
	{&struct{ I int }{42}, &struct{ I float64 }{42}},

	// int <=> bool
	{&struct{ I int }{1}, &struct{ I bool }{true}},
	{&struct{ I int }{0}, &struct{ I bool }{false}},

	// uint <=> float64
	{&struct{ I uint }{42}, &struct{ I float64 }{42}},

	// uint <=> bool
	{&struct{ I uint }{1}, &struct{ I bool }{true}},
	{&struct{ I uint }{0}, &struct{ I bool }{false}},

	// float64 <=> bool
	{&struct{ I float64 }{1}, &struct{ I bool }{true}},
	{&struct{ I float64 }{0}, &struct{ I bool }{false}},

	// string <=> string and string <=> []byte
	{&struct{ S []byte }{[]byte("abc")}, &struct{ S string }{"abc"}},
	{&struct{ S []byte }{[]byte("def")}, &struct{ S bson.Symbol }{"def"}},
	{&struct{ S string }{"ghi"}, &struct{ S bson.Symbol }{"ghi"}},

	// map <=> struct
	{&struct {
		A struct {
			B, C int
		}
	}{struct{ B, C int }{1, 2}},
		map[string]map[string]int{"a": map[string]int{"b": 1, "c": 2}}},

	{&struct{ A bson.Symbol }{"abc"}, map[string]string{"a": "abc"}},
	{&struct{ A bson.Symbol }{"abc"}, map[string][]byte{"a": []byte("abc")}},
	{&struct{ A []byte }{[]byte("abc")}, map[string]string{"a": "abc"}},
	{&struct{ A uint }{42}, map[string]int{"a": 42}},
	{&struct{ A uint }{42}, map[string]float64{"a": 42}},
	{&struct{ A uint }{1}, map[string]bool{"a": true}},
	{&struct{ A int }{42}, map[string]uint{"a": 42}},
	{&struct{ A int }{42}, map[string]float64{"a": 42}},
	{&struct{ A int }{1}, map[string]bool{"a": true}},
	{&struct{ A float64 }{42}, map[string]float32{"a": 42}},
	{&struct{ A float64 }{42}, map[string]int{"a": 42}},
	{&struct{ A float64 }{42}, map[string]uint{"a": 42}},
	{&struct{ A float64 }{1}, map[string]bool{"a": true}},
	{&struct{ A bool }{true}, map[string]int{"a": 1}},
	{&struct{ A bool }{true}, map[string]uint{"a": 1}},
	{&struct{ A bool }{true}, map[string]float64{"a": 1}},
	{&struct{ A **byte }{&byteptr}, map[string]byte{"a": 8}},

	// url.URL <=> string
	{&struct{ URL *url.URL }{parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}},
	{&struct{ URL url.URL }{*parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}},

	// Slices
	{&struct{ S []int }{[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}},
	{&struct{ S *[]int }{&[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}},

	// Conditionals (",omitempty" in both set and zero-value forms)
	{&condBool{true}, map[string]bool{"v": true}},
	{&condBool{}, map[string]bool{}},
	{&condInt{1}, map[string]int{"v": 1}},
	{&condInt{}, map[string]int{}},
	{&condUInt{1}, map[string]uint{"v": 1}},
	{&condUInt{}, map[string]uint{}},
	{&condFloat{}, map[string]int{}},
	{&condStr{"yo"}, map[string]string{"v": "yo"}},
	{&condStr{}, map[string]string{}},
	{&condStrNS{"yo"}, map[string]string{"v": "yo"}},
	{&condStrNS{}, map[string]string{}},
	{&condSlice{[]string{"yo"}}, map[string][]string{"v": []string{"yo"}}},
	{&condSlice{}, map[string][]string{}},
	{&condMap{map[string]int{"k": 1}}, bson.M{"v": bson.M{"k": 1}}},
	{&condMap{}, map[string][]string{}},
	{&condIface{"yo"}, map[string]string{"v": "yo"}},
	{&condIface{""}, map[string]string{"v": ""}},
	{&condIface{}, map[string]string{}},
	{&condPtr{&truevar}, map[string]bool{"v": true}},
	{&condPtr{&falsevar}, map[string]bool{"v": false}},
	{&condPtr{}, map[string]string{}},

	{&condTime{time.Unix(123456789, 123e6)}, map[string]time.Time{"v": time.Unix(123456789, 123e6)}},
	{&condTime{}, map[string]string{}},

	{&condStruct{struct{ A []int }{[]int{1}}}, bson.M{"v": bson.M{"a": []interface{}{1}}}},
	{&condStruct{struct{ A []int }{}}, bson.M{}},

	{&condRaw{bson.Raw{Kind: 0x0A, Data: []byte{}}}, bson.M{"v": nil}},
	{&condRaw{bson.Raw{Kind: 0x00}}, bson.M{}},

	{&namedCondStr{"yo"}, map[string]string{"myv": "yo"}},
	{&namedCondStr{}, map[string]string{}},

	// ",minsize": values fitting in 32 bits stay int, larger go int64.
	{&shortInt{1}, map[string]interface{}{"v": 1}},
	{&shortInt{1 << 30}, map[string]interface{}{"v": 1 << 30}},
	{&shortInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}},
	{&shortUint{1 << 30}, map[string]interface{}{"v": 1 << 30}},
	{&shortUint{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}},
	{&shortIface{int64(1) << 31}, map[string]interface{}{"v": int64(1 << 31)}},
	{&shortPtr{int64ptr}, map[string]interface{}{"v": intvar}},

	{&shortNonEmptyInt{1}, map[string]interface{}{"v": 1}},
	{&shortNonEmptyInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}},
	{&shortNonEmptyInt{}, map[string]interface{}{}},

	// ",inline"
	{&inlineInt{struct{ A, B int }{1, 2}}, map[string]interface{}{"a": 1, "b": 2}},
	{&inlineMap{A: 1, M: map[string]interface{}{"b": 2}}, map[string]interface{}{"a": 1, "b": 2}},
	{&inlineMap{A: 1, M: nil}, map[string]interface{}{"a": 1}},
	{&inlineMapInt{A: 1, M: map[string]int{"b": 2}}, map[string]int{"a": 1, "b": 2}},
	{&inlineMapInt{A: 1, M: nil}, map[string]int{"a": 1}},
	{&inlineMapMyM{A: 1, M: MyM{"b": MyM{"c": 3}}},
		map[string]interface{}{"a": 1, "b": map[string]interface{}{"c": 3}}},

	// []byte <=> Binary
	{&struct{ B []byte }{[]byte("abc")}, map[string]bson.Binary{"b": bson.Binary{Data: []byte("abc")}}},

	// []byte <=> MyBytes
	{&struct{ B MyBytes }{[]byte("abc")}, map[string]string{"b": "abc"}},
	{&struct{ B MyBytes }{[]byte{}}, map[string]string{"b": ""}},
	{&struct{ B MyBytes }{}, map[string]bool{}},
	{&struct{ B []byte }{[]byte("abc")}, map[string]MyBytes{"b": []byte("abc")}},

	// bool <=> MyBool
	{&struct{ B MyBool }{true}, map[string]bool{"b": true}},
	{&struct{ B MyBool }{}, map[string]bool{"b": false}},
	{&struct{ B MyBool }{}, map[string]string{}},
	{&struct{ B bool }{}, map[string]MyBool{"b": false}},

	// arrays
	{&struct{ V [2]int }{[...]int{1, 2}}, map[string][2]int{"v": [2]int{1, 2}}},
	{&struct{ V [2]byte }{[...]byte{1, 2}}, map[string][2]byte{"v": [2]byte{1, 2}}},

	// zero time
	{&struct{ V time.Time }{}, map[string]interface{}{"v": time.Time{}}},

	// zero time + 1 second + 1 millisecond; overflows int64 as nanoseconds
	{&struct{ V time.Time }{time.Unix(-62135596799, 1e6).Local()},
		map[string]interface{}{"v": time.Unix(-62135596799, 1e6).Local()}},

	// bson.D <=> []DocElem
	{&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}},
	{&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &MyD{{"a", MyD{{"b", 1}, {"c", 2}}}}},
	{&struct{ V MyD }{MyD{{"a", 1}}}, &bson.D{{"v", bson.D{{"a", 1}}}}},

	// bson.RawD <=> []RawDocElem
	{&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}},
	{&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &MyRawD{{"a", bson.Raw{0x08, []byte{0x01}}}}},

	// bson.M <=> map
	{bson.M{"a": bson.M{"b": 1, "c": 2}}, MyM{"a": MyM{"b": 1, "c": 2}}},
	{bson.M{"a": bson.M{"b": 1, "c": 2}}, map[string]interface{}{"a": map[string]interface{}{"b": 1, "c": 2}}},

	// bson.M <=> map[MyString]
	{bson.M{"a": bson.M{"b": 1, "c": 2}}, map[MyString]interface{}{"a": map[MyString]interface{}{"b": 1, "c": 2}}},

	// json.Number <=> int64, float64
	{&struct{ N json.Number }{"5"}, map[string]interface{}{"n": int64(5)}},
	{&struct{ N json.Number }{"5.05"}, map[string]interface{}{"n": 5.05}},
	{&struct{ N json.Number }{"9223372036854776000"}, map[string]interface{}{"n": float64(1 << 63)}},

	// bson.D <=> non-struct getter/setter
	{&bson.D{{"a", 1}}, &getterSetterD{{"a", 1}, {"suffix", true}}},
	{&bson.D{{"a", 42}}, &gsintvar},

	// Interface slice setter.
	{&struct{ V ifaceSlice }{ifaceSlice{nil, nil, nil}}, bson.M{"v": []interface{}{3}}},
}

// Same thing, but only one way (obj1 => obj2).
var oneWayCrossItems = []crossTypeItem{
	// map <=> struct
	{map[string]interface{}{"a": 1, "b": "2", "c": 3}, map[string]int{"a": 1, "c": 3}},

	// inline map elides badly typed values
	{map[string]interface{}{"a": 1, "b": "2", "c": 3}, &inlineMapInt{A: 1, M: map[string]int{"c": 3}}},

	// Can't decode int into struct.
	{bson.M{"a": bson.M{"b": 2}}, &struct{ A bool }{}},

	// Would get decoded into a int32 too in the opposite direction.
	{&shortIface{int64(1) << 30}, map[string]interface{}{"v": 1 << 30}},

	// Ensure omitempty on struct with private fields works properly.
	{&struct {
		V struct{ v time.Time } ",omitempty"
	}{}, map[string]interface{}{}},

	// Attempt to marshal slice into RawD (issue #120).
	{bson.M{"x": []int{1, 2, 3}}, &struct{ X bson.RawD }{}},
}

// testCrossPair marshals dump, unmarshals the bytes into a zero value of
// load's type, and asserts the result is deep-equal to load.
func testCrossPair(c *C, dump interface{}, load interface{}) {
	c.Logf("Dump: %#v", dump)
	c.Logf("Load: %#v", load)
	zero := makeZeroDoc(load)
	data, err := bson.Marshal(dump)
	c.Assert(err, IsNil)
	c.Logf("Dumped: %#v", string(data))
	err = bson.Unmarshal(data, zero)
	c.Assert(err, IsNil)
	c.Logf("Loaded: %#v", zero)
	c.Assert(zero, DeepEquals, load)
}

// TestTwoWayCrossPairs runs every two-way pair in both directions.
func (s *S) TestTwoWayCrossPairs(c *C) {
	for _, item := range twoWayCrossItems {
		testCrossPair(c, item.obj1, item.obj2)
		testCrossPair(c, item.obj2, item.obj1)
	}
}

// TestOneWayCrossPairs runs the one-way pairs obj1 => obj2 only.
func (s *S) TestOneWayCrossPairs(c *C) {
	for _, item := range oneWayCrossItems {
		testCrossPair(c, item.obj1, item.obj2)
	}
}

// --------------------------------------------------------------------------
// ObjectId hex representation test.

func (s *S) TestObjectIdHex(c *C) {
	id := bson.ObjectIdHex("4d88e15b60f486e428412dc9")
	c.Assert(id.String(), Equals, `ObjectIdHex("4d88e15b60f486e428412dc9")`)
	c.Assert(id.Hex(), Equals, "4d88e15b60f486e428412dc9")
}

// TestIsObjectIdHex checks the validity predicate: exactly 24 hex digits.
func (s *S) TestIsObjectIdHex(c *C) {
	test := []struct {
		id    string
		valid bool
	}{
		{"4d88e15b60f486e428412dc9", true},
		{"4d88e15b60f486e428412dc", false},
		{"4d88e15b60f486e428412dc9e", false},
		{"4d88e15b60f486e428412dcx", false},
	}
	for _, t := range test {
		c.Assert(bson.IsObjectIdHex(t.id), Equals, t.valid)
	}
}

// --------------------------------------------------------------------------
// ObjectId parts extraction tests.
type objectIdParts struct { id bson.ObjectId timestamp int64 machine []byte pid uint16 counter int32 } var objectIds = []objectIdParts{ objectIdParts{ bson.ObjectIdHex("4d88e15b60f486e428412dc9"), 1300816219, []byte{0x60, 0xf4, 0x86}, 0xe428, 4271561, }, objectIdParts{ bson.ObjectIdHex("000000000000000000000000"), 0, []byte{0x00, 0x00, 0x00}, 0x0000, 0, }, objectIdParts{ bson.ObjectIdHex("00000000aabbccddee000001"), 0, []byte{0xaa, 0xbb, 0xcc}, 0xddee, 1, }, } func (s *S) TestObjectIdPartsExtraction(c *C) { for i, v := range objectIds { t := time.Unix(v.timestamp, 0) c.Assert(v.id.Time(), Equals, t, Commentf("#%d Wrong timestamp value", i)) c.Assert(v.id.Machine(), DeepEquals, v.machine, Commentf("#%d Wrong machine id value", i)) c.Assert(v.id.Pid(), Equals, v.pid, Commentf("#%d Wrong pid value", i)) c.Assert(v.id.Counter(), Equals, v.counter, Commentf("#%d Wrong counter value", i)) } } func (s *S) TestNow(c *C) { before := time.Now() time.Sleep(1e6) now := bson.Now() time.Sleep(1e6) after := time.Now() c.Assert(now.After(before) && now.Before(after), Equals, true, Commentf("now=%s, before=%s, after=%s", now, before, after)) } // -------------------------------------------------------------------------- // ObjectId generation tests. 
func (s *S) TestNewObjectId(c *C) { // Generate 10 ids ids := make([]bson.ObjectId, 10) for i := 0; i < 10; i++ { ids[i] = bson.NewObjectId() } for i := 1; i < 10; i++ { prevId := ids[i-1] id := ids[i] // Test for uniqueness among all other 9 generated ids for j, tid := range ids { if j != i { c.Assert(id, Not(Equals), tid, Commentf("Generated ObjectId is not unique")) } } // Check that timestamp was incremented and is within 30 seconds of the previous one secs := id.Time().Sub(prevId.Time()).Seconds() c.Assert((secs >= 0 && secs <= 30), Equals, true, Commentf("Wrong timestamp in generated ObjectId")) // Check that machine ids are the same c.Assert(id.Machine(), DeepEquals, prevId.Machine()) // Check that pids are the same c.Assert(id.Pid(), Equals, prevId.Pid()) // Test for proper increment delta := int(id.Counter() - prevId.Counter()) c.Assert(delta, Equals, 1, Commentf("Wrong increment in generated ObjectId")) } } func (s *S) TestNewObjectIdWithTime(c *C) { t := time.Unix(12345678, 0) id := bson.NewObjectIdWithTime(t) c.Assert(id.Time(), Equals, t) c.Assert(id.Machine(), DeepEquals, []byte{0x00, 0x00, 0x00}) c.Assert(int(id.Pid()), Equals, 0) c.Assert(int(id.Counter()), Equals, 0) } // -------------------------------------------------------------------------- // ObjectId JSON marshalling. 
type jsonType struct { Id bson.ObjectId } var jsonIdTests = []struct { value jsonType json string marshal bool unmarshal bool error string }{{ value: jsonType{Id: bson.ObjectIdHex("4d88e15b60f486e428412dc9")}, json: `{"Id":"4d88e15b60f486e428412dc9"}`, marshal: true, unmarshal: true, }, { value: jsonType{}, json: `{"Id":""}`, marshal: true, unmarshal: true, }, { value: jsonType{}, json: `{"Id":null}`, marshal: false, unmarshal: true, }, { json: `{"Id":"4d88e15b60f486e428412dc9A"}`, error: `Invalid ObjectId in JSON: "4d88e15b60f486e428412dc9A"`, marshal: false, unmarshal: true, }, { json: `{"Id":"4d88e15b60f486e428412dcZ"}`, error: `Invalid ObjectId in JSON: "4d88e15b60f486e428412dcZ" .*`, marshal: false, unmarshal: true, }} func (s *S) TestObjectIdJSONMarshaling(c *C) { for _, test := range jsonIdTests { if test.marshal { data, err := json.Marshal(&test.value) if test.error == "" { c.Assert(err, IsNil) c.Assert(string(data), Equals, test.json) } else { c.Assert(err, ErrorMatches, test.error) } } if test.unmarshal { var value jsonType err := json.Unmarshal([]byte(test.json), &value) if test.error == "" { c.Assert(err, IsNil) c.Assert(value, DeepEquals, test.value) } else { c.Assert(err, ErrorMatches, test.error) } } } } type specTest struct { Description string Documents []struct { Decoded map[string]interface{} Encoded string DecodeOnly bool `yaml:"decodeOnly"` Error interface{} } } func (s *S) TestSpecTests(c *C) { for _, data := range specTests { var test specTest err := yaml.Unmarshal([]byte(data), &test) c.Assert(err, IsNil) c.Logf("Running spec test set %q", test.Description) for _, doc := range test.Documents { if doc.Error != nil { continue } c.Logf("Ensuring %q decodes as %v", doc.Encoded, doc.Decoded) var decoded map[string]interface{} encoded, err := hex.DecodeString(doc.Encoded) c.Assert(err, IsNil) err = bson.Unmarshal(encoded, &decoded) c.Assert(err, IsNil) c.Assert(decoded, DeepEquals, doc.Decoded) } for _, doc := range test.Documents { if 
doc.DecodeOnly || doc.Error != nil { continue } c.Logf("Ensuring %v encodes as %q", doc.Decoded, doc.Encoded) encoded, err := bson.Marshal(doc.Decoded) c.Assert(err, IsNil) c.Assert(strings.ToUpper(hex.EncodeToString(encoded)), Equals, doc.Encoded) } for _, doc := range test.Documents { if doc.Error == nil { continue } c.Logf("Ensuring %q errors when decoded: %s", doc.Encoded, doc.Error) var decoded map[string]interface{} encoded, err := hex.DecodeString(doc.Encoded) c.Assert(err, IsNil) err = bson.Unmarshal(encoded, &decoded) c.Assert(err, NotNil) c.Logf("Failed with: %v", err) } } } // -------------------------------------------------------------------------- // Some simple benchmarks. type BenchT struct { A, B, C, D, E, F string } type BenchRawT struct { A string B int C bson.M D []float64 } func (s *S) BenchmarkUnmarhsalStruct(c *C) { v := BenchT{A: "A", D: "D", E: "E"} data, err := bson.Marshal(&v) if err != nil { panic(err) } c.ResetTimer() for i := 0; i < c.N; i++ { err = bson.Unmarshal(data, &v) } if err != nil { panic(err) } } func (s *S) BenchmarkUnmarhsalMap(c *C) { m := bson.M{"a": "a", "d": "d", "e": "e"} data, err := bson.Marshal(&m) if err != nil { panic(err) } c.ResetTimer() for i := 0; i < c.N; i++ { err = bson.Unmarshal(data, &m) } if err != nil { panic(err) } } func (s *S) BenchmarkUnmarshalRaw(c *C) { var err error m := BenchRawT{ A: "test_string", B: 123, C: bson.M{ "subdoc_int": 12312, "subdoc_doc": bson.M{"1": 1}, }, D: []float64{0.0, 1.3333, -99.9997, 3.1415}, } data, err := bson.Marshal(&m) if err != nil { panic(err) } raw := bson.Raw{} c.ResetTimer() for i := 0; i < c.N; i++ { err = bson.Unmarshal(data, &raw) } if err != nil { panic(err) } } charm-2.1.1/src/gopkg.in/mgo.v2/bson/encode.go0000664000175000017500000003021612672604565017752 0ustar marcomarco// BSON library for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // gobson - BSON library for Go. package bson import ( "encoding/json" "fmt" "math" "net/url" "reflect" "strconv" "time" ) // -------------------------------------------------------------------------- // Some internal infrastructure. 
// Cached reflect.Type values for the BSON-specific Go types so addElem
// can compare v.Type() against them without repeated reflect.TypeOf calls.
var (
	typeBinary         = reflect.TypeOf(Binary{})
	typeObjectId       = reflect.TypeOf(ObjectId(""))
	typeDBPointer      = reflect.TypeOf(DBPointer{"", ObjectId("")})
	typeSymbol         = reflect.TypeOf(Symbol(""))
	typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
	typeOrderKey       = reflect.TypeOf(MinKey)
	typeDocElem        = reflect.TypeOf(DocElem{})
	typeRawDocElem     = reflect.TypeOf(RawDocElem{})
	typeRaw            = reflect.TypeOf(Raw{})
	typeURL            = reflect.TypeOf(url.URL{})
	typeTime           = reflect.TypeOf(time.Time{})
	typeString         = reflect.TypeOf("")
	typeJSONNumber     = reflect.TypeOf(json.Number(""))
)

// itoa results for small indices are precomputed, since array/slice
// elements are keyed by their decimal index in BSON documents.
const itoaCacheSize = 32

var itoaCache []string

func init() {
	itoaCache = make([]string, itoaCacheSize)
	for i := 0; i != itoaCacheSize; i++ {
		itoaCache[i] = strconv.Itoa(i)
	}
}

// itoa returns the decimal string for i, using the cache when possible.
func itoa(i int) string {
	if i < itoaCacheSize {
		return itoaCache[i]
	}
	return strconv.Itoa(i)
}

// --------------------------------------------------------------------------
// Marshaling of the document value itself.

// encoder accumulates the marshalled BSON bytes in out.
type encoder struct {
	out []byte
}

// addDoc appends v encoded as a full BSON document (length prefix, elements,
// terminating NUL). Getter hooks and pointers are unwrapped first; a Raw
// value of document kind is copied through verbatim.
func (e *encoder) addDoc(v reflect.Value) {
	for {
		if vi, ok := v.Interface().(Getter); ok {
			getv, err := vi.GetBSON()
			if err != nil {
				panic(err)
			}
			v = reflect.ValueOf(getv)
			continue
		}
		if v.Kind() == reflect.Ptr {
			v = v.Elem()
			continue
		}
		break
	}

	if v.Type() == typeRaw {
		raw := v.Interface().(Raw)
		// Only whole documents (0x03) or unspecified kind may pass here.
		if raw.Kind != 0x03 && raw.Kind != 0x00 {
			panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
		}
		if len(raw.Data) == 0 {
			panic("Attempted to marshal empty Raw document")
		}
		e.addBytes(raw.Data...)
		return
	}

	// Reserve space for the int32 length and patch it in afterwards.
	start := e.reserveInt32()

	switch v.Kind() {
	case reflect.Map:
		e.addMap(v)
	case reflect.Struct:
		e.addStruct(v)
	case reflect.Array, reflect.Slice:
		e.addSlice(v)
	default:
		panic("Can't marshal " + v.Type().String() + " as a BSON document")
	}

	e.addBytes(0)
	e.setInt32(start, int32(len(e.out)-start))
}

// addMap appends one element per map entry, keyed by the stringified key.
func (e *encoder) addMap(v reflect.Value) {
	for _, k := range v.MapKeys() {
		e.addElem(k.String(), v.MapIndex(k), false)
	}
}

// addStruct appends the struct's fields as elements, honoring the cached
// field metadata (inline maps, omitempty, minsize) from getStructInfo.
func (e *encoder) addStruct(v reflect.Value) {
	sinfo, err := getStructInfo(v.Type())
	if err != nil {
		panic(err)
	}
	var value reflect.Value
	if sinfo.InlineMap >= 0 {
		m := v.Field(sinfo.InlineMap)
		if m.Len() > 0 {
			for _, k := range m.MapKeys() {
				ks := k.String()
				// Inline map keys must not shadow real struct fields.
				if _, found := sinfo.FieldsMap[ks]; found {
					panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
				}
				e.addElem(ks, m.MapIndex(k), false)
			}
		}
	}
	for _, info := range sinfo.FieldsList {
		if info.Inline == nil {
			value = v.Field(info.Num)
		} else {
			value = v.FieldByIndex(info.Inline)
		}
		if info.OmitEmpty && isZero(value) {
			continue
		}
		e.addElem(info.Key, value, info.MinSize)
	}
}

// isZero reports whether v is the zero value for its kind, as used by the
// ",omitempty" option. Structs are zero when all exported fields are zero;
// time.Time uses its own IsZero.
func isZero(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.String:
		return len(v.String()) == 0
	case reflect.Ptr, reflect.Interface:
		return v.IsNil()
	case reflect.Slice:
		return v.Len() == 0
	case reflect.Map:
		return v.Len() == 0
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Struct:
		vt := v.Type()
		if vt == typeTime {
			return v.Interface().(time.Time).IsZero()
		}
		for i := 0; i < v.NumField(); i++ {
			if vt.Field(i).PkgPath != "" {
				continue // Private field
			}
			if !isZero(v.Field(i)) {
				return false
			}
		}
		return true
	}
	return false
}

// addSlice appends slice/array contents as document elements. D, RawD,
// []DocElem and []RawDocElem keep their element names; plain slices are
// keyed by decimal index (BSON array convention).
func (e *encoder) addSlice(v reflect.Value) {
	vi := v.Interface()
	if d, ok := vi.(D); ok {
		for _, elem := range d {
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	if d, ok := vi.(RawD); ok {
		for _, elem := range d {
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	l := v.Len()
	et := v.Type().Elem()
	if et == typeDocElem {
		for i := 0; i < l; i++ {
			elem := v.Index(i).Interface().(DocElem)
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	if et == typeRawDocElem {
		for i := 0; i < l; i++ {
			elem := v.Index(i).Interface().(RawDocElem)
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	for i := 0; i < l; i++ {
		e.addElem(itoa(i), v.Index(i), false)
	}
}

// --------------------------------------------------------------------------
// Marshaling of elements in a document.

// addElemName writes an element header: kind byte + C-string name.
func (e *encoder) addElemName(kind byte, name string) {
	e.addBytes(kind)
	e.addBytes([]byte(name)...)
	e.addBytes(0)
}

// addElem writes one complete element (kind, name, value) for v. minSize
// requests the int32 representation when the value fits. Invalid values
// encode as BSON null (0x0A); Getter hooks are honored recursively.
func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
	if !v.IsValid() {
		e.addElemName('\x0A', name)
		return
	}

	if getter, ok := v.Interface().(Getter); ok {
		getv, err := getter.GetBSON()
		if err != nil {
			panic(err)
		}
		e.addElem(name, reflect.ValueOf(getv), minSize)
		return
	}

	switch v.Kind() {

	case reflect.Interface:
		e.addElem(name, v.Elem(), minSize)

	case reflect.Ptr:
		e.addElem(name, v.Elem(), minSize)

	case reflect.String:
		s := v.String()
		switch v.Type() {
		case typeObjectId:
			if len(s) != 12 {
				panic("ObjectIDs must be exactly 12 bytes long (got " + strconv.Itoa(len(s)) + ")")
			}
			e.addElemName('\x07', name)
			e.addBytes([]byte(s)...)
		case typeSymbol:
			e.addElemName('\x0E', name)
			e.addStr(s)
		case typeJSONNumber:
			// Prefer the exact int64 form; fall back to float64.
			n := v.Interface().(json.Number)
			if i, err := n.Int64(); err == nil {
				e.addElemName('\x12', name)
				e.addInt64(i)
			} else if f, err := n.Float64(); err == nil {
				e.addElemName('\x01', name)
				e.addFloat64(f)
			} else {
				panic("failed to convert json.Number to a number: " + s)
			}
		default:
			e.addElemName('\x02', name)
			e.addStr(s)
		}

	case reflect.Float32, reflect.Float64:
		e.addElemName('\x01', name)
		e.addFloat64(v.Float())

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		u := v.Uint()
		if int64(u) < 0 {
			panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
		} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
			e.addElemName('\x10', name)
			e.addInt32(int32(u))
		} else {
			e.addElemName('\x12', name)
			e.addInt64(int64(u))
		}

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		switch v.Type() {
		case typeMongoTimestamp:
			e.addElemName('\x11', name)
			e.addInt64(v.Int())
		case typeOrderKey:
			// MaxKey is 0x7F, MinKey is 0xFF; neither carries a payload.
			if v.Int() == int64(MaxKey) {
				e.addElemName('\x7F', name)
			} else {
				e.addElemName('\xFF', name)
			}
		default:
			i := v.Int()
			if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
				// It fits into an int32, encode as such.
				e.addElemName('\x10', name)
				e.addInt32(int32(i))
			} else {
				e.addElemName('\x12', name)
				e.addInt64(i)
			}
		}

	case reflect.Bool:
		e.addElemName('\x08', name)
		if v.Bool() {
			e.addBytes(1)
		} else {
			e.addBytes(0)
		}

	case reflect.Map:
		e.addElemName('\x03', name)
		e.addDoc(v)

	case reflect.Slice:
		vt := v.Type()
		et := vt.Elem()
		if et.Kind() == reflect.Uint8 {
			// []byte becomes BSON binary with the generic subtype.
			e.addElemName('\x05', name)
			e.addBinary('\x00', v.Bytes())
		} else if et == typeDocElem || et == typeRawDocElem {
			e.addElemName('\x03', name)
			e.addDoc(v)
		} else {
			e.addElemName('\x04', name)
			e.addDoc(v)
		}

	case reflect.Array:
		et := v.Type().Elem()
		if et.Kind() == reflect.Uint8 {
			e.addElemName('\x05', name)
			if v.CanAddr() {
				e.addBinary('\x00', v.Slice(0, v.Len()).Interface().([]byte))
			} else {
				// Unaddressable byte arrays can't be sliced; emit the
				// binary header and copy byte by byte.
				n := v.Len()
				e.addInt32(int32(n))
				e.addBytes('\x00')
				for i := 0; i < n; i++ {
					el := v.Index(i)
					e.addBytes(byte(el.Uint()))
				}
			}
		} else {
			e.addElemName('\x04', name)
			e.addDoc(v)
		}

	case reflect.Struct:
		switch s := v.Interface().(type) {

		case Raw:
			kind := s.Kind
			if kind == 0x00 {
				kind = 0x03
			}
			// Empty payloads are only legal for kinds that carry no data.
			if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
				panic("Attempted to marshal empty Raw document")
			}
			e.addElemName(kind, name)
			e.addBytes(s.Data...)

		case Binary:
			e.addElemName('\x05', name)
			e.addBinary(s.Kind, s.Data)

		case DBPointer:
			e.addElemName('\x0C', name)
			e.addStr(s.Namespace)
			if len(s.Id) != 12 {
				panic("ObjectIDs must be exactly 12 bytes long (got " + strconv.Itoa(len(s.Id)) + ")")
			}
			e.addBytes([]byte(s.Id)...)

		case RegEx:
			e.addElemName('\x0B', name)
			e.addCStr(s.Pattern)
			e.addCStr(s.Options)

		case JavaScript:
			if s.Scope == nil {
				e.addElemName('\x0D', name)
				e.addStr(s.Code)
			} else {
				// Code-with-scope: total length prefix, code string, scope doc.
				e.addElemName('\x0F', name)
				start := e.reserveInt32()
				e.addStr(s.Code)
				e.addDoc(reflect.ValueOf(s.Scope))
				e.setInt32(start, int32(len(e.out)-start))
			}

		case time.Time:
			// MongoDB handles timestamps as milliseconds.
			e.addElemName('\x09', name)
			e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))

		case url.URL:
			e.addElemName('\x02', name)
			e.addStr(s.String())

		case undefined:
			e.addElemName('\x06', name)

		default:
			e.addElemName('\x03', name)
			e.addDoc(v)
		}

	default:
		panic("Can't marshal " + v.Type().String() + " in a BSON document")
	}
}

// --------------------------------------------------------------------------
// Marshaling of base types.

// addBinary writes a binary element payload: length, subtype, data.
// Subtype 0x02 ("old" binary) nests a second length, per the legacy format.
func (e *encoder) addBinary(subtype byte, v []byte) {
	if subtype == 0x02 {
		// Wonder how that brilliant idea came to life. Obsolete, luckily.
		e.addInt32(int32(len(v) + 4))
		e.addBytes(subtype)
		e.addInt32(int32(len(v)))
	} else {
		e.addInt32(int32(len(v)))
		e.addBytes(subtype)
	}
	e.addBytes(v...)
}

// addStr writes a length-prefixed, NUL-terminated BSON string.
func (e *encoder) addStr(v string) {
	e.addInt32(int32(len(v) + 1))
	e.addCStr(v)
}

// addCStr writes a NUL-terminated C string (no length prefix).
func (e *encoder) addCStr(v string) {
	e.addBytes([]byte(v)...)
	e.addBytes(0)
}

// reserveInt32 appends four placeholder bytes and returns their offset,
// to be patched later via setInt32 once the final length is known.
func (e *encoder) reserveInt32() (pos int) {
	pos = len(e.out)
	e.addBytes(0, 0, 0, 0)
	return pos
}

// setInt32 patches a previously reserved little-endian int32 in place.
func (e *encoder) setInt32(pos int, v int32) {
	e.out[pos+0] = byte(v)
	e.out[pos+1] = byte(v >> 8)
	e.out[pos+2] = byte(v >> 16)
	e.out[pos+3] = byte(v >> 24)
}

// addInt32 appends v in little-endian order.
func (e *encoder) addInt32(v int32) {
	u := uint32(v)
	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
}

// addInt64 appends v in little-endian order.
func (e *encoder) addInt64(v int64) {
	u := uint64(v)
	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
		byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
}

// addFloat64 appends v as its IEEE 754 bits, little-endian.
func (e *encoder) addFloat64(v float64) {
	e.addInt64(int64(math.Float64bits(v)))
}

// addBytes appends raw bytes to the output buffer.
func (e *encoder) addBytes(v ...byte) {
	e.out = append(e.out, v...)
} charm-2.1.1/src/gopkg.in/mgo.v2/export_test.go0000664000175000017500000000111212672604565020125 0ustar marcomarcopackage mgo import ( "time" ) func HackPingDelay(newDelay time.Duration) (restore func()) { globalMutex.Lock() defer globalMutex.Unlock() oldDelay := pingDelay restore = func() { globalMutex.Lock() pingDelay = oldDelay globalMutex.Unlock() } pingDelay = newDelay return } func HackSyncSocketTimeout(newTimeout time.Duration) (restore func()) { globalMutex.Lock() defer globalMutex.Unlock() oldTimeout := syncSocketTimeout restore = func() { globalMutex.Lock() syncSocketTimeout = oldTimeout globalMutex.Unlock() } syncSocketTimeout = newTimeout return } charm-2.1.1/src/gopkg.in/mgo.v2/saslstub.go0000664000175000017500000000030212672604565017405 0ustar marcomarco//+build !sasl package mgo import ( "fmt" ) func saslNew(cred Credential, host string) (saslStepper, error) { return nil, fmt.Errorf("SASL support not enabled during build (-tags sasl)") } charm-2.1.1/src/gopkg.in/mgo.v2/queue_test.go0000664000175000017500000000535112672604565017741 0ustar marcomarco// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo import ( . "gopkg.in/check.v1" ) type QS struct{} var _ = Suite(&QS{}) func (s *QS) TestSequentialGrowth(c *C) { q := queue{} n := 2048 for i := 0; i != n; i++ { q.Push(i) } for i := 0; i != n; i++ { c.Assert(q.Pop(), Equals, i) } } var queueTestLists = [][]int{ // {0, 1, 2, 3, 4, 5, 6, 7, 8, 9} {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, // {8, 9, 10, 11, ... 2, 3, 4, 5, 6, 7} {0, 1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, 11}, // {8, 9, 10, 11, ... 2, 3, 4, 5, 6, 7} {0, 1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11}, // {0, 1, 2, 3, 4, 5, 6, 7, 8} {0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}, } func (s *QS) TestQueueTestLists(c *C) { test := []int{} testi := 0 reset := func() { test = test[0:0] testi = 0 } push := func(i int) { test = append(test, i) } pop := func() (i int) { if testi == len(test) { return -1 } i = test[testi] testi++ return } for _, list := range queueTestLists { reset() q := queue{} for _, n := range list { if n == -1 { c.Assert(q.Pop(), Equals, pop(), Commentf("With list %#v", list)) } else { q.Push(n) push(n) } } for n := pop(); n != -1; n = pop() { c.Assert(q.Pop(), Equals, n, Commentf("With list %#v", list)) } c.Assert(q.Pop(), Equals, nil, Commentf("With list %#v", list)) } } charm-2.1.1/src/gopkg.in/mgo.v2/syscall_windows_test.go0000664000175000017500000000026512672604565022040 0ustar marcomarcopackage mgo_test func stop(pid int) (err error) { panicOnWindows() // Always does. 
return nil } func cont(pid int) (err error) { panicOnWindows() // Always does. return nil } charm-2.1.1/src/gopkg.in/mgo.v2/gridfs.go0000664000175000017500000005116312672604565017036 0ustar marcomarco// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
package mgo import ( "crypto/md5" "encoding/hex" "errors" "hash" "io" "os" "sync" "time" "gopkg.in/mgo.v2/bson" ) type GridFS struct { Files *Collection Chunks *Collection } type gfsFileMode int const ( gfsClosed gfsFileMode = 0 gfsReading gfsFileMode = 1 gfsWriting gfsFileMode = 2 ) type GridFile struct { m sync.Mutex c sync.Cond gfs *GridFS mode gfsFileMode err error chunk int offset int64 wpending int wbuf []byte wsum hash.Hash rbuf []byte rcache *gfsCachedChunk doc gfsFile } type gfsFile struct { Id interface{} "_id" ChunkSize int "chunkSize" UploadDate time.Time "uploadDate" Length int64 ",minsize" MD5 string Filename string ",omitempty" ContentType string "contentType,omitempty" Metadata *bson.Raw ",omitempty" } type gfsChunk struct { Id interface{} "_id" FilesId interface{} "files_id" N int Data []byte } type gfsCachedChunk struct { wait sync.Mutex n int data []byte err error } func newGridFS(db *Database, prefix string) *GridFS { return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")} } func (gfs *GridFS) newFile() *GridFile { file := &GridFile{gfs: gfs} file.c.L = &file.m //runtime.SetFinalizer(file, finalizeFile) return file } func finalizeFile(file *GridFile) { file.Close() } // Create creates a new file with the provided name in the GridFS. If the file // name already exists, a new version will be inserted with an up-to-date // uploadDate that will cause it to be atomically visible to the Open and // OpenId methods. If the file name is not important, an empty name may be // provided and the file Id used instead. // // It's important to Close files whether they are being written to // or read from, and to check the err result to ensure the operation // completed successfully. 
// // A simple example inserting a new file: // // func check(err error) { // if err != nil { // panic(err.String()) // } // } // file, err := db.GridFS("fs").Create("myfile.txt") // check(err) // n, err := file.Write([]byte("Hello world!")) // check(err) // err = file.Close() // check(err) // fmt.Printf("%d bytes written\n", n) // // The io.Writer interface is implemented by *GridFile and may be used to // help on the file creation. For example: // // file, err := db.GridFS("fs").Create("myfile.txt") // check(err) // messages, err := os.Open("/var/log/messages") // check(err) // defer messages.Close() // err = io.Copy(file, messages) // check(err) // err = file.Close() // check(err) // func (gfs *GridFS) Create(name string) (file *GridFile, err error) { file = gfs.newFile() file.mode = gfsWriting file.wsum = md5.New() file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name} return } // OpenId returns the file with the provided id, for reading. // If the file isn't found, err will be set to mgo.ErrNotFound. // // It's important to Close files whether they are being written to // or read from, and to check the err result to ensure the operation // completed successfully. // // The following example will print the first 8192 bytes from the file: // // func check(err error) { // if err != nil { // panic(err.String()) // } // } // file, err := db.GridFS("fs").OpenId(objid) // check(err) // b := make([]byte, 8192) // n, err := file.Read(b) // check(err) // fmt.Println(string(b)) // check(err) // err = file.Close() // check(err) // fmt.Printf("%d bytes read\n", n) // // The io.Reader interface is implemented by *GridFile and may be used to // deal with it. 
As an example, the following snippet will dump the whole // file into the standard output: // // file, err := db.GridFS("fs").OpenId(objid) // check(err) // err = io.Copy(os.Stdout, file) // check(err) // err = file.Close() // check(err) // func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) { var doc gfsFile err = gfs.Files.Find(bson.M{"_id": id}).One(&doc) if err != nil { return } file = gfs.newFile() file.mode = gfsReading file.doc = doc return } // Open returns the most recently uploaded file with the provided // name, for reading. If the file isn't found, err will be set // to mgo.ErrNotFound. // // It's important to Close files whether they are being written to // or read from, and to check the err result to ensure the operation // completed successfully. // // The following example will print the first 8192 bytes from the file: // // file, err := db.GridFS("fs").Open("myfile.txt") // check(err) // b := make([]byte, 8192) // n, err := file.Read(b) // check(err) // fmt.Println(string(b)) // check(err) // err = file.Close() // check(err) // fmt.Printf("%d bytes read\n", n) // // The io.Reader interface is implemented by *GridFile and may be used to // deal with it. As an example, the following snippet will dump the whole // file into the standard output: // // file, err := db.GridFS("fs").Open("myfile.txt") // check(err) // err = io.Copy(os.Stdout, file) // check(err) // err = file.Close() // check(err) // func (gfs *GridFS) Open(name string) (file *GridFile, err error) { var doc gfsFile err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc) if err != nil { return } file = gfs.newFile() file.mode = gfsReading file.doc = doc return } // OpenNext opens the next file from iter for reading, sets *file to it, // and returns true on the success case. If no more documents are available // on iter or an error occurred, *file is set to nil and the result is false. // Errors will be available via iter.Err(). 
// // The iter parameter must be an iterator on the GridFS files collection. // Using the GridFS.Find method is an easy way to obtain such an iterator, // but any iterator on the collection will work. // // If the provided *file is non-nil, OpenNext will close it before attempting // to iterate to the next element. This means that in a loop one only // has to worry about closing files when breaking out of the loop early // (break, return, or panic). // // For example: // // gfs := db.GridFS("fs") // query := gfs.Find(nil).Sort("filename") // iter := query.Iter() // var f *mgo.GridFile // for gfs.OpenNext(iter, &f) { // fmt.Printf("Filename: %s\n", f.Name()) // } // if iter.Close() != nil { // panic(iter.Close()) // } // func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool { if *file != nil { // Ignoring the error here shouldn't be a big deal // as we're reading the file and the loop iteration // for this file is finished. _ = (*file).Close() } var doc gfsFile if !iter.Next(&doc) { *file = nil return false } f := gfs.newFile() f.mode = gfsReading f.doc = doc *file = f return true } // Find runs query on GridFS's files collection and returns // the resulting Query. // // This logic: // // gfs := db.GridFS("fs") // iter := gfs.Find(nil).Iter() // // Is equivalent to: // // files := db.C("fs" + ".files") // iter := files.Find(nil).Iter() // func (gfs *GridFS) Find(query interface{}) *Query { return gfs.Files.Find(query) } // RemoveId deletes the file with the provided id from the GridFS. func (gfs *GridFS) RemoveId(id interface{}) error { err := gfs.Files.Remove(bson.M{"_id": id}) if err != nil { return err } _, err = gfs.Chunks.RemoveAll(bson.D{{"files_id", id}}) return err } type gfsDocId struct { Id interface{} "_id" } // Remove deletes all files with the provided name from the GridFS. 
func (gfs *GridFS) Remove(name string) (err error) { iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter() var doc gfsDocId for iter.Next(&doc) { if e := gfs.RemoveId(doc.Id); e != nil { err = e } } if err == nil { err = iter.Close() } return err } func (file *GridFile) assertMode(mode gfsFileMode) { switch file.mode { case mode: return case gfsWriting: panic("GridFile is open for writing") case gfsReading: panic("GridFile is open for reading") case gfsClosed: panic("GridFile is closed") default: panic("internal error: missing GridFile mode") } } // SetChunkSize sets size of saved chunks. Once the file is written to, it // will be split in blocks of that size and each block saved into an // independent chunk document. The default chunk size is 256kb. // // It is a runtime error to call this function once the file has started // being written to. func (file *GridFile) SetChunkSize(bytes int) { file.assertMode(gfsWriting) debugf("GridFile %p: setting chunk size to %d", file, bytes) file.m.Lock() file.doc.ChunkSize = bytes file.m.Unlock() } // Id returns the current file Id. func (file *GridFile) Id() interface{} { return file.doc.Id } // SetId changes the current file Id. // // It is a runtime error to call this function once the file has started // being written to, or when the file is not open for writing. func (file *GridFile) SetId(id interface{}) { file.assertMode(gfsWriting) file.m.Lock() file.doc.Id = id file.m.Unlock() } // Name returns the optional file name. An empty string will be returned // in case it is unset. func (file *GridFile) Name() string { return file.doc.Filename } // SetName changes the optional file name. An empty string may be used to // unset it. // // It is a runtime error to call this function when the file is not open // for writing. 
func (file *GridFile) SetName(name string) { file.assertMode(gfsWriting) file.m.Lock() file.doc.Filename = name file.m.Unlock() } // ContentType returns the optional file content type. An empty string will be // returned in case it is unset. func (file *GridFile) ContentType() string { return file.doc.ContentType } // ContentType changes the optional file content type. An empty string may be // used to unset it. // // It is a runtime error to call this function when the file is not open // for writing. func (file *GridFile) SetContentType(ctype string) { file.assertMode(gfsWriting) file.m.Lock() file.doc.ContentType = ctype file.m.Unlock() } // GetMeta unmarshals the optional "metadata" field associated with the // file into the result parameter. The meaning of keys under that field // is user-defined. For example: // // result := struct{ INode int }{} // err = file.GetMeta(&result) // if err != nil { // panic(err.String()) // } // fmt.Printf("inode: %d\n", result.INode) // func (file *GridFile) GetMeta(result interface{}) (err error) { file.m.Lock() if file.doc.Metadata != nil { err = bson.Unmarshal(file.doc.Metadata.Data, result) } file.m.Unlock() return } // SetMeta changes the optional "metadata" field associated with the // file. The meaning of keys under that field is user-defined. // For example: // // file.SetMeta(bson.M{"inode": inode}) // // It is a runtime error to call this function when the file is not open // for writing. func (file *GridFile) SetMeta(metadata interface{}) { file.assertMode(gfsWriting) data, err := bson.Marshal(metadata) file.m.Lock() if err != nil && file.err == nil { file.err = err } else { file.doc.Metadata = &bson.Raw{Data: data} } file.m.Unlock() } // Size returns the file size in bytes. func (file *GridFile) Size() (bytes int64) { file.m.Lock() bytes = file.doc.Length file.m.Unlock() return } // MD5 returns the file MD5 as a hex-encoded string. 
func (file *GridFile) MD5() (md5 string) { return file.doc.MD5 } // UploadDate returns the file upload time. func (file *GridFile) UploadDate() time.Time { return file.doc.UploadDate } // SetUploadDate changes the file upload time. // // It is a runtime error to call this function when the file is not open // for writing. func (file *GridFile) SetUploadDate(t time.Time) { file.assertMode(gfsWriting) file.m.Lock() file.doc.UploadDate = t file.m.Unlock() } // Close flushes any pending changes in case the file is being written // to, waits for any background operations to finish, and closes the file. // // It's important to Close files whether they are being written to // or read from, and to check the err result to ensure the operation // completed successfully. func (file *GridFile) Close() (err error) { file.m.Lock() defer file.m.Unlock() if file.mode == gfsWriting { if len(file.wbuf) > 0 && file.err == nil { file.insertChunk(file.wbuf) file.wbuf = file.wbuf[0:0] } file.completeWrite() } else if file.mode == gfsReading && file.rcache != nil { file.rcache.wait.Lock() file.rcache = nil } file.mode = gfsClosed debugf("GridFile %p: closed", file) return file.err } func (file *GridFile) completeWrite() { for file.wpending > 0 { debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending) file.c.Wait() } if file.err == nil { hexsum := hex.EncodeToString(file.wsum.Sum(nil)) if file.doc.UploadDate.IsZero() { file.doc.UploadDate = bson.Now() } file.doc.MD5 = hexsum file.err = file.gfs.Files.Insert(file.doc) file.gfs.Chunks.EnsureIndexKey("files_id", "n") } if file.err != nil { file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}}) } } // Abort cancels an in-progress write, preventing the file from being // automically created and ensuring previously written chunks are // removed when the file is closed. // // It is a runtime error to call Abort when the file was not opened // for writing. 
func (file *GridFile) Abort() { if file.mode != gfsWriting { panic("file.Abort must be called on file opened for writing") } file.err = errors.New("write aborted") } // Write writes the provided data to the file and returns the // number of bytes written and an error in case something // wrong happened. // // The file will internally cache the data so that all but the last // chunk sent to the database have the size defined by SetChunkSize. // This also means that errors may be deferred until a future call // to Write or Close. // // The parameters and behavior of this function turn the file // into an io.Writer. func (file *GridFile) Write(data []byte) (n int, err error) { file.assertMode(gfsWriting) file.m.Lock() debugf("GridFile %p: writing %d bytes", file, len(data)) defer file.m.Unlock() if file.err != nil { return 0, file.err } n = len(data) file.doc.Length += int64(n) chunkSize := file.doc.ChunkSize if len(file.wbuf)+len(data) < chunkSize { file.wbuf = append(file.wbuf, data...) return } // First, flush file.wbuf complementing with data. if len(file.wbuf) > 0 { missing := chunkSize - len(file.wbuf) if missing > len(data) { missing = len(data) } file.wbuf = append(file.wbuf, data[:missing]...) data = data[missing:] file.insertChunk(file.wbuf) file.wbuf = file.wbuf[0:0] } // Then, flush all chunks from data without copying. for len(data) > chunkSize { size := chunkSize if size > len(data) { size = len(data) } file.insertChunk(data[:size]) data = data[size:] } // And append the rest for a future call. file.wbuf = append(file.wbuf, data...) return n, file.err } func (file *GridFile) insertChunk(data []byte) { n := file.chunk file.chunk++ debugf("GridFile %p: adding to checksum: %q", file, string(data)) file.wsum.Write(data) for file.doc.ChunkSize*file.wpending >= 1024*1024 { // Hold on.. we got a MB pending. 
file.c.Wait() if file.err != nil { return } } file.wpending++ debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data)) // We may not own the memory of data, so rather than // simply copying it, we'll marshal the document ahead of time. data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data}) if err != nil { file.err = err return } go func() { err := file.gfs.Chunks.Insert(bson.Raw{Data: data}) file.m.Lock() file.wpending-- if err != nil && file.err == nil { file.err = err } file.c.Broadcast() file.m.Unlock() }() } // Seek sets the offset for the next Read or Write on file to // offset, interpreted according to whence: 0 means relative to // the origin of the file, 1 means relative to the current offset, // and 2 means relative to the end. It returns the new offset and // an error, if any. func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) { file.m.Lock() debugf("GridFile %p: seeking for %s (whence=%d)", file, offset, whence) defer file.m.Unlock() switch whence { case os.SEEK_SET: case os.SEEK_CUR: offset += file.offset case os.SEEK_END: offset += file.doc.Length default: panic("unsupported whence value") } if offset > file.doc.Length { return file.offset, errors.New("seek past end of file") } if offset == file.doc.Length { // If we're seeking to the end of the file, // no need to read anything. This enables // a client to find the size of the file using only the // io.ReadSeeker interface with low overhead. 
file.offset = offset return file.offset, nil } chunk := int(offset / int64(file.doc.ChunkSize)) if chunk+1 == file.chunk && offset >= file.offset { file.rbuf = file.rbuf[int(offset-file.offset):] file.offset = offset return file.offset, nil } file.offset = offset file.chunk = chunk file.rbuf = nil file.rbuf, err = file.getChunk() if err == nil { file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):] } return file.offset, err } // Read reads into b the next available data from the file and // returns the number of bytes written and an error in case // something wrong happened. At the end of the file, n will // be zero and err will be set to io.EOF. // // The parameters and behavior of this function turn the file // into an io.Reader. func (file *GridFile) Read(b []byte) (n int, err error) { file.assertMode(gfsReading) file.m.Lock() debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b)) defer file.m.Unlock() if file.offset == file.doc.Length { return 0, io.EOF } for err == nil { i := copy(b, file.rbuf) n += i file.offset += int64(i) file.rbuf = file.rbuf[i:] if i == len(b) || file.offset == file.doc.Length { break } b = b[i:] file.rbuf, err = file.getChunk() } return n, err } func (file *GridFile) getChunk() (data []byte, err error) { cache := file.rcache file.rcache = nil if cache != nil && cache.n == file.chunk { debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk) cache.wait.Lock() data, err = cache.data, cache.err } else { debugf("GridFile %p: Fetching chunk %d", file, file.chunk) var doc gfsChunk err = file.gfs.Chunks.Find(bson.D{{"files_id", file.doc.Id}, {"n", file.chunk}}).One(&doc) data = doc.Data } file.chunk++ if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length { // Read the next one in background. 
cache = &gfsCachedChunk{n: file.chunk} cache.wait.Lock() debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk) // Clone the session to avoid having it closed in between. chunks := file.gfs.Chunks session := chunks.Database.Session.Clone() go func(id interface{}, n int) { defer session.Close() chunks = chunks.With(session) var doc gfsChunk cache.err = chunks.Find(bson.D{{"files_id", id}, {"n", n}}).One(&doc) cache.data = doc.Data cache.wait.Unlock() }(file.doc.Id, file.chunk) file.rcache = cache } debugf("Returning err: %#v", err) return } charm-2.1.1/src/gopkg.in/mgo.v2/cluster.go0000664000175000017500000004542712672604565017247 0ustar marcomarco// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo import ( "errors" "fmt" "net" "strconv" "strings" "sync" "time" "gopkg.in/mgo.v2/bson" ) // --------------------------------------------------------------------------- // Mongo cluster encapsulation. // // A cluster enables the communication with one or more servers participating // in a mongo cluster. This works with individual servers, a replica set, // a replica pair, one or multiple mongos routers, etc. type mongoCluster struct { sync.RWMutex serverSynced sync.Cond userSeeds []string dynaSeeds []string servers mongoServers masters mongoServers references int syncing bool direct bool failFast bool syncCount uint setName string cachedIndex map[string]bool sync chan bool dial dialer } func newCluster(userSeeds []string, direct, failFast bool, dial dialer, setName string) *mongoCluster { cluster := &mongoCluster{ userSeeds: userSeeds, references: 1, direct: direct, failFast: failFast, dial: dial, setName: setName, } cluster.serverSynced.L = cluster.RWMutex.RLocker() cluster.sync = make(chan bool, 1) stats.cluster(+1) go cluster.syncServersLoop() return cluster } // Acquire increases the reference count for the cluster. func (cluster *mongoCluster) Acquire() { cluster.Lock() cluster.references++ debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references) cluster.Unlock() } // Release decreases the reference count for the cluster. Once // it reaches zero, all servers will be closed. 
func (cluster *mongoCluster) Release() { cluster.Lock() if cluster.references == 0 { panic("cluster.Release() with references == 0") } cluster.references-- debugf("Cluster %p released (refs=%d)", cluster, cluster.references) if cluster.references == 0 { for _, server := range cluster.servers.Slice() { server.Close() } // Wake up the sync loop so it can die. cluster.syncServers() stats.cluster(-1) } cluster.Unlock() } func (cluster *mongoCluster) LiveServers() (servers []string) { cluster.RLock() for _, serv := range cluster.servers.Slice() { servers = append(servers, serv.Addr) } cluster.RUnlock() return servers } func (cluster *mongoCluster) removeServer(server *mongoServer) { cluster.Lock() cluster.masters.Remove(server) other := cluster.servers.Remove(server) cluster.Unlock() if other != nil { other.Close() log("Removed server ", server.Addr, " from cluster.") } server.Close() } type isMasterResult struct { IsMaster bool Secondary bool Primary string Hosts []string Passives []string Tags bson.D Msg string SetName string `bson:"setName"` MaxWireVersion int `bson:"maxWireVersion"` } func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error { // Monotonic let's it talk to a slave and still hold the socket. session := newSession(Monotonic, cluster, 10*time.Second) session.setSocket(socket) err := session.Run("ismaster", result) session.Close() return err } type possibleTimeout interface { Timeout() bool } var syncSocketTimeout = 5 * time.Second func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) { var syncTimeout time.Duration if raceDetector { // This variable is only ever touched by tests. globalMutex.Lock() syncTimeout = syncSocketTimeout globalMutex.Unlock() } else { syncTimeout = syncSocketTimeout } addr := server.Addr log("SYNC Processing ", addr, "...") // Retry a few times to avoid knocking a server down for a hiccup. 
var result isMasterResult var tryerr error for retry := 0; ; retry++ { if retry == 3 || retry == 1 && cluster.failFast { return nil, nil, tryerr } if retry > 0 { // Don't abuse the server needlessly if there's something actually wrong. if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() { // Give a chance for waiters to timeout as well. cluster.serverSynced.Broadcast() } time.Sleep(syncShortDelay) } // It's not clear what would be a good timeout here. Is it // better to wait longer or to retry? socket, _, err := server.AcquireSocket(0, syncTimeout) if err != nil { tryerr = err logf("SYNC Failed to get socket to %s: %v", addr, err) continue } err = cluster.isMaster(socket, &result) socket.Release() if err != nil { tryerr = err logf("SYNC Command 'ismaster' to %s failed: %v", addr, err) continue } debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result) break } if cluster.setName != "" && result.SetName != cluster.setName { logf("SYNC Server %s is not a member of replica set %q", addr, cluster.setName) return nil, nil, fmt.Errorf("server %s is not a member of replica set %q", addr, cluster.setName) } if result.IsMaster { debugf("SYNC %s is a master.", addr) if !server.info.Master { // Made an incorrect assumption above, so fix stats. stats.conn(-1, false) stats.conn(+1, true) } } else if result.Secondary { debugf("SYNC %s is a slave.", addr) } else if cluster.direct { logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr) } else { logf("SYNC %s is neither a master nor a slave.", addr) // Let stats track it as whatever was known before. return nil, nil, errors.New(addr + " is not a master nor slave") } info = &mongoServerInfo{ Master: result.IsMaster, Mongos: result.Msg == "isdbgrid", Tags: result.Tags, SetName: result.SetName, MaxWireVersion: result.MaxWireVersion, } hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives)) if result.Primary != "" { // First in the list to speed up master discovery. 
hosts = append(hosts, result.Primary) } hosts = append(hosts, result.Hosts...) hosts = append(hosts, result.Passives...) debugf("SYNC %s knows about the following peers: %#v", addr, hosts) return info, hosts, nil } type syncKind bool const ( completeSync syncKind = true partialSync syncKind = false ) func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) { cluster.Lock() current := cluster.servers.Search(server.ResolvedAddr) if current == nil { if syncKind == partialSync { cluster.Unlock() server.Close() log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.") return } cluster.servers.Add(server) if info.Master { cluster.masters.Add(server) log("SYNC Adding ", server.Addr, " to cluster as a master.") } else { log("SYNC Adding ", server.Addr, " to cluster as a slave.") } } else { if server != current { panic("addServer attempting to add duplicated server") } if server.Info().Master != info.Master { if info.Master { log("SYNC Server ", server.Addr, " is now a master.") cluster.masters.Add(server) } else { log("SYNC Server ", server.Addr, " is now a slave.") cluster.masters.Remove(server) } } } server.SetInfo(info) debugf("SYNC Broadcasting availability of server %s", server.Addr) cluster.serverSynced.Broadcast() cluster.Unlock() } func (cluster *mongoCluster) getKnownAddrs() []string { cluster.RLock() max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len() seen := make(map[string]bool, max) known := make([]string, 0, max) add := func(addr string) { if _, found := seen[addr]; !found { seen[addr] = true known = append(known, addr) } } for _, addr := range cluster.userSeeds { add(addr) } for _, addr := range cluster.dynaSeeds { add(addr) } for _, serv := range cluster.servers.Slice() { add(serv.Addr) } cluster.RUnlock() return known } // syncServers injects a value into the cluster.sync channel to force // an iteration of the syncServersLoop function. 
func (cluster *mongoCluster) syncServers() { select { case cluster.sync <- true: default: } } // How long to wait for a checkup of the cluster topology if nothing // else kicks a synchronization before that. const syncServersDelay = 30 * time.Second const syncShortDelay = 500 * time.Millisecond // syncServersLoop loops while the cluster is alive to keep its idea of // the server topology up-to-date. It must be called just once from // newCluster. The loop iterates once syncServersDelay has passed, or // if somebody injects a value into the cluster.sync channel to force a // synchronization. A loop iteration will contact all servers in // parallel, ask them about known peers and their own role within the // cluster, and then attempt to do the same with all the peers // retrieved. func (cluster *mongoCluster) syncServersLoop() { for { debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster) cluster.Lock() if cluster.references == 0 { cluster.Unlock() break } cluster.references++ // Keep alive while syncing. direct := cluster.direct cluster.Unlock() cluster.syncServersIteration(direct) // We just synchronized, so consume any outstanding requests. select { case <-cluster.sync: default: } cluster.Release() // Hold off before allowing another sync. No point in // burning CPU looking for down servers. if !cluster.failFast { time.Sleep(syncShortDelay) } cluster.Lock() if cluster.references == 0 { cluster.Unlock() break } cluster.syncCount++ // Poke all waiters so they have a chance to timeout or // restart syncing if they wish to. cluster.serverSynced.Broadcast() // Check if we have to restart immediately either way. restart := !direct && cluster.masters.Empty() || cluster.servers.Empty() cluster.Unlock() if restart { log("SYNC No masters found. 
Will synchronize again.") time.Sleep(syncShortDelay) continue } debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster) // Hold off until somebody explicitly requests a synchronization // or it's time to check for a cluster topology change again. select { case <-cluster.sync: case <-time.After(syncServersDelay): } } debugf("SYNC Cluster %p is stopping its sync loop.", cluster) } func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer { cluster.RLock() server := cluster.servers.Search(tcpaddr.String()) cluster.RUnlock() if server != nil { return server } return newServer(addr, tcpaddr, cluster.sync, cluster.dial) } func resolveAddr(addr string) (*net.TCPAddr, error) { // Simple cases that do not need actual resolution. Works with IPv4 and v6. if host, port, err := net.SplitHostPort(addr); err == nil { if port, _ := strconv.Atoi(port); port > 0 { zone := "" if i := strings.LastIndex(host, "%"); i >= 0 { zone = host[i+1:] host = host[:i] } ip := net.ParseIP(host) if ip != nil { return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil } } } // Attempt to resolve IPv4 and v6 concurrently. addrChan := make(chan *net.TCPAddr, 2) for _, network := range []string{"udp4", "udp6"} { network := network go func() { // The unfortunate UDP dialing hack allows having a timeout on address resolution. conn, err := net.DialTimeout(network, addr, 10*time.Second) if err != nil { addrChan <- nil } else { addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr)) conn.Close() } }() } // Wait for the result of IPv4 and v6 resolution. Use IPv4 if available. tcpaddr := <-addrChan if tcpaddr == nil || len(tcpaddr.IP) != 4 { var timeout <-chan time.Time if tcpaddr != nil { // Don't wait too long if an IPv6 address is known. timeout = time.After(50 * time.Millisecond) } select { case <-timeout: case tcpaddr2 := <-addrChan: if tcpaddr == nil || tcpaddr2 != nil { // It's an IPv4 address or the only known address. Use it. 
tcpaddr = tcpaddr2 } } } if tcpaddr == nil { log("SYNC Failed to resolve server address: ", addr) return nil, errors.New("failed to resolve server address: " + addr) } if tcpaddr.String() != addr { debug("SYNC Address ", addr, " resolved as ", tcpaddr.String()) } return tcpaddr, nil } type pendingAdd struct { server *mongoServer info *mongoServerInfo } func (cluster *mongoCluster) syncServersIteration(direct bool) { log("SYNC Starting full topology synchronization...") var wg sync.WaitGroup var m sync.Mutex notYetAdded := make(map[string]pendingAdd) addIfFound := make(map[string]bool) seen := make(map[string]bool) syncKind := partialSync var spawnSync func(addr string, byMaster bool) spawnSync = func(addr string, byMaster bool) { wg.Add(1) go func() { defer wg.Done() tcpaddr, err := resolveAddr(addr) if err != nil { log("SYNC Failed to start sync of ", addr, ": ", err.Error()) return } resolvedAddr := tcpaddr.String() m.Lock() if byMaster { if pending, ok := notYetAdded[resolvedAddr]; ok { delete(notYetAdded, resolvedAddr) m.Unlock() cluster.addServer(pending.server, pending.info, completeSync) return } addIfFound[resolvedAddr] = true } if seen[resolvedAddr] { m.Unlock() return } seen[resolvedAddr] = true m.Unlock() server := cluster.server(addr, tcpaddr) info, hosts, err := cluster.syncServer(server) if err != nil { cluster.removeServer(server) return } m.Lock() add := direct || info.Master || addIfFound[resolvedAddr] if add { syncKind = completeSync } else { notYetAdded[resolvedAddr] = pendingAdd{server, info} } m.Unlock() if add { cluster.addServer(server, info, completeSync) } if !direct { for _, addr := range hosts { spawnSync(addr, info.Master) } } }() } knownAddrs := cluster.getKnownAddrs() for _, addr := range knownAddrs { spawnSync(addr, false) } wg.Wait() if syncKind == completeSync { logf("SYNC Synchronization was complete (got data from primary).") for _, pending := range notYetAdded { cluster.removeServer(pending.server) } } else { logf("SYNC 
Synchronization was partial (cannot talk to primary).") for _, pending := range notYetAdded { cluster.addServer(pending.server, pending.info, partialSync) } } cluster.Lock() mastersLen := cluster.masters.Len() logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", mastersLen, cluster.servers.Len()-mastersLen) // Update dynamic seeds, but only if we have any good servers. Otherwise, // leave them alone for better chances of a successful sync in the future. if syncKind == completeSync { dynaSeeds := make([]string, cluster.servers.Len()) for i, server := range cluster.servers.Slice() { dynaSeeds[i] = server.Addr } cluster.dynaSeeds = dynaSeeds debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds) } cluster.Unlock() } // AcquireSocket returns a socket to a server in the cluster. If slaveOk is // true, it will attempt to return a socket to a slave server. If it is // false, the socket will necessarily be to a master server. func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) { var started time.Time var syncCount uint warnedLimit := false for { cluster.RLock() for { mastersLen := cluster.masters.Len() slavesLen := cluster.servers.Len() - mastersLen debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen) if !(slaveOk && mode == Secondary) && mastersLen > 0 || slaveOk && slavesLen > 0 { break } if started.IsZero() { // Initialize after fast path above. started = time.Now() syncCount = cluster.syncCount } else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount { cluster.RUnlock() return nil, errors.New("no reachable servers") } log("Waiting for servers to synchronize...") cluster.syncServers() // Remember: this will release and reacquire the lock. 
cluster.serverSynced.Wait() } var server *mongoServer if slaveOk { server = cluster.servers.BestFit(mode, serverTags) } else { server = cluster.masters.BestFit(mode, nil) } cluster.RUnlock() if server == nil { // Must have failed the requested tags. Sleep to avoid spinning. time.Sleep(1e8) continue } s, abended, err := server.AcquireSocket(poolLimit, socketTimeout) if err == errPoolLimit { if !warnedLimit { warnedLimit = true log("WARNING: Per-server connection limit reached.") } time.Sleep(100 * time.Millisecond) continue } if err != nil { cluster.removeServer(server) cluster.syncServers() continue } if abended && !slaveOk { var result isMasterResult err := cluster.isMaster(s, &result) if err != nil || !result.IsMaster { logf("Cannot confirm server %s as master (%v)", server.Addr, err) s.Release() cluster.syncServers() time.Sleep(100 * time.Millisecond) continue } } return s, nil } panic("unreached") } func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) { cluster.Lock() if cluster.cachedIndex == nil { cluster.cachedIndex = make(map[string]bool) } if exists { cluster.cachedIndex[cacheKey] = true } else { delete(cluster.cachedIndex, cacheKey) } cluster.Unlock() } func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) { cluster.RLock() if cluster.cachedIndex != nil { result = cluster.cachedIndex[cacheKey] } cluster.RUnlock() return } func (cluster *mongoCluster) ResetIndexCache() { cluster.Lock() cluster.cachedIndex = make(map[string]bool) cluster.Unlock() } charm-2.1.1/src/gopkg.in/mgo.v2/socket.go0000664000175000017500000004351612672604565017053 0ustar marcomarco// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. 
Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo import ( "errors" "fmt" "net" "sync" "time" "gopkg.in/mgo.v2/bson" ) type replyFunc func(err error, reply *replyOp, docNum int, docData []byte) type mongoSocket struct { sync.Mutex server *mongoServer // nil when cached conn net.Conn timeout time.Duration addr string // For debugging only. 
nextRequestId uint32 replyFuncs map[uint32]replyFunc references int creds []Credential logout []Credential cachedNonce string gotNonce sync.Cond dead error serverInfo *mongoServerInfo } type queryOpFlags uint32 const ( _ queryOpFlags = 1 << iota flagTailable flagSlaveOk flagLogReplay flagNoCursorTimeout flagAwaitData ) type queryOp struct { collection string query interface{} skip int32 limit int32 selector interface{} flags queryOpFlags replyFunc replyFunc mode Mode options queryWrapper hasOptions bool serverTags []bson.D } type queryWrapper struct { Query interface{} "$query" OrderBy interface{} "$orderby,omitempty" Hint interface{} "$hint,omitempty" Explain bool "$explain,omitempty" Snapshot bool "$snapshot,omitempty" ReadPreference bson.D "$readPreference,omitempty" MaxScan int "$maxScan,omitempty" MaxTimeMS int "$maxTimeMS,omitempty" Comment string "$comment,omitempty" } func (op *queryOp) finalQuery(socket *mongoSocket) interface{} { if op.flags&flagSlaveOk != 0 && socket.ServerInfo().Mongos { var modeName string switch op.mode { case Strong: modeName = "primary" case Monotonic, Eventual: modeName = "secondaryPreferred" case PrimaryPreferred: modeName = "primaryPreferred" case Secondary: modeName = "secondary" case SecondaryPreferred: modeName = "secondaryPreferred" case Nearest: modeName = "nearest" default: panic(fmt.Sprintf("unsupported read mode: %d", op.mode)) } op.hasOptions = true op.options.ReadPreference = make(bson.D, 0, 2) op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{"mode", modeName}) if len(op.serverTags) > 0 { op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{"tags", op.serverTags}) } } if op.hasOptions { if op.query == nil { var empty bson.D op.options.Query = empty } else { op.options.Query = op.query } debugf("final query is %#v\n", &op.options) return &op.options } return op.query } type getMoreOp struct { collection string limit int32 cursorId int64 replyFunc replyFunc } type replyOp 
struct { flags uint32 cursorId int64 firstDoc int32 replyDocs int32 } type insertOp struct { collection string // "database.collection" documents []interface{} // One or more documents to insert flags uint32 } type updateOp struct { Collection string `bson:"-"` // "database.collection" Selector interface{} `bson:"q"` Update interface{} `bson:"u"` Flags uint32 `bson:"-"` Multi bool `bson:"multi,omitempty"` Upsert bool `bson:"upsert,omitempty"` } type deleteOp struct { collection string // "database.collection" selector interface{} flags uint32 } type killCursorsOp struct { cursorIds []int64 } type requestInfo struct { bufferPos int replyFunc replyFunc } func newSocket(server *mongoServer, conn net.Conn, timeout time.Duration) *mongoSocket { socket := &mongoSocket{ conn: conn, addr: server.Addr, server: server, replyFuncs: make(map[uint32]replyFunc), } socket.gotNonce.L = &socket.Mutex if err := socket.InitialAcquire(server.Info(), timeout); err != nil { panic("newSocket: InitialAcquire returned error: " + err.Error()) } stats.socketsAlive(+1) debugf("Socket %p to %s: initialized", socket, socket.addr) socket.resetNonce() go socket.readLoop() return socket } // Server returns the server that the socket is associated with. // It returns nil while the socket is cached in its respective server. func (socket *mongoSocket) Server() *mongoServer { socket.Lock() server := socket.server socket.Unlock() return server } // ServerInfo returns details for the server at the time the socket // was initially acquired. func (socket *mongoSocket) ServerInfo() *mongoServerInfo { socket.Lock() serverInfo := socket.serverInfo socket.Unlock() return serverInfo } // InitialAcquire obtains the first reference to the socket, either // right after the connection is made or once a recycled socket is // being put back in use. 
func (socket *mongoSocket) InitialAcquire(serverInfo *mongoServerInfo, timeout time.Duration) error {
	socket.Lock()
	// A socket coming out of the cache must have no outstanding references.
	if socket.references > 0 {
		panic("Socket acquired out of cache with references")
	}
	// A dead socket cannot be reused; report why it died.
	if socket.dead != nil {
		dead := socket.dead
		socket.Unlock()
		return dead
	}
	socket.references++
	socket.serverInfo = serverInfo
	socket.timeout = timeout
	stats.socketsInUse(+1)
	stats.socketRefs(+1)
	socket.Unlock()
	return nil
}

// Acquire obtains an additional reference to the socket.
// The socket will only be recycled when it's released as many
// times as it's been acquired. It returns the server info recorded
// when the socket was initially acquired.
func (socket *mongoSocket) Acquire() (info *mongoServerInfo) {
	socket.Lock()
	if socket.references == 0 {
		panic("Socket got non-initial acquire with references == 0")
	}
	// We'll track references to dead sockets as well.
	// Caller is still supposed to release the socket.
	socket.references++
	stats.socketRefs(+1)
	serverInfo := socket.serverInfo
	socket.Unlock()
	return serverInfo
}

// Release decrements a socket reference. The socket will be
// recycled once its released as many times as it's been acquired.
func (socket *mongoSocket) Release() {
	socket.Lock()
	if socket.references == 0 {
		panic("socket.Release() with references == 0")
	}
	socket.references--
	stats.socketRefs(-1)
	if socket.references == 0 {
		stats.socketsInUse(-1)
		server := socket.server
		// Unlock before LogoutAll/RecycleSocket; both may need the
		// socket lock themselves.
		socket.Unlock()
		socket.LogoutAll()
		// If the socket is dead server is nil.
		if server != nil {
			server.RecycleSocket(socket)
		}
	} else {
		socket.Unlock()
	}
}

// SetTimeout changes the timeout used on socket operations.
func (socket *mongoSocket) SetTimeout(d time.Duration) {
	socket.Lock()
	socket.timeout = d
	socket.Unlock()
}

// deadlineType selects which connection deadline updateDeadline refreshes.
type deadlineType int

const (
	readDeadline  deadlineType = 1
	writeDeadline deadlineType = 2
)

// updateDeadline pushes the read and/or write deadline on the underlying
// connection socket.timeout into the future (or clears it when the
// timeout is zero, since `when` stays the zero time.Time).
func (socket *mongoSocket) updateDeadline(which deadlineType) {
	var when time.Time
	if socket.timeout > 0 {
		when = time.Now().Add(socket.timeout)
	}
	whichstr := ""
	switch which {
	// NOTE(review): readDeadline|writeDeadline == 3, so this case only
	// fires when both flags are passed together.
	case readDeadline | writeDeadline:
		whichstr = "read/write"
		socket.conn.SetDeadline(when)
	case readDeadline:
		whichstr = "read"
		socket.conn.SetReadDeadline(when)
	case writeDeadline:
		whichstr = "write"
		socket.conn.SetWriteDeadline(when)
	default:
		panic("invalid parameter to updateDeadline")
	}
	debugf("Socket %p to %s: updated %s deadline to %s ahead (%s)", socket, socket.addr, whichstr, socket.timeout, when)
}

// Close terminates the socket use.
func (socket *mongoSocket) Close() {
	socket.kill(errors.New("Closed explicitly"), false)
}

// kill marks the socket as dead with the given error, closes the
// underlying connection, and notifies every pending replyFunc of the
// failure. When abend is true the owning server is told the socket
// ended abnormally.
func (socket *mongoSocket) kill(err error, abend bool) {
	socket.Lock()
	// Killing an already-dead socket is a no-op; keep the first cause.
	if socket.dead != nil {
		debugf("Socket %p to %s: killed again: %s (previously: %s)", socket, socket.addr, err.Error(), socket.dead.Error())
		socket.Unlock()
		return
	}
	logf("Socket %p to %s: closing: %s (abend=%v)", socket, socket.addr, err.Error(), abend)
	socket.dead = err
	socket.conn.Close()
	stats.socketsAlive(-1)
	// Take ownership of pending reply callbacks so they can be invoked
	// outside the lock.
	replyFuncs := socket.replyFuncs
	socket.replyFuncs = make(map[uint32]replyFunc)
	server := socket.server
	socket.server = nil
	socket.gotNonce.Broadcast()
	socket.Unlock()
	for _, replyFunc := range replyFuncs {
		logf("Socket %p to %s: notifying replyFunc of closed socket: %s", socket, socket.addr, err.Error())
		replyFunc(err, nil, -1, nil)
	}
	if abend {
		server.AbendSocket(socket)
	}
}

// SimpleQuery runs a single query operation synchronously and returns
// the raw BSON data of the first reply document.
func (socket *mongoSocket) SimpleQuery(op *queryOp) (data []byte, err error) {
	// wait blocks the caller until the reply arrives; change guards the
	// reply fields against concurrent callbacks.
	var wait, change sync.Mutex
	var replyDone bool
	var replyData []byte
	var replyErr error
	wait.Lock()
	op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
		change.Lock()
		if !replyDone {
			replyDone = true
			replyErr = err
			if
err == nil { replyData = docData } } change.Unlock() wait.Unlock() } err = socket.Query(op) if err != nil { return nil, err } wait.Lock() change.Lock() data = replyData err = replyErr change.Unlock() return data, err } func (socket *mongoSocket) Query(ops ...interface{}) (err error) { if lops := socket.flushLogout(); len(lops) > 0 { ops = append(lops, ops...) } buf := make([]byte, 0, 256) // Serialize operations synchronously to avoid interrupting // other goroutines while we can't really be sending data. // Also, record id positions so that we can compute request // ids at once later with the lock already held. requests := make([]requestInfo, len(ops)) requestCount := 0 for _, op := range ops { debugf("Socket %p to %s: serializing op: %#v", socket, socket.addr, op) start := len(buf) var replyFunc replyFunc switch op := op.(type) { case *updateOp: buf = addHeader(buf, 2001) buf = addInt32(buf, 0) // Reserved buf = addCString(buf, op.Collection) buf = addInt32(buf, int32(op.Flags)) debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.Selector) buf, err = addBSON(buf, op.Selector) if err != nil { return err } debugf("Socket %p to %s: serializing update document: %#v", socket, socket.addr, op.Update) buf, err = addBSON(buf, op.Update) if err != nil { return err } case *insertOp: buf = addHeader(buf, 2002) buf = addInt32(buf, int32(op.flags)) buf = addCString(buf, op.collection) for _, doc := range op.documents { debugf("Socket %p to %s: serializing document for insertion: %#v", socket, socket.addr, doc) buf, err = addBSON(buf, doc) if err != nil { return err } } case *queryOp: buf = addHeader(buf, 2004) buf = addInt32(buf, int32(op.flags)) buf = addCString(buf, op.collection) buf = addInt32(buf, op.skip) buf = addInt32(buf, op.limit) buf, err = addBSON(buf, op.finalQuery(socket)) if err != nil { return err } if op.selector != nil { buf, err = addBSON(buf, op.selector) if err != nil { return err } } replyFunc = op.replyFunc case 
*getMoreOp: buf = addHeader(buf, 2005) buf = addInt32(buf, 0) // Reserved buf = addCString(buf, op.collection) buf = addInt32(buf, op.limit) buf = addInt64(buf, op.cursorId) replyFunc = op.replyFunc case *deleteOp: buf = addHeader(buf, 2006) buf = addInt32(buf, 0) // Reserved buf = addCString(buf, op.collection) buf = addInt32(buf, int32(op.flags)) debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector) buf, err = addBSON(buf, op.selector) if err != nil { return err } case *killCursorsOp: buf = addHeader(buf, 2007) buf = addInt32(buf, 0) // Reserved buf = addInt32(buf, int32(len(op.cursorIds))) for _, cursorId := range op.cursorIds { buf = addInt64(buf, cursorId) } default: panic("internal error: unknown operation type") } setInt32(buf, start, int32(len(buf)-start)) if replyFunc != nil { request := &requests[requestCount] request.replyFunc = replyFunc request.bufferPos = start requestCount++ } } // Buffer is ready for the pipe. Lock, allocate ids, and enqueue. socket.Lock() if socket.dead != nil { dead := socket.dead socket.Unlock() debugf("Socket %p to %s: failing query, already closed: %s", socket, socket.addr, socket.dead.Error()) // XXX This seems necessary in case the session is closed concurrently // with a query being performed, but it's not yet tested: for i := 0; i != requestCount; i++ { request := &requests[i] if request.replyFunc != nil { request.replyFunc(dead, nil, -1, nil) } } return dead } wasWaiting := len(socket.replyFuncs) > 0 // Reserve id 0 for requests which should have no responses. 
requestId := socket.nextRequestId + 1 if requestId == 0 { requestId++ } socket.nextRequestId = requestId + uint32(requestCount) for i := 0; i != requestCount; i++ { request := &requests[i] setInt32(buf, request.bufferPos+4, int32(requestId)) socket.replyFuncs[requestId] = request.replyFunc requestId++ } debugf("Socket %p to %s: sending %d op(s) (%d bytes)", socket, socket.addr, len(ops), len(buf)) stats.sentOps(len(ops)) socket.updateDeadline(writeDeadline) _, err = socket.conn.Write(buf) if !wasWaiting && requestCount > 0 { socket.updateDeadline(readDeadline) } socket.Unlock() return err } func fill(r net.Conn, b []byte) error { l := len(b) n, err := r.Read(b) for n != l && err == nil { var ni int ni, err = r.Read(b[n:]) n += ni } return err } // Estimated minimum cost per socket: 1 goroutine + memory for the largest // document ever seen. func (socket *mongoSocket) readLoop() { p := make([]byte, 36) // 16 from header + 20 from OP_REPLY fixed fields s := make([]byte, 4) conn := socket.conn // No locking, conn never changes. for { // XXX Handle timeouts, , etc err := fill(conn, p) if err != nil { socket.kill(err, true) return } totalLen := getInt32(p, 0) responseTo := getInt32(p, 8) opCode := getInt32(p, 12) // Don't use socket.server.Addr here. socket is not // locked and socket.server may go away. 
debugf("Socket %p to %s: got reply (%d bytes)", socket, socket.addr, totalLen) _ = totalLen if opCode != 1 { socket.kill(errors.New("opcode != 1, corrupted data?"), true) return } reply := replyOp{ flags: uint32(getInt32(p, 16)), cursorId: getInt64(p, 20), firstDoc: getInt32(p, 28), replyDocs: getInt32(p, 32), } stats.receivedOps(+1) stats.receivedDocs(int(reply.replyDocs)) socket.Lock() replyFunc, ok := socket.replyFuncs[uint32(responseTo)] if ok { delete(socket.replyFuncs, uint32(responseTo)) } socket.Unlock() if replyFunc != nil && reply.replyDocs == 0 { replyFunc(nil, &reply, -1, nil) } else { for i := 0; i != int(reply.replyDocs); i++ { err := fill(conn, s) if err != nil { if replyFunc != nil { replyFunc(err, nil, -1, nil) } socket.kill(err, true) return } b := make([]byte, int(getInt32(s, 0))) // copy(b, s) in an efficient way. b[0] = s[0] b[1] = s[1] b[2] = s[2] b[3] = s[3] err = fill(conn, b[4:]) if err != nil { if replyFunc != nil { replyFunc(err, nil, -1, nil) } socket.kill(err, true) return } if globalDebug && globalLogger != nil { m := bson.M{} if err := bson.Unmarshal(b, m); err == nil { debugf("Socket %p to %s: received document: %#v", socket, socket.addr, m) } } if replyFunc != nil { replyFunc(nil, &reply, i, b) } // XXX Do bound checking against totalLen. } } socket.Lock() if len(socket.replyFuncs) == 0 { // Nothing else to read for now. Disable deadline. socket.conn.SetReadDeadline(time.Time{}) } else { socket.updateDeadline(readDeadline) } socket.Unlock() // XXX Do bound checking against totalLen. } } var emptyHeader = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} func addHeader(b []byte, opcode int) []byte { i := len(b) b = append(b, emptyHeader...) // Enough for current opcodes. 
b[i+12] = byte(opcode) b[i+13] = byte(opcode >> 8) return b } func addInt32(b []byte, i int32) []byte { return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24)) } func addInt64(b []byte, i int64) []byte { return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24), byte(i>>32), byte(i>>40), byte(i>>48), byte(i>>56)) } func addCString(b []byte, s string) []byte { b = append(b, []byte(s)...) b = append(b, 0) return b } func addBSON(b []byte, doc interface{}) ([]byte, error) { if doc == nil { return append(b, 5, 0, 0, 0, 0), nil } data, err := bson.Marshal(doc) if err != nil { return b, err } return append(b, data...), nil } func setInt32(b []byte, pos int, i int32) { b[pos] = byte(i) b[pos+1] = byte(i >> 8) b[pos+2] = byte(i >> 16) b[pos+3] = byte(i >> 24) } func getInt32(b []byte, pos int) int32 { return (int32(b[pos+0])) | (int32(b[pos+1]) << 8) | (int32(b[pos+2]) << 16) | (int32(b[pos+3]) << 24) } func getInt64(b []byte, pos int) int64 { return (int64(b[pos+0])) | (int64(b[pos+1]) << 8) | (int64(b[pos+2]) << 16) | (int64(b[pos+3]) << 24) | (int64(b[pos+4]) << 32) | (int64(b[pos+5]) << 40) | (int64(b[pos+6]) << 48) | (int64(b[pos+7]) << 56) } charm-2.1.1/src/gopkg.in/mgo.v2/Makefile0000664000175000017500000000010112672604565016653 0ustar marcomarcostartdb: @testdb/setup.sh start stopdb: @testdb/setup.sh stop charm-2.1.1/src/gopkg.in/mgo.v2/syscall_test.go0000664000175000017500000000033212672604565020261 0ustar marcomarco// +build !windows package mgo_test import ( "syscall" ) func stop(pid int) (err error) { return syscall.Kill(pid, syscall.SIGSTOP) } func cont(pid int) (err error) { return syscall.Kill(pid, syscall.SIGCONT) } charm-2.1.1/src/gopkg.in/mgo.v2/dbtest/0000775000175000017500000000000012672604565016510 5ustar marcomarcocharm-2.1.1/src/gopkg.in/mgo.v2/dbtest/export_test.go0000664000175000017500000000023012672604565021412 0ustar marcomarcopackage dbtest import ( "os" ) func (dbs *DBServer) ProcessTest() *os.Process { if dbs.server == nil { 
return nil } return dbs.server.Process } charm-2.1.1/src/gopkg.in/mgo.v2/dbtest/dbserver.go0000664000175000017500000001167512672604565020665 0ustar marcomarcopackage dbtest import ( "bytes" "fmt" "net" "os" "os/exec" "strconv" "time" "gopkg.in/mgo.v2" "gopkg.in/tomb.v2" ) // DBServer controls a MongoDB server process to be used within test suites. // // The test server is started when Session is called the first time and should // remain running for the duration of all tests, with the Wipe method being // called between tests (before each of them) to clear stored data. After all tests // are done, the Stop method should be called to stop the test server. // // Before the DBServer is used the SetPath method must be called to define // the location for the database files to be stored. type DBServer struct { session *mgo.Session output bytes.Buffer server *exec.Cmd dbpath string host string tomb tomb.Tomb } // SetPath defines the path to the directory where the database files will be // stored if it is started. The directory path itself is not created or removed // by the test helper. func (dbs *DBServer) SetPath(dbpath string) { dbs.dbpath = dbpath } func (dbs *DBServer) start() { if dbs.server != nil { panic("DBServer already started") } if dbs.dbpath == "" { panic("DBServer.SetPath must be called before using the server") } mgo.SetStats(true) l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { panic("unable to listen on a local address: " + err.Error()) } addr := l.Addr().(*net.TCPAddr) l.Close() dbs.host = addr.String() args := []string{ "--dbpath", dbs.dbpath, "--bind_ip", "127.0.0.1", "--port", strconv.Itoa(addr.Port), "--nssize", "1", "--noprealloc", "--smallfiles", "--nojournal", } dbs.tomb = tomb.Tomb{} dbs.server = exec.Command("mongod", args...) 
dbs.server.Stdout = &dbs.output dbs.server.Stderr = &dbs.output err = dbs.server.Start() if err != nil { panic(err) } dbs.tomb.Go(dbs.monitor) dbs.Wipe() } func (dbs *DBServer) monitor() error { dbs.server.Process.Wait() if dbs.tomb.Alive() { // Present some debugging information. fmt.Fprintf(os.Stderr, "---- mongod process died unexpectedly:\n") fmt.Fprintf(os.Stderr, "%s", dbs.output.Bytes()) fmt.Fprintf(os.Stderr, "---- mongod processes running right now:\n") cmd := exec.Command("/bin/sh", "-c", "ps auxw | grep mongod") cmd.Stdout = os.Stderr cmd.Stderr = os.Stderr cmd.Run() fmt.Fprintf(os.Stderr, "----------------------------------------\n") panic("mongod process died unexpectedly") } return nil } // Stop stops the test server process, if it is running. // // It's okay to call Stop multiple times. After the test server is // stopped it cannot be restarted. // // All database sessions must be closed before or while the Stop method // is running. Otherwise Stop will panic after a timeout informing that // there is a session leak. func (dbs *DBServer) Stop() { if dbs.session != nil { dbs.checkSessions() if dbs.session != nil { dbs.session.Close() dbs.session = nil } } if dbs.server != nil { dbs.tomb.Kill(nil) dbs.server.Process.Kill() select { case <-dbs.tomb.Dead(): case <-time.After(5 * time.Second): panic("timeout waiting for mongod process to die") } dbs.server = nil } } // Session returns a new session to the server. The returned session // must be closed after the test is done with it. // // The first Session obtained from a DBServer will start it. func (dbs *DBServer) Session() *mgo.Session { if dbs.server == nil { dbs.start() } if dbs.session == nil { mgo.ResetStats() var err error dbs.session, err = mgo.Dial(dbs.host + "/test") if err != nil { panic(err) } } return dbs.session.Copy() } // checkSessions ensures all mgo sessions opened were properly closed. 
// For slightly faster tests, it may be disabled setting the // environmnet variable CHECK_SESSIONS to 0. func (dbs *DBServer) checkSessions() { if check := os.Getenv("CHECK_SESSIONS"); check == "0" || dbs.server == nil || dbs.session == nil { return } dbs.session.Close() dbs.session = nil for i := 0; i < 100; i++ { stats := mgo.GetStats() if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 { return } time.Sleep(100 * time.Millisecond) } panic("There are mgo sessions still alive.") } // Wipe drops all created databases and their data. // // The MongoDB server remains running if it was prevoiusly running, // or stopped if it was previously stopped. // // All database sessions must be closed before or while the Wipe method // is running. Otherwise Wipe will panic after a timeout informing that // there is a session leak. func (dbs *DBServer) Wipe() { if dbs.server == nil || dbs.session == nil { return } dbs.checkSessions() sessionUnset := dbs.session == nil session := dbs.Session() defer session.Close() if sessionUnset { dbs.session.Close() dbs.session = nil } names, err := session.DatabaseNames() if err != nil { panic(err) } for _, name := range names { switch name { case "admin", "local", "config": default: err = session.DB(name).DropDatabase() if err != nil { panic(err) } } } } charm-2.1.1/src/gopkg.in/mgo.v2/dbtest/dbserver_test.go0000664000175000017500000000415412672604565021716 0ustar marcomarcopackage dbtest_test import ( "os" "testing" "time" . 
"gopkg.in/check.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/dbtest" ) type M map[string]interface{} func TestAll(t *testing.T) { TestingT(t) } type S struct { oldCheckSessions string } var _ = Suite(&S{}) func (s *S) SetUpTest(c *C) { s.oldCheckSessions = os.Getenv("CHECK_SESSIONS") os.Setenv("CHECK_SESSIONS", "") } func (s *S) TearDownTest(c *C) { os.Setenv("CHECK_SESSIONS", s.oldCheckSessions) } func (s *S) TestWipeData(c *C) { var server dbtest.DBServer server.SetPath(c.MkDir()) defer server.Stop() session := server.Session() err := session.DB("mydb").C("mycoll").Insert(M{"a": 1}) session.Close() c.Assert(err, IsNil) server.Wipe() session = server.Session() names, err := session.DatabaseNames() session.Close() c.Assert(err, IsNil) for _, name := range names { if name != "local" && name != "admin" { c.Fatalf("Wipe should have removed this database: %s", name) } } } func (s *S) TestStop(c *C) { var server dbtest.DBServer server.SetPath(c.MkDir()) defer server.Stop() // Server should not be running. process := server.ProcessTest() c.Assert(process, IsNil) session := server.Session() addr := session.LiveServers()[0] session.Close() // Server should be running now. process = server.ProcessTest() p, err := os.FindProcess(process.Pid) c.Assert(err, IsNil) p.Release() server.Stop() // Server should not be running anymore. session, err = mgo.DialWithTimeout(addr, 500*time.Millisecond) if session != nil { session.Close() c.Fatalf("Stop did not stop the server") } } func (s *S) TestCheckSessions(c *C) { var server dbtest.DBServer server.SetPath(c.MkDir()) defer server.Stop() session := server.Session() defer session.Close() c.Assert(server.Wipe, PanicMatches, "There are mgo sessions still alive.") } func (s *S) TestCheckSessionsDisabled(c *C) { var server dbtest.DBServer server.SetPath(c.MkDir()) defer server.Stop() os.Setenv("CHECK_SESSIONS", "0") // Should not panic, although it looks to Wipe like this session will leak. 
session := server.Session() defer session.Close() server.Wipe() } charm-2.1.1/src/gopkg.in/mgo.v2/gridfs_test.go0000664000175000017500000003754312672604565020103 0ustar marcomarco// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo_test import ( "io" "os" "time" . 
"gopkg.in/check.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" ) func (s *S) TestGridFSCreate(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") before := bson.Now() gfs := db.GridFS("fs") file, err := gfs.Create("") c.Assert(err, IsNil) n, err := file.Write([]byte("some data")) c.Assert(err, IsNil) c.Assert(n, Equals, 9) err = file.Close() c.Assert(err, IsNil) after := bson.Now() // Check the file information. result := M{} err = db.C("fs.files").Find(nil).One(result) c.Assert(err, IsNil) fileId, ok := result["_id"].(bson.ObjectId) c.Assert(ok, Equals, true) c.Assert(fileId.Valid(), Equals, true) result["_id"] = "" ud, ok := result["uploadDate"].(time.Time) c.Assert(ok, Equals, true) c.Assert(ud.After(before) && ud.Before(after), Equals, true) result["uploadDate"] = "" expected := M{ "_id": "", "length": 9, "chunkSize": 255 * 1024, "uploadDate": "", "md5": "1e50210a0202497fb79bc38b6ade6c34", } c.Assert(result, DeepEquals, expected) // Check the chunk. result = M{} err = db.C("fs.chunks").Find(nil).One(result) c.Assert(err, IsNil) chunkId, ok := result["_id"].(bson.ObjectId) c.Assert(ok, Equals, true) c.Assert(chunkId.Valid(), Equals, true) result["_id"] = "" expected = M{ "_id": "", "files_id": fileId, "n": 0, "data": []byte("some data"), } c.Assert(result, DeepEquals, expected) // Check that an index was created. 
indexes, err := db.C("fs.chunks").Indexes() c.Assert(err, IsNil) c.Assert(len(indexes), Equals, 2) c.Assert(indexes[1].Key, DeepEquals, []string{"files_id", "n"}) } func (s *S) TestGridFSFileDetails(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("myfile1.txt") c.Assert(err, IsNil) n, err := file.Write([]byte("some")) c.Assert(err, IsNil) c.Assert(n, Equals, 4) c.Assert(file.Size(), Equals, int64(4)) n, err = file.Write([]byte(" data")) c.Assert(err, IsNil) c.Assert(n, Equals, 5) c.Assert(file.Size(), Equals, int64(9)) id, _ := file.Id().(bson.ObjectId) c.Assert(id.Valid(), Equals, true) c.Assert(file.Name(), Equals, "myfile1.txt") c.Assert(file.ContentType(), Equals, "") var info interface{} err = file.GetMeta(&info) c.Assert(err, IsNil) c.Assert(info, IsNil) file.SetId("myid") file.SetName("myfile2.txt") file.SetContentType("text/plain") file.SetMeta(M{"any": "thing"}) c.Assert(file.Id(), Equals, "myid") c.Assert(file.Name(), Equals, "myfile2.txt") c.Assert(file.ContentType(), Equals, "text/plain") err = file.GetMeta(&info) c.Assert(err, IsNil) c.Assert(info, DeepEquals, bson.M{"any": "thing"}) err = file.Close() c.Assert(err, IsNil) c.Assert(file.MD5(), Equals, "1e50210a0202497fb79bc38b6ade6c34") ud := file.UploadDate() now := time.Now() c.Assert(ud.Before(now), Equals, true) c.Assert(ud.After(now.Add(-3*time.Second)), Equals, true) result := M{} err = db.C("fs.files").Find(nil).One(result) c.Assert(err, IsNil) result["uploadDate"] = "" expected := M{ "_id": "myid", "length": 9, "chunkSize": 255 * 1024, "uploadDate": "", "md5": "1e50210a0202497fb79bc38b6ade6c34", "filename": "myfile2.txt", "contentType": "text/plain", "metadata": M{"any": "thing"}, } c.Assert(result, DeepEquals, expected) } func (s *S) TestGridFSSetUploadDate(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") 
gfs := db.GridFS("fs") file, err := gfs.Create("") c.Assert(err, IsNil) t := time.Date(2014, 1, 1, 1, 1, 1, 0, time.Local) file.SetUploadDate(t) err = file.Close() c.Assert(err, IsNil) // Check the file information. result := M{} err = db.C("fs.files").Find(nil).One(result) c.Assert(err, IsNil) ud := result["uploadDate"].(time.Time) if !ud.Equal(t) { c.Fatalf("want upload date %s, got %s", t, ud) } } func (s *S) TestGridFSCreateWithChunking(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("") c.Assert(err, IsNil) file.SetChunkSize(5) // Smaller than the chunk size. n, err := file.Write([]byte("abc")) c.Assert(err, IsNil) c.Assert(n, Equals, 3) // Boundary in the middle. n, err = file.Write([]byte("defg")) c.Assert(err, IsNil) c.Assert(n, Equals, 4) // Boundary at the end. n, err = file.Write([]byte("hij")) c.Assert(err, IsNil) c.Assert(n, Equals, 3) // Larger than the chunk size, with 3 chunks. n, err = file.Write([]byte("klmnopqrstuv")) c.Assert(err, IsNil) c.Assert(n, Equals, 12) err = file.Close() c.Assert(err, IsNil) // Check the file information. result := M{} err = db.C("fs.files").Find(nil).One(result) c.Assert(err, IsNil) fileId, _ := result["_id"].(bson.ObjectId) c.Assert(fileId.Valid(), Equals, true) result["_id"] = "" result["uploadDate"] = "" expected := M{ "_id": "", "length": 22, "chunkSize": 5, "uploadDate": "", "md5": "44a66044834cbe55040089cabfc102d5", } c.Assert(result, DeepEquals, expected) // Check the chunks. 
iter := db.C("fs.chunks").Find(nil).Sort("n").Iter() dataChunks := []string{"abcde", "fghij", "klmno", "pqrst", "uv"} for i := 0; ; i++ { result = M{} if !iter.Next(result) { if i != 5 { c.Fatalf("Expected 5 chunks, got %d", i) } break } c.Assert(iter.Close(), IsNil) result["_id"] = "" expected = M{ "_id": "", "files_id": fileId, "n": i, "data": []byte(dataChunks[i]), } c.Assert(result, DeepEquals, expected) } } func (s *S) TestGridFSAbort(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("") c.Assert(err, IsNil) file.SetChunkSize(5) n, err := file.Write([]byte("some data")) c.Assert(err, IsNil) c.Assert(n, Equals, 9) var count int for i := 0; i < 10; i++ { count, err = db.C("fs.chunks").Count() if count > 0 || err != nil { break } } c.Assert(err, IsNil) c.Assert(count, Equals, 1) file.Abort() err = file.Close() c.Assert(err, ErrorMatches, "write aborted") count, err = db.C("fs.chunks").Count() c.Assert(err, IsNil) c.Assert(count, Equals, 0) } func (s *S) TestGridFSCloseConflict(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") db.C("fs.files").EnsureIndex(mgo.Index{Key: []string{"filename"}, Unique: true}) // For a closing-time conflict err = db.C("fs.files").Insert(M{"filename": "foo.txt"}) c.Assert(err, IsNil) gfs := db.GridFS("fs") file, err := gfs.Create("foo.txt") c.Assert(err, IsNil) _, err = file.Write([]byte("some data")) c.Assert(err, IsNil) err = file.Close() c.Assert(mgo.IsDup(err), Equals, true) count, err := db.C("fs.chunks").Count() c.Assert(err, IsNil) c.Assert(count, Equals, 0) } func (s *S) TestGridFSOpenNotFound(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.OpenId("non-existent") c.Assert(err == mgo.ErrNotFound, Equals, true) c.Assert(file, IsNil) 
file, err = gfs.Open("non-existent") c.Assert(err == mgo.ErrNotFound, Equals, true) c.Assert(file, IsNil) } func (s *S) TestGridFSReadAll(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("") c.Assert(err, IsNil) id := file.Id() file.SetChunkSize(5) n, err := file.Write([]byte("abcdefghijklmnopqrstuv")) c.Assert(err, IsNil) c.Assert(n, Equals, 22) err = file.Close() c.Assert(err, IsNil) file, err = gfs.OpenId(id) c.Assert(err, IsNil) b := make([]byte, 30) n, err = file.Read(b) c.Assert(n, Equals, 22) c.Assert(err, IsNil) n, err = file.Read(b) c.Assert(n, Equals, 0) c.Assert(err == io.EOF, Equals, true) err = file.Close() c.Assert(err, IsNil) } func (s *S) TestGridFSReadChunking(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("") c.Assert(err, IsNil) id := file.Id() file.SetChunkSize(5) n, err := file.Write([]byte("abcdefghijklmnopqrstuv")) c.Assert(err, IsNil) c.Assert(n, Equals, 22) err = file.Close() c.Assert(err, IsNil) file, err = gfs.OpenId(id) c.Assert(err, IsNil) b := make([]byte, 30) // Smaller than the chunk size. n, err = file.Read(b[:3]) c.Assert(err, IsNil) c.Assert(n, Equals, 3) c.Assert(b[:3], DeepEquals, []byte("abc")) // Boundary in the middle. n, err = file.Read(b[:4]) c.Assert(err, IsNil) c.Assert(n, Equals, 4) c.Assert(b[:4], DeepEquals, []byte("defg")) // Boundary at the end. n, err = file.Read(b[:3]) c.Assert(err, IsNil) c.Assert(n, Equals, 3) c.Assert(b[:3], DeepEquals, []byte("hij")) // Larger than the chunk size, with 3 chunks. 
n, err = file.Read(b) c.Assert(err, IsNil) c.Assert(n, Equals, 12) c.Assert(b[:12], DeepEquals, []byte("klmnopqrstuv")) n, err = file.Read(b) c.Assert(n, Equals, 0) c.Assert(err == io.EOF, Equals, true) err = file.Close() c.Assert(err, IsNil) } func (s *S) TestGridFSOpen(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("myfile.txt") c.Assert(err, IsNil) file.Write([]byte{'1'}) file.Close() file, err = gfs.Create("myfile.txt") c.Assert(err, IsNil) file.Write([]byte{'2'}) file.Close() file, err = gfs.Open("myfile.txt") c.Assert(err, IsNil) defer file.Close() var b [1]byte _, err = file.Read(b[:]) c.Assert(err, IsNil) c.Assert(string(b[:]), Equals, "2") } func (s *S) TestGridFSSeek(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("") c.Assert(err, IsNil) id := file.Id() file.SetChunkSize(5) n, err := file.Write([]byte("abcdefghijklmnopqrstuv")) c.Assert(err, IsNil) c.Assert(n, Equals, 22) err = file.Close() c.Assert(err, IsNil) b := make([]byte, 5) file, err = gfs.OpenId(id) c.Assert(err, IsNil) o, err := file.Seek(3, os.SEEK_SET) c.Assert(err, IsNil) c.Assert(o, Equals, int64(3)) _, err = file.Read(b) c.Assert(err, IsNil) c.Assert(b, DeepEquals, []byte("defgh")) o, err = file.Seek(5, os.SEEK_CUR) c.Assert(err, IsNil) c.Assert(o, Equals, int64(13)) _, err = file.Read(b) c.Assert(err, IsNil) c.Assert(b, DeepEquals, []byte("nopqr")) o, err = file.Seek(0, os.SEEK_END) c.Assert(err, IsNil) c.Assert(o, Equals, int64(22)) n, err = file.Read(b) c.Assert(err, Equals, io.EOF) c.Assert(n, Equals, 0) o, err = file.Seek(-10, os.SEEK_END) c.Assert(err, IsNil) c.Assert(o, Equals, int64(12)) _, err = file.Read(b) c.Assert(err, IsNil) c.Assert(b, DeepEquals, []byte("mnopq")) o, err = file.Seek(8, os.SEEK_SET) c.Assert(err, IsNil) c.Assert(o, Equals, 
int64(8)) _, err = file.Read(b) c.Assert(err, IsNil) c.Assert(b, DeepEquals, []byte("ijklm")) // Trivial seek forward within same chunk. Already // got the data, shouldn't touch the database. sent := mgo.GetStats().SentOps o, err = file.Seek(1, os.SEEK_CUR) c.Assert(err, IsNil) c.Assert(o, Equals, int64(14)) c.Assert(mgo.GetStats().SentOps, Equals, sent) _, err = file.Read(b) c.Assert(err, IsNil) c.Assert(b, DeepEquals, []byte("opqrs")) // Try seeking past end of file. file.Seek(3, os.SEEK_SET) o, err = file.Seek(23, os.SEEK_SET) c.Assert(err, ErrorMatches, "seek past end of file") c.Assert(o, Equals, int64(3)) } func (s *S) TestGridFSRemoveId(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("myfile.txt") c.Assert(err, IsNil) file.Write([]byte{'1'}) file.Close() file, err = gfs.Create("myfile.txt") c.Assert(err, IsNil) file.Write([]byte{'2'}) id := file.Id() file.Close() err = gfs.RemoveId(id) c.Assert(err, IsNil) file, err = gfs.Open("myfile.txt") c.Assert(err, IsNil) defer file.Close() var b [1]byte _, err = file.Read(b[:]) c.Assert(err, IsNil) c.Assert(string(b[:]), Equals, "1") n, err := db.C("fs.chunks").Find(M{"files_id": id}).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 0) } func (s *S) TestGridFSRemove(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("myfile.txt") c.Assert(err, IsNil) file.Write([]byte{'1'}) file.Close() file, err = gfs.Create("myfile.txt") c.Assert(err, IsNil) file.Write([]byte{'2'}) file.Close() err = gfs.Remove("myfile.txt") c.Assert(err, IsNil) _, err = gfs.Open("myfile.txt") c.Assert(err == mgo.ErrNotFound, Equals, true) n, err := db.C("fs.chunks").Find(nil).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 0) } func (s *S) TestGridFSOpenNext(c *C) { session, err := mgo.Dial("localhost:40011") 
c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") gfs := db.GridFS("fs") file, err := gfs.Create("myfile1.txt") c.Assert(err, IsNil) file.Write([]byte{'1'}) file.Close() file, err = gfs.Create("myfile2.txt") c.Assert(err, IsNil) file.Write([]byte{'2'}) file.Close() var f *mgo.GridFile var b [1]byte iter := gfs.Find(nil).Sort("-filename").Iter() ok := gfs.OpenNext(iter, &f) c.Assert(ok, Equals, true) c.Check(f.Name(), Equals, "myfile2.txt") _, err = f.Read(b[:]) c.Assert(err, IsNil) c.Assert(string(b[:]), Equals, "2") ok = gfs.OpenNext(iter, &f) c.Assert(ok, Equals, true) c.Check(f.Name(), Equals, "myfile1.txt") _, err = f.Read(b[:]) c.Assert(err, IsNil) c.Assert(string(b[:]), Equals, "1") ok = gfs.OpenNext(iter, &f) c.Assert(ok, Equals, false) c.Assert(iter.Close(), IsNil) c.Assert(f, IsNil) // Do it again with a more restrictive query to make sure // it's actually taken into account. iter = gfs.Find(bson.M{"filename": "myfile1.txt"}).Iter() ok = gfs.OpenNext(iter, &f) c.Assert(ok, Equals, true) c.Check(f.Name(), Equals, "myfile1.txt") ok = gfs.OpenNext(iter, &f) c.Assert(ok, Equals, false) c.Assert(iter.Close(), IsNil) c.Assert(f, IsNil) } charm-2.1.1/src/gopkg.in/mgo.v2/LICENSE0000664000175000017500000000252412672604565016233 0ustar marcomarcomgo - MongoDB driver for Go Copyright (c) 2010-2013 - Gustavo Niemeyer All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. charm-2.1.1/src/gopkg.in/mgo.v2/testdb/0000775000175000017500000000000012672604565016510 5ustar marcomarcocharm-2.1.1/src/gopkg.in/mgo.v2/testdb/init.js0000664000175000017500000000757312672604565020025 0ustar marcomarco//var settings = {heartbeatSleep: 0.05, heartbeatTimeout: 0.5} var settings = {}; // We know the master of the first set (pri=1), but not of the second. 
var rs1cfg = {_id: "rs1", members: [{_id: 1, host: "127.0.0.1:40011", priority: 1, tags: {rs1: "a"}}, {_id: 2, host: "127.0.0.1:40012", priority: 0, tags: {rs1: "b"}}, {_id: 3, host: "127.0.0.1:40013", priority: 0, tags: {rs1: "c"}}], settings: settings} var rs2cfg = {_id: "rs2", members: [{_id: 1, host: "127.0.0.1:40021", priority: 1, tags: {rs2: "a"}}, {_id: 2, host: "127.0.0.1:40022", priority: 1, tags: {rs2: "b"}}, {_id: 3, host: "127.0.0.1:40023", priority: 1, tags: {rs2: "c"}}], settings: settings} var rs3cfg = {_id: "rs3", members: [{_id: 1, host: "127.0.0.1:40031", priority: 1, tags: {rs3: "a"}}, {_id: 2, host: "127.0.0.1:40032", priority: 1, tags: {rs3: "b"}}, {_id: 3, host: "127.0.0.1:40033", priority: 1, tags: {rs3: "c"}}], settings: settings} for (var i = 0; i != 60; i++) { try { db1 = new Mongo("127.0.0.1:40001").getDB("admin") db2 = new Mongo("127.0.0.1:40002").getDB("admin") rs1a = new Mongo("127.0.0.1:40011").getDB("admin") rs2a = new Mongo("127.0.0.1:40021").getDB("admin") rs3a = new Mongo("127.0.0.1:40031").getDB("admin") break } catch(err) { print("Can't connect yet...") } sleep(1000) } function hasSSL() { return Boolean(db1.serverBuildInfo().OpenSSLVersion) } rs1a.runCommand({replSetInitiate: rs1cfg}) rs2a.runCommand({replSetInitiate: rs2cfg}) rs3a.runCommand({replSetInitiate: rs3cfg}) function configShards() { cfg1 = new Mongo("127.0.0.1:40201").getDB("admin") cfg1.runCommand({addshard: "127.0.0.1:40001"}) cfg1.runCommand({addshard: "rs1/127.0.0.1:40011"}) cfg2 = new Mongo("127.0.0.1:40202").getDB("admin") cfg2.runCommand({addshard: "rs2/127.0.0.1:40021"}) cfg3 = new Mongo("127.0.0.1:40203").getDB("admin") cfg3.runCommand({addshard: "rs3/127.0.0.1:40031"}) } function configAuth() { var addrs = ["127.0.0.1:40002", "127.0.0.1:40203", "127.0.0.1:40031"] if (hasSSL()) { addrs.push("127.0.0.1:40003") } for (var i in addrs) { var db = new Mongo(addrs[i]).getDB("admin") var v = db.serverBuildInfo().versionArray if (v < [2, 5]) { db.addUser("root", 
"rapadura") } else { db.createUser({user: "root", pwd: "rapadura", roles: ["root"]}) } db.auth("root", "rapadura") if (v >= [2, 6]) { db.createUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]}) } else if (v >= [2, 4]) { db.addUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]}) } else { db.addUser("reader", "rapadura", true) } } } function countHealthy(rs) { var status = rs.runCommand({replSetGetStatus: 1}) var count = 0 var primary = 0 if (typeof status.members != "undefined") { for (var i = 0; i != status.members.length; i++) { var m = status.members[i] if (m.health == 1 && (m.state == 1 || m.state == 2)) { count += 1 if (m.state == 1) { primary = 1 } } } } if (primary == 0) { count = 0 } return count } var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length for (var i = 0; i != 60; i++) { var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a) print("Replica sets have", count, "healthy nodes.") if (count == totalRSMembers) { configShards() configAuth() quit(0) } sleep(1000) } print("Replica sets didn't sync up properly.") quit(12) // vim:ts=4:sw=4:et charm-2.1.1/src/gopkg.in/mgo.v2/testdb/wait.js0000664000175000017500000000424412672604565020016 0ustar marcomarco// We know the master of the first set (pri=1), but not of the second. 
var settings = {} var rs1cfg = {_id: "rs1", members: [{_id: 1, host: "127.0.0.1:40011", priority: 1}, {_id: 2, host: "127.0.0.1:40012", priority: 0}, {_id: 3, host: "127.0.0.1:40013", priority: 0}]} var rs2cfg = {_id: "rs2", members: [{_id: 1, host: "127.0.0.1:40021", priority: 1}, {_id: 2, host: "127.0.0.1:40022", priority: 1}, {_id: 3, host: "127.0.0.1:40023", priority: 0}]} var rs3cfg = {_id: "rs3", members: [{_id: 1, host: "127.0.0.1:40031", priority: 1}, {_id: 2, host: "127.0.0.1:40032", priority: 1}, {_id: 3, host: "127.0.0.1:40033", priority: 1}], settings: settings} for (var i = 0; i != 60; i++) { try { rs1a = new Mongo("127.0.0.1:40011").getDB("admin") rs2a = new Mongo("127.0.0.1:40021").getDB("admin") rs3a = new Mongo("127.0.0.1:40031").getDB("admin") rs3a.auth("root", "rapadura") db1 = new Mongo("127.0.0.1:40001").getDB("admin") db2 = new Mongo("127.0.0.1:40002").getDB("admin") break } catch(err) { print("Can't connect yet...") } sleep(1000) } function countHealthy(rs) { var status = rs.runCommand({replSetGetStatus: 1}) var count = 0 var primary = 0 if (typeof status.members != "undefined") { for (var i = 0; i != status.members.length; i++) { var m = status.members[i] if (m.health == 1 && (m.state == 1 || m.state == 2)) { count += 1 if (m.state == 1) { primary = 1 } } } } if (primary == 0) { count = 0 } return count } var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length for (var i = 0; i != 60; i++) { var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a) print("Replica sets have", count, "healthy nodes.") if (count == totalRSMembers) { quit(0) } sleep(1000) } print("Replica sets didn't sync up properly.") quit(12) // vim:ts=4:sw=4:et charm-2.1.1/src/gopkg.in/mgo.v2/testdb/client.pem0000664000175000017500000000626512672604565020502 0ustar marcomarcoTo regenerate the key: openssl req -newkey rsa:2048 -new -x509 -days 36500 -nodes -out server.crt -keyout server.key cat server.key server.crt > 
server.pem openssl genrsa -out client.key 2048 openssl req -key client.key -new -out client.req openssl x509 -req -in client.req -CA server.crt -CAkey server.key -days 36500 -CAserial file.srl -out client.crt cat client.key client.crt > client.pem -----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7 wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5 Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ +HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB 28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ 4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk= -----END RSA PRIVATE KEY----- -----BEGIN CERTIFICATE----- 
MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H 4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ 616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd 7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5 CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47 PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI /nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r z3A= -----END CERTIFICATE----- charm-2.1.1/src/gopkg.in/mgo.v2/testdb/server.pem0000664000175000017500000000566612672604565020536 0ustar marcomarco-----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9 wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk 
eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3 ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6 MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9 Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3 IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do 4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1 ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7 1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt 9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk SruEA1+5bfBRMW0P+h7Qfe4= -----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm 6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5 GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu 2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG 
TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL yQ== -----END CERTIFICATE----- charm-2.1.1/src/gopkg.in/mgo.v2/testdb/setup.sh0000775000175000017500000000262012672604565020207 0ustar marcomarco#!/bin/sh -e start() { mkdir _testdb cd _testdb mkdir db1 db2 db3 rs1a rs1b rs1c rs2a rs2b rs2c rs3a rs3b rs3c rs4a cfg1 cfg2 cfg3 cp ../testdb/supervisord.conf supervisord.conf cp ../testdb/server.pem server.pem echo keyfile > keyfile chmod 600 keyfile COUNT=$(grep '^\[program' supervisord.conf | wc -l | tr -d ' ') if ! mongod --help | grep -q -- --ssl; then COUNT=$(($COUNT - 1)) fi echo "Running supervisord..." supervisord || ( echo "Supervisord failed executing ($?)" && exit 1 ) echo "Supervisord is up, starting $COUNT processes..." for i in $(seq 10); do RUNNING=$(supervisorctl status | grep RUNNING | wc -l | tr -d ' ') echo "$RUNNING processes running..." if [ x$COUNT = x$RUNNING ]; then echo "Running setup.js with mongo..." mongo --nodb ../testdb/init.js exit 0 fi sleep 1 done echo "Failed to start all processes. Check out what's up at $PWD now!" exit 1 } stop() { if [ -d _testdb ]; then echo "Shutting down test cluster..." (cd _testdb && supervisorctl shutdown) rm -rf _testdb fi } if [ ! -f suite_test.go ]; then echo "This script must be run from within the source directory." 
exit 1 fi case "$1" in start) start $2 ;; stop) stop $2 ;; esac # vim:ts=4:sw=4:et charm-2.1.1/src/gopkg.in/mgo.v2/testdb/dropall.js0000664000175000017500000000365612672604565020515 0ustar marcomarco var ports = [40001, 40002, 40011, 40012, 40013, 40021, 40022, 40023, 40041, 40101, 40102, 40103, 40201, 40202, 40203] var auth = [40002, 40103, 40203, 40031] var db1 = new Mongo("localhost:40001") if (db1.getDB("admin").serverBuildInfo().OpenSSLVersion) { ports.push(40003) auth.push(40003) } for (var i in ports) { var port = ports[i] var server = "localhost:" + port var mongo = new Mongo("localhost:" + port) var admin = mongo.getDB("admin") for (var j in auth) { if (auth[j] == port) { admin.auth("root", "rapadura") admin.system.users.find().forEach(function(u) { if (u.user == "root" || u.user == "reader") { return; } if (typeof admin.dropUser == "function") { mongo.getDB(u.db).dropUser(u.user); } else { admin.removeUser(u.user); } }) break } } var result = admin.runCommand({"listDatabases": 1}) for (var j = 0; j != 100; j++) { if (typeof result.databases != "undefined" || notMaster(result)) { break } result = admin.runCommand({"listDatabases": 1}) } if (notMaster(result)) { continue } if (typeof result.databases == "undefined") { print("Could not list databases. 
Command result:") print(JSON.stringify(result)) quit(12) } var dbs = result.databases for (var j = 0; j != dbs.length; j++) { var db = dbs[j] switch (db.name) { case "admin": case "local": case "config": break default: mongo.getDB(db.name).dropDatabase() } } } function notMaster(result) { return typeof result.errmsg != "undefined" && (result.errmsg.indexOf("not master") >= 0 || result.errmsg.indexOf("no master found")) } // vim:ts=4:sw=4:et charm-2.1.1/src/gopkg.in/mgo.v2/testdb/supervisord.conf0000664000175000017500000001035012672604565021743 0ustar marcomarco[supervisord] logfile = %(here)s/supervisord.log pidfile = %(here)s/supervisord.pid directory = %(here)s #nodaemon = true [inet_http_server] port = 127.0.0.1:9001 [supervisorctl] serverurl = http://127.0.0.1:9001 [rpcinterface:supervisor] supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface [program:db1] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db1 --bind_ip=127.0.0.1,::1 --port 40001 --ipv6 [program:db2] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db2 --bind_ip=127.0.0.1 --port 40002 --auth [program:db3] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --dbpath %(here)s/db3 --bind_ip=127.0.0.1 --port 40003 --auth --sslMode preferSSL --sslCAFile %(here)s/server.pem --sslPEMKeyFile %(here)s/server.pem [program:rs1a] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1a --bind_ip=127.0.0.1 --port 40011 [program:rs1b] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1b --bind_ip=127.0.0.1 --port 40012 [program:rs1c] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles 
--nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1c --bind_ip=127.0.0.1 --port 40013 [program:rs2a] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2a --bind_ip=127.0.0.1 --port 40021 [program:rs2b] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2b --bind_ip=127.0.0.1 --port 40022 [program:rs2c] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2c --bind_ip=127.0.0.1 --port 40023 [program:rs3a] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3a --bind_ip=127.0.0.1 --port 40031 --auth --keyFile=%(here)s/keyfile [program:rs3b] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3b --bind_ip=127.0.0.1 --port 40032 --auth --keyFile=%(here)s/keyfile [program:rs3c] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3c --bind_ip=127.0.0.1 --port 40033 --auth --keyFile=%(here)s/keyfile [program:rs4a] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs4 --dbpath %(here)s/rs4a --bind_ip=127.0.0.1 --port 40041 [program:cfg1] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg1 --bind_ip=127.0.0.1 --port 40101 [program:cfg2] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg2 --bind_ip=127.0.0.1 --port 40102 [program:cfg3] command = mongod --nohttpinterface --noprealloc 
--nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg3 --bind_ip=127.0.0.1 --port 40103 --auth --keyFile=%(here)s/keyfile [program:s1] command = mongos --configdb 127.0.0.1:40101 --bind_ip=127.0.0.1 --port 40201 --chunkSize 1 [program:s2] command = mongos --configdb 127.0.0.1:40102 --bind_ip=127.0.0.1 --port 40202 --chunkSize 1 [program:s3] command = mongos --configdb 127.0.0.1:40103 --bind_ip=127.0.0.1 --port 40203 --chunkSize 1 --keyFile=%(here)s/keyfile charm-2.1.1/src/gopkg.in/mgo.v2/raceoff.go0000664000175000017500000000007112672604565017155 0ustar marcomarco// +build !race package mgo const raceDetector = false charm-2.1.1/src/gopkg.in/mgo.v2/log.go0000664000175000017500000000750712672604565016344 0ustar marcomarco// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo import ( "fmt" "sync" ) // --------------------------------------------------------------------------- // Logging integration. // Avoid importing the log type information unnecessarily. There's a small cost // associated with using an interface rather than the type. Depending on how // often the logger is plugged in, it would be worth using the type instead. type log_Logger interface { Output(calldepth int, s string) error } var ( globalLogger log_Logger globalDebug bool globalMutex sync.Mutex ) // RACE WARNING: There are known data races when logging, which are manually // silenced when the race detector is in use. These data races won't be // observed in typical use, because logging is supposed to be set up once when // the application starts. Having raceDetector as a constant, the compiler // should elide the locks altogether in actual use. // Specify the *log.Logger object where log messages should be sent to. func SetLogger(logger log_Logger) { if raceDetector { globalMutex.Lock() defer globalMutex.Unlock() } globalLogger = logger } // Enable the delivery of debug messages to the logger. Only meaningful // if a logger is also set. 
func SetDebug(debug bool) { if raceDetector { globalMutex.Lock() defer globalMutex.Unlock() } globalDebug = debug } func log(v ...interface{}) { if raceDetector { globalMutex.Lock() defer globalMutex.Unlock() } if globalLogger != nil { globalLogger.Output(2, fmt.Sprint(v...)) } } func logln(v ...interface{}) { if raceDetector { globalMutex.Lock() defer globalMutex.Unlock() } if globalLogger != nil { globalLogger.Output(2, fmt.Sprintln(v...)) } } func logf(format string, v ...interface{}) { if raceDetector { globalMutex.Lock() defer globalMutex.Unlock() } if globalLogger != nil { globalLogger.Output(2, fmt.Sprintf(format, v...)) } } func debug(v ...interface{}) { if raceDetector { globalMutex.Lock() defer globalMutex.Unlock() } if globalDebug && globalLogger != nil { globalLogger.Output(2, fmt.Sprint(v...)) } } func debugln(v ...interface{}) { if raceDetector { globalMutex.Lock() defer globalMutex.Unlock() } if globalDebug && globalLogger != nil { globalLogger.Output(2, fmt.Sprintln(v...)) } } func debugf(format string, v ...interface{}) { if raceDetector { globalMutex.Lock() defer globalMutex.Unlock() } if globalDebug && globalLogger != nil { globalLogger.Output(2, fmt.Sprintf(format, v...)) } } charm-2.1.1/src/gopkg.in/mgo.v2/auth_test.go0000664000175000017500000007607612672604565017572 0ustar marcomarco// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo_test import ( "crypto/tls" "flag" "fmt" "io/ioutil" "net" "net/url" "os" "runtime" "sync" "time" . "gopkg.in/check.v1" "gopkg.in/mgo.v2" ) func (s *S) TestAuthLoginDatabase(c *C) { // Test both with a normal database and with an authenticated shard. for _, addr := range []string{"localhost:40002", "localhost:40203"} { session, err := mgo.Dial(addr) c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") admindb := session.DB("admin") err = admindb.Login("root", "wrong") c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) } } func (s *S) TestAuthLoginSession(c *C) { // Test both with a normal database and with an authenticated shard. 
for _, addr := range []string{"localhost:40002", "localhost:40203"} { session, err := mgo.Dial(addr) c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") cred := mgo.Credential{ Username: "root", Password: "wrong", } err = session.Login(&cred) c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") cred.Password = "rapadura" err = session.Login(&cred) c.Assert(err, IsNil) err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) } } func (s *S) TestAuthLoginLogout(c *C) { // Test both with a normal database and with an authenticated shard. for _, addr := range []string{"localhost:40002", "localhost:40203"} { session, err := mgo.Dial(addr) c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) admindb.Logout() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") // Must have dropped auth from the session too. session = session.Copy() defer session.Close() coll = session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") } } func (s *S) TestAuthLoginLogoutAll(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) session.LogoutAll() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") // Must have dropped auth from the session too. 
session = session.Copy() defer session.Close() coll = session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") } func (s *S) TestAuthUpsertUserErrors(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) mydb := session.DB("mydb") err = mydb.UpsertUser(&mgo.User{}) c.Assert(err, ErrorMatches, "user has no Username") err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", UserSource: "source"}) c.Assert(err, ErrorMatches, "user has both Password/PasswordHash and UserSource set") err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", OtherDBRoles: map[string][]mgo.Role{"db": nil}}) c.Assert(err, ErrorMatches, "user with OtherDBRoles is only supported in the admin or \\$external databases") } func (s *S) TestAuthUpsertUser(c *C) { if !s.versionAtLeast(2, 4) { c.Skip("UpsertUser only works on 2.4+") } session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) mydb := session.DB("mydb") ruser := &mgo.User{ Username: "myruser", Password: "mypass", Roles: []mgo.Role{mgo.RoleRead}, } rwuser := &mgo.User{ Username: "myrwuser", Password: "mypass", Roles: []mgo.Role{mgo.RoleReadWrite}, } err = mydb.UpsertUser(ruser) c.Assert(err, IsNil) err = mydb.UpsertUser(rwuser) c.Assert(err, IsNil) err = mydb.Login("myruser", "mypass") c.Assert(err, IsNil) admindb.Logout() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") err = mydb.Login("myrwuser", "mypass") c.Assert(err, IsNil) err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) myotherdb := session.DB("myotherdb") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) // Test UserSource. 
rwuserother := &mgo.User{ Username: "myrwuser", UserSource: "mydb", Roles: []mgo.Role{mgo.RoleRead}, } err = myotherdb.UpsertUser(rwuserother) if s.versionAtLeast(2, 6) { c.Assert(err, ErrorMatches, `MongoDB 2.6\+ does not support the UserSource setting`) return } c.Assert(err, IsNil) admindb.Logout() // Test indirection via UserSource: we can't write to it, because // the roles for myrwuser are different there. othercoll := myotherdb.C("myothercoll") err = othercoll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") // Reading works, though. err = othercoll.Find(nil).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) // Can't login directly into the database using UserSource, though. err = myotherdb.Login("myrwuser", "mypass") c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") } func (s *S) TestAuthUpsertUserOtherDBRoles(c *C) { if !s.versionAtLeast(2, 4) { c.Skip("UpsertUser only works on 2.4+") } session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) ruser := &mgo.User{ Username: "myruser", Password: "mypass", OtherDBRoles: map[string][]mgo.Role{"mydb": []mgo.Role{mgo.RoleRead}}, } err = admindb.UpsertUser(ruser) c.Assert(err, IsNil) defer admindb.RemoveUser("myruser") admindb.Logout() err = admindb.Login("myruser", "mypass") coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") err = coll.Find(nil).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) } func (s *S) TestAuthUpsertUserUpdates(c *C) { if !s.versionAtLeast(2, 4) { c.Skip("UpsertUser only works on 2.4+") } session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) mydb := session.DB("mydb") // Insert a user that can read. 
user := &mgo.User{ Username: "myruser", Password: "mypass", Roles: []mgo.Role{mgo.RoleRead}, } err = mydb.UpsertUser(user) c.Assert(err, IsNil) // Now update the user password. user = &mgo.User{ Username: "myruser", Password: "mynewpass", } err = mydb.UpsertUser(user) c.Assert(err, IsNil) // Login with the new user. usession, err := mgo.Dial("myruser:mynewpass@localhost:40002/mydb") c.Assert(err, IsNil) defer usession.Close() // Can read, but not write. err = usession.DB("mydb").C("mycoll").Find(nil).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) err = usession.DB("mydb").C("mycoll").Insert(M{"ok": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") // Update the user role. user = &mgo.User{ Username: "myruser", Roles: []mgo.Role{mgo.RoleReadWrite}, } err = mydb.UpsertUser(user) c.Assert(err, IsNil) // Dial again to ensure the password hasn't changed. usession, err = mgo.Dial("myruser:mynewpass@localhost:40002/mydb") c.Assert(err, IsNil) defer usession.Close() // Now it can write. 
err = usession.DB("mydb").C("mycoll").Insert(M{"ok": 1}) c.Assert(err, IsNil) } func (s *S) TestAuthAddUser(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) mydb := session.DB("mydb") err = mydb.AddUser("myruser", "mypass", true) c.Assert(err, IsNil) err = mydb.AddUser("mywuser", "mypass", false) c.Assert(err, IsNil) err = mydb.Login("myruser", "mypass") c.Assert(err, IsNil) admindb.Logout() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") err = mydb.Login("mywuser", "mypass") c.Assert(err, IsNil) err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) } func (s *S) TestAuthAddUserReplaces(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) mydb := session.DB("mydb") err = mydb.AddUser("myuser", "myoldpass", false) c.Assert(err, IsNil) err = mydb.AddUser("myuser", "mynewpass", true) c.Assert(err, IsNil) admindb.Logout() err = mydb.Login("myuser", "myoldpass") c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") err = mydb.Login("myuser", "mynewpass") c.Assert(err, IsNil) // ReadOnly flag was changed too. 
err = mydb.C("mycoll").Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") } func (s *S) TestAuthRemoveUser(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) mydb := session.DB("mydb") err = mydb.AddUser("myuser", "mypass", true) c.Assert(err, IsNil) err = mydb.RemoveUser("myuser") c.Assert(err, IsNil) err = mydb.RemoveUser("myuser") c.Assert(err, Equals, mgo.ErrNotFound) err = mydb.Login("myuser", "mypass") c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") } func (s *S) TestAuthLoginTwiceDoesNothing(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) oldStats := mgo.GetStats() err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) newStats := mgo.GetStats() c.Assert(newStats.SentOps, Equals, oldStats.SentOps) } func (s *S) TestAuthLoginLogoutLoginDoesNothing(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) oldStats := mgo.GetStats() admindb.Logout() err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) newStats := mgo.GetStats() c.Assert(newStats.SentOps, Equals, oldStats.SentOps) } func (s *S) TestAuthLoginSwitchUser(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) err = admindb.Login("reader", "rapadura") c.Assert(err, IsNil) // Can't write. err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") // But can read. 
result := struct{ N int }{} err = coll.Find(nil).One(&result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 1) } func (s *S) TestAuthLoginChangePassword(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) mydb := session.DB("mydb") err = mydb.AddUser("myuser", "myoldpass", false) c.Assert(err, IsNil) err = mydb.Login("myuser", "myoldpass") c.Assert(err, IsNil) err = mydb.AddUser("myuser", "mynewpass", true) c.Assert(err, IsNil) err = mydb.Login("myuser", "mynewpass") c.Assert(err, IsNil) admindb.Logout() // The second login must be in effect, which means read-only. err = mydb.C("mycoll").Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") } func (s *S) TestAuthLoginCachingWithSessionRefresh(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) session.Refresh() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) } func (s *S) TestAuthLoginCachingWithSessionCopy(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) session = session.Copy() defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) } func (s *S) TestAuthLoginCachingWithSessionClone(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) session = session.Clone() defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) } func (s *S) TestAuthLoginCachingWithNewSession(c *C) { session, err := 
mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) session = session.New() defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") } func (s *S) TestAuthLoginCachingAcrossPool(c *C) { // Logins are cached even when the conenction goes back // into the pool. session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) // Add another user to test the logout case at the same time. mydb := session.DB("mydb") err = mydb.AddUser("myuser", "mypass", false) c.Assert(err, IsNil) err = mydb.Login("myuser", "mypass") c.Assert(err, IsNil) // Logout root explicitly, to test both cases. admindb.Logout() // Give socket back to pool. session.Refresh() // Brand new session, should use socket from the pool. other := session.New() defer other.Close() oldStats := mgo.GetStats() err = other.DB("admin").Login("root", "rapadura") c.Assert(err, IsNil) err = other.DB("mydb").Login("myuser", "mypass") c.Assert(err, IsNil) // Both logins were cached, so no ops. newStats := mgo.GetStats() c.Assert(newStats.SentOps, Equals, oldStats.SentOps) // And they actually worked. err = other.DB("mydb").C("mycoll").Insert(M{"n": 1}) c.Assert(err, IsNil) other.DB("admin").Logout() err = other.DB("mydb").C("mycoll").Insert(M{"n": 1}) c.Assert(err, IsNil) } func (s *S) TestAuthLoginCachingAcrossPoolWithLogout(c *C) { // Now verify that logouts are properly flushed if they // are not revalidated after leaving the pool. session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) // Add another user to test the logout case at the same time. 
mydb := session.DB("mydb") err = mydb.AddUser("myuser", "mypass", true) c.Assert(err, IsNil) err = mydb.Login("myuser", "mypass") c.Assert(err, IsNil) // Just some data to query later. err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) c.Assert(err, IsNil) // Give socket back to pool. session.Refresh() // Brand new session, should use socket from the pool. other := session.New() defer other.Close() oldStats := mgo.GetStats() err = other.DB("mydb").Login("myuser", "mypass") c.Assert(err, IsNil) // Login was cached, so no ops. newStats := mgo.GetStats() c.Assert(newStats.SentOps, Equals, oldStats.SentOps) // Can't write, since root has been implicitly logged out // when the collection went into the pool, and not revalidated. err = other.DB("mydb").C("mycoll").Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") // But can read due to the revalidated myuser login. result := struct{ N int }{} err = other.DB("mydb").C("mycoll").Find(nil).One(&result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 1) } func (s *S) TestAuthEventual(c *C) { // Eventual sessions don't keep sockets around, so they are // an interesting test case. 
session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() admindb := session.DB("admin") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) c.Assert(err, IsNil) var wg sync.WaitGroup wg.Add(20) for i := 0; i != 10; i++ { go func() { defer wg.Done() var result struct{ N int } err := session.DB("mydb").C("mycoll").Find(nil).One(&result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 1) }() } for i := 0; i != 10; i++ { go func() { defer wg.Done() err := session.DB("mydb").C("mycoll").Insert(M{"n": 1}) c.Assert(err, IsNil) }() } wg.Wait() } func (s *S) TestAuthURL(c *C) { session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002/") c.Assert(err, IsNil) defer session.Close() err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) c.Assert(err, IsNil) } func (s *S) TestAuthURLWrongCredentials(c *C) { session, err := mgo.Dial("mongodb://root:wrong@localhost:40002/") if session != nil { session.Close() } c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") c.Assert(session, IsNil) } func (s *S) TestAuthURLWithNewSession(c *C) { // When authentication is in the URL, the new session will // actually carry it on as well, even if logged out explicitly. session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002/") c.Assert(err, IsNil) defer session.Close() session.DB("admin").Logout() // Do it twice to ensure it passes the needed data on. session = session.New() defer session.Close() session = session.New() defer session.Close() err = session.DB("mydb").C("mycoll").Insert(M{"n": 1}) c.Assert(err, IsNil) } func (s *S) TestAuthURLWithDatabase(c *C) { session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002") c.Assert(err, IsNil) defer session.Close() mydb := session.DB("mydb") err = mydb.AddUser("myruser", "mypass", true) c.Assert(err, IsNil) // Test once with database, and once with source. 
for i := 0; i < 2; i++ { var url string if i == 0 { url = "mongodb://myruser:mypass@localhost:40002/mydb" } else { url = "mongodb://myruser:mypass@localhost:40002/admin?authSource=mydb" } usession, err := mgo.Dial(url) c.Assert(err, IsNil) defer usession.Close() ucoll := usession.DB("mydb").C("mycoll") err = ucoll.FindId(0).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) err = ucoll.Insert(M{"n": 1}) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") } } func (s *S) TestDefaultDatabase(c *C) { tests := []struct{ url, db string }{ {"mongodb://root:rapadura@localhost:40002", "test"}, {"mongodb://root:rapadura@localhost:40002/admin", "admin"}, {"mongodb://localhost:40001", "test"}, {"mongodb://localhost:40001/", "test"}, {"mongodb://localhost:40001/mydb", "mydb"}, } for _, test := range tests { session, err := mgo.Dial(test.url) c.Assert(err, IsNil) defer session.Close() c.Logf("test: %#v", test) c.Assert(session.DB("").Name, Equals, test.db) scopy := session.Copy() c.Check(scopy.DB("").Name, Equals, test.db) scopy.Close() } } func (s *S) TestAuthDirect(c *C) { // Direct connections must work to the master and slaves. for _, port := range []string{"40031", "40032", "40033"} { url := fmt.Sprintf("mongodb://root:rapadura@localhost:%s/?connect=direct", port) session, err := mgo.Dial(url) c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, true) var result struct{} err = session.DB("mydb").C("mycoll").Find(nil).One(&result) c.Assert(err, Equals, mgo.ErrNotFound) } } func (s *S) TestAuthDirectWithLogin(c *C) { // Direct connections must work to the master and slaves. 
for _, port := range []string{"40031", "40032", "40033"} { url := fmt.Sprintf("mongodb://localhost:%s/?connect=direct", port) session, err := mgo.Dial(url) c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, true) session.SetSyncTimeout(3 * time.Second) err = session.DB("admin").Login("root", "rapadura") c.Assert(err, IsNil) var result struct{} err = session.DB("mydb").C("mycoll").Find(nil).One(&result) c.Assert(err, Equals, mgo.ErrNotFound) } } func (s *S) TestAuthScramSha1Cred(c *C) { if !s.versionAtLeast(2, 7, 7) { c.Skip("SCRAM-SHA-1 tests depend on 2.7.7") } cred := &mgo.Credential{ Username: "root", Password: "rapadura", Mechanism: "SCRAM-SHA-1", Source: "admin", } host := "localhost:40002" c.Logf("Connecting to %s...", host) session, err := mgo.Dial(host) c.Assert(err, IsNil) defer session.Close() mycoll := session.DB("admin").C("mycoll") c.Logf("Connected! Testing the need for authentication...") err = mycoll.Find(nil).One(nil) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") c.Logf("Authenticating...") err = session.Login(cred) c.Assert(err, IsNil) c.Logf("Authenticated!") c.Logf("Connected! Testing the need for authentication...") err = mycoll.Find(nil).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) } func (s *S) TestAuthScramSha1URL(c *C) { if !s.versionAtLeast(2, 7, 7) { c.Skip("SCRAM-SHA-1 tests depend on 2.7.7") } host := "localhost:40002" c.Logf("Connecting to %s...", host) session, err := mgo.Dial(fmt.Sprintf("root:rapadura@%s?authMechanism=SCRAM-SHA-1", host)) c.Assert(err, IsNil) defer session.Close() mycoll := session.DB("admin").C("mycoll") c.Logf("Connected! 
Testing the need for authentication...") err = mycoll.Find(nil).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) } func (s *S) TestAuthX509Cred(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() binfo, err := session.BuildInfo() c.Assert(err, IsNil) if binfo.OpenSSLVersion == "" { c.Skip("server does not support SSL") } clientCertPEM, err := ioutil.ReadFile("testdb/client.pem") c.Assert(err, IsNil) clientCert, err := tls.X509KeyPair(clientCertPEM, clientCertPEM) c.Assert(err, IsNil) tlsConfig := &tls.Config{ // Isolating tests to client certs, don't care about server validation. InsecureSkipVerify: true, Certificates: []tls.Certificate{clientCert}, } var host = "localhost:40003" c.Logf("Connecting to %s...", host) session, err = mgo.DialWithInfo(&mgo.DialInfo{ Addrs: []string{host}, DialServer: func(addr *mgo.ServerAddr) (net.Conn, error) { return tls.Dial("tcp", addr.String(), tlsConfig) }, }) c.Assert(err, IsNil) defer session.Close() err = session.Login(&mgo.Credential{Username: "root", Password: "rapadura"}) c.Assert(err, IsNil) // This needs to be kept in sync with client.pem x509Subject := "CN=localhost,OU=Client,O=MGO,L=MGO,ST=MGO,C=GO" externalDB := session.DB("$external") var x509User mgo.User = mgo.User{ Username: x509Subject, OtherDBRoles: map[string][]mgo.Role{"admin": []mgo.Role{mgo.RoleRoot}}, } err = externalDB.UpsertUser(&x509User) c.Assert(err, IsNil) session.LogoutAll() c.Logf("Connected! 
Ensuring authentication is required...") names, err := session.DatabaseNames() c.Assert(err, ErrorMatches, "not authorized .*") cred := &mgo.Credential{ Username: x509Subject, Mechanism: "MONGODB-X509", Source: "$external", } c.Logf("Authenticating...") err = session.Login(cred) c.Assert(err, IsNil) c.Logf("Authenticated!") names, err = session.DatabaseNames() c.Assert(err, IsNil) c.Assert(len(names) > 0, Equals, true) } var ( plainFlag = flag.String("plain", "", "Host to test PLAIN authentication against (depends on custom environment)") plainUser = "einstein" plainPass = "password" ) func (s *S) TestAuthPlainCred(c *C) { if *plainFlag == "" { c.Skip("no -plain") } cred := &mgo.Credential{ Username: plainUser, Password: plainPass, Source: "$external", Mechanism: "PLAIN", } c.Logf("Connecting to %s...", *plainFlag) session, err := mgo.Dial(*plainFlag) c.Assert(err, IsNil) defer session.Close() records := session.DB("records").C("records") c.Logf("Connected! Testing the need for authentication...") err = records.Find(nil).One(nil) c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") c.Logf("Authenticating...") err = session.Login(cred) c.Assert(err, IsNil) c.Logf("Authenticated!") c.Logf("Connected! Testing the need for authentication...") err = records.Find(nil).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) } func (s *S) TestAuthPlainURL(c *C) { if *plainFlag == "" { c.Skip("no -plain") } c.Logf("Connecting to %s...", *plainFlag) session, err := mgo.Dial(fmt.Sprintf("%s:%s@%s?authMechanism=PLAIN", url.QueryEscape(plainUser), url.QueryEscape(plainPass), *plainFlag)) c.Assert(err, IsNil) defer session.Close() c.Logf("Connected! 
Testing the need for authentication...") err = session.DB("records").C("records").Find(nil).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) } var ( kerberosFlag = flag.Bool("kerberos", false, "Test Kerberos authentication (depends on custom environment)") kerberosHost = "ldaptest.10gen.cc" kerberosUser = "drivers@LDAPTEST.10GEN.CC" winKerberosPasswordEnv = "MGO_KERBEROS_PASSWORD" ) // Kerberos has its own suite because it talks to a remote server // that is prepared to authenticate against a kerberos deployment. type KerberosSuite struct{} var _ = Suite(&KerberosSuite{}) func (kerberosSuite *KerberosSuite) SetUpSuite(c *C) { mgo.SetDebug(true) mgo.SetStats(true) } func (kerberosSuite *KerberosSuite) TearDownSuite(c *C) { mgo.SetDebug(false) mgo.SetStats(false) } func (kerberosSuite *KerberosSuite) SetUpTest(c *C) { mgo.SetLogger((*cLogger)(c)) mgo.ResetStats() } func (kerberosSuite *KerberosSuite) TearDownTest(c *C) { mgo.SetLogger(nil) } func (kerberosSuite *KerberosSuite) TestAuthKerberosCred(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") } cred := &mgo.Credential{ Username: kerberosUser, Mechanism: "GSSAPI", } windowsAppendPasswordToCredential(cred) c.Logf("Connecting to %s...", kerberosHost) session, err := mgo.Dial(kerberosHost) c.Assert(err, IsNil) defer session.Close() c.Logf("Connected! 
Testing the need for authentication...") n, err := session.DB("kerberos").C("test").Find(M{}).Count() c.Assert(err, ErrorMatches, ".*authorized.*") c.Logf("Authenticating...") err = session.Login(cred) c.Assert(err, IsNil) c.Logf("Authenticated!") n, err = session.DB("kerberos").C("test").Find(M{}).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 1) } func (kerberosSuite *KerberosSuite) TestAuthKerberosURL(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") } c.Logf("Connecting to %s...", kerberosHost) connectUri := url.QueryEscape(kerberosUser) + "@" + kerberosHost + "?authMechanism=GSSAPI" if runtime.GOOS == "windows" { connectUri = url.QueryEscape(kerberosUser) + ":" + url.QueryEscape(getWindowsKerberosPassword()) + "@" + kerberosHost + "?authMechanism=GSSAPI" } session, err := mgo.Dial(connectUri) c.Assert(err, IsNil) defer session.Close() n, err := session.DB("kerberos").C("test").Find(M{}).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 1) } func (kerberosSuite *KerberosSuite) TestAuthKerberosServiceName(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") } wrongServiceName := "wrong" rightServiceName := "mongodb" cred := &mgo.Credential{ Username: kerberosUser, Mechanism: "GSSAPI", Service: wrongServiceName, } windowsAppendPasswordToCredential(cred) c.Logf("Connecting to %s...", kerberosHost) session, err := mgo.Dial(kerberosHost) c.Assert(err, IsNil) defer session.Close() c.Logf("Authenticating with incorrect service name...") err = session.Login(cred) c.Assert(err, ErrorMatches, ".*@LDAPTEST.10GEN.CC not found.*") cred.Service = rightServiceName c.Logf("Authenticating with correct service name...") err = session.Login(cred) c.Assert(err, IsNil) c.Logf("Authenticated!") n, err := session.DB("kerberos").C("test").Find(M{}).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 1) } func (kerberosSuite *KerberosSuite) TestAuthKerberosServiceHost(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") } wrongServiceHost := "eggs.bacon.tk" rightServiceHost := kerberosHost 
cred := &mgo.Credential{ Username: kerberosUser, Mechanism: "GSSAPI", ServiceHost: wrongServiceHost, } windowsAppendPasswordToCredential(cred) c.Logf("Connecting to %s...", kerberosHost) session, err := mgo.Dial(kerberosHost) c.Assert(err, IsNil) defer session.Close() c.Logf("Authenticating with incorrect service host...") err = session.Login(cred) c.Assert(err, ErrorMatches, ".*@LDAPTEST.10GEN.CC not found.*") cred.ServiceHost = rightServiceHost c.Logf("Authenticating with correct service host...") err = session.Login(cred) c.Assert(err, IsNil) c.Logf("Authenticated!") n, err := session.DB("kerberos").C("test").Find(M{}).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 1) } // No kinit on SSPI-style Kerberos, so we need to provide a password. In order // to avoid inlining password, require it to be set as an environment variable, // for instance: `SET MGO_KERBEROS_PASSWORD=this_isnt_the_password` func getWindowsKerberosPassword() string { pw := os.Getenv(winKerberosPasswordEnv) if pw == "" { panic(fmt.Sprintf("Need to set %v environment variable to run Kerberos tests on Windows", winKerberosPasswordEnv)) } return pw } func windowsAppendPasswordToCredential(cred *mgo.Credential) { if runtime.GOOS == "windows" { cred.Password = getWindowsKerberosPassword() } } charm-2.1.1/src/gopkg.in/mgo.v2/bulk.go0000664000175000017500000001451412672604565016514 0ustar marcomarcopackage mgo import ( "bytes" "gopkg.in/mgo.v2/bson" ) // Bulk represents an operation that can be prepared with several // orthogonal changes before being delivered to the server. // // Relevant documentation: // // http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api // type Bulk struct { c *Collection ordered bool actions []bulkAction } type bulkOp int const ( bulkInsert bulkOp = iota + 1 bulkUpdate bulkUpdateAll ) type bulkAction struct { op bulkOp docs []interface{} } type bulkUpdateOp []interface{} // BulkError holds an error returned from running a Bulk operation. 
// // TODO: This is private for the moment, until we understand exactly how // to report these multi-errors in a useful and convenient way. type bulkError struct { errs []error } // BulkResult holds the results for a bulk operation. type BulkResult struct { Matched int Modified int // Available only for MongoDB 2.6+ // Be conservative while we understand exactly how to report these // results in a useful and convenient way, and also how to emulate // them with prior servers. private bool } func (e *bulkError) Error() string { if len(e.errs) == 0 { return "invalid bulkError instance: no errors" } if len(e.errs) == 1 { return e.errs[0].Error() } msgs := make(map[string]bool) for _, err := range e.errs { msgs[err.Error()] = true } if len(msgs) == 1 { for msg := range msgs { return msg } } var buf bytes.Buffer buf.WriteString("multiple errors in bulk operation:\n") for msg := range msgs { buf.WriteString(" - ") buf.WriteString(msg) buf.WriteByte('\n') } return buf.String() } // Bulk returns a value to prepare the execution of a bulk operation. // // WARNING: This API is still experimental. // func (c *Collection) Bulk() *Bulk { return &Bulk{c: c, ordered: true} } // Unordered puts the bulk operation in unordered mode. // // In unordered mode the indvidual operations may be sent // out of order, which means latter operations may proceed // even if prior ones have failed. func (b *Bulk) Unordered() { b.ordered = false } func (b *Bulk) action(op bulkOp) *bulkAction { if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op { return &b.actions[len(b.actions)-1] } if !b.ordered { for i := range b.actions { if b.actions[i].op == op { return &b.actions[i] } } } b.actions = append(b.actions, bulkAction{op: op}) return &b.actions[len(b.actions)-1] } // Insert queues up the provided documents for insertion. func (b *Bulk) Insert(docs ...interface{}) { action := b.action(bulkInsert) action.docs = append(action.docs, docs...) 
} // Update queues up the provided pairs of updating instructions. // The first element of each pair selects which documents must be // updated, and the second element defines how to update it. // Each pair matches exactly one document for updating at most. func (b *Bulk) Update(pairs ...interface{}) { if len(pairs)%2 != 0 { panic("Bulk.Update requires an even number of parameters") } action := b.action(bulkUpdate) for i := 0; i < len(pairs); i += 2 { selector := pairs[i] if selector == nil { selector = bson.D{} } action.docs = append(action.docs, &updateOp{ Collection: b.c.FullName, Selector: selector, Update: pairs[i+1], }) } } // UpdateAll queues up the provided pairs of updating instructions. // The first element of each pair selects which documents must be // updated, and the second element defines how to update it. // Each pair updates all documents matching the selector. func (b *Bulk) UpdateAll(pairs ...interface{}) { if len(pairs)%2 != 0 { panic("Bulk.UpdateAll requires an even number of parameters") } action := b.action(bulkUpdate) for i := 0; i < len(pairs); i += 2 { selector := pairs[i] if selector == nil { selector = bson.D{} } action.docs = append(action.docs, &updateOp{ Collection: b.c.FullName, Selector: selector, Update: pairs[i+1], Flags: 2, Multi: true, }) } } // Upsert queues up the provided pairs of upserting instructions. // The first element of each pair selects which documents must be // updated, and the second element defines how to update it. // Each pair matches exactly one document for updating at most. 
func (b *Bulk) Upsert(pairs ...interface{}) { if len(pairs)%2 != 0 { panic("Bulk.Update requires an even number of parameters") } action := b.action(bulkUpdate) for i := 0; i < len(pairs); i += 2 { selector := pairs[i] if selector == nil { selector = bson.D{} } action.docs = append(action.docs, &updateOp{ Collection: b.c.FullName, Selector: selector, Update: pairs[i+1], Flags: 1, Upsert: true, }) } } // Run runs all the operations queued up. // // If an error is reported on an unordered bulk operation, the error value may // be an aggregation of all issues observed. As an exception to that, Insert // operations running on MongoDB versions prior to 2.6 will report the last // error only due to a limitation in the wire protocol. func (b *Bulk) Run() (*BulkResult, error) { var result BulkResult var berr bulkError var failed bool for i := range b.actions { action := &b.actions[i] var ok bool switch action.op { case bulkInsert: ok = b.runInsert(action, &result, &berr) case bulkUpdate: ok = b.runUpdate(action, &result, &berr) default: panic("unknown bulk operation") } if !ok { failed = true if b.ordered { break } } } if failed { return nil, &berr } return &result, nil } func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *bulkError) bool { op := &insertOp{b.c.FullName, action.docs, 0} if !b.ordered { op.flags = 1 // ContinueOnError } lerr, err := b.c.writeOp(op, b.ordered) return b.checkSuccess(berr, lerr, err) } func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *bulkError) bool { ok := true for _, op := range action.docs { lerr, err := b.c.writeOp(op, b.ordered) if !b.checkSuccess(berr, lerr, err) { ok = false if b.ordered { break } } result.Matched += lerr.N result.Modified += lerr.modified } return ok } func (b *Bulk) checkSuccess(berr *bulkError, lerr *LastError, err error) bool { if lerr != nil && len(lerr.errors) > 0 { berr.errs = append(berr.errs, lerr.errors...) 
return false } else if err != nil { berr.errs = append(berr.errs, err) return false } return true } charm-2.1.1/src/gopkg.in/mgo.v2/session_test.go0000664000175000017500000027344112672604565020307 0ustar marcomarco// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo_test import ( "flag" "fmt" "math" "runtime" "sort" "strconv" "strings" "time" . 
"gopkg.in/check.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" ) func (s *S) TestRunString(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() result := struct{ Ok int }{} err = session.Run("ping", &result) c.Assert(err, IsNil) c.Assert(result.Ok, Equals, 1) } func (s *S) TestRunValue(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() result := struct{ Ok int }{} err = session.Run(M{"ping": 1}, &result) c.Assert(err, IsNil) c.Assert(result.Ok, Equals, 1) } func (s *S) TestPing(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() // Just ensure the nonce has been received. result := struct{}{} err = session.Run("ping", &result) mgo.ResetStats() err = session.Ping() c.Assert(err, IsNil) // Pretty boring. stats := mgo.GetStats() c.Assert(stats.SentOps, Equals, 1) c.Assert(stats.ReceivedOps, Equals, 1) } func (s *S) TestDialIPAddress(c *C) { session, err := mgo.Dial("127.0.0.1:40001") c.Assert(err, IsNil) defer session.Close() session, err = mgo.Dial("[::1%]:40001") c.Assert(err, IsNil) defer session.Close() } func (s *S) TestURLSingle(c *C) { session, err := mgo.Dial("mongodb://localhost:40001/") c.Assert(err, IsNil) defer session.Close() result := struct{ Ok int }{} err = session.Run("ping", &result) c.Assert(err, IsNil) c.Assert(result.Ok, Equals, 1) } func (s *S) TestURLMany(c *C) { session, err := mgo.Dial("mongodb://localhost:40011,localhost:40012/") c.Assert(err, IsNil) defer session.Close() result := struct{ Ok int }{} err = session.Run("ping", &result) c.Assert(err, IsNil) c.Assert(result.Ok, Equals, 1) } func (s *S) TestURLParsing(c *C) { urls := []string{ "localhost:40001?foo=1&bar=2", "localhost:40001?foo=1;bar=2", } for _, url := range urls { session, err := mgo.Dial(url) if session != nil { session.Close() } c.Assert(err, ErrorMatches, "unsupported connection URL option: (foo=1|bar=2)") } } func (s *S) TestInsertFindOne(c *C) { 
session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1, "b": 2}) c.Assert(err, IsNil) err = coll.Insert(M{"a": 1, "b": 3}) c.Assert(err, IsNil) result := struct{ A, B int }{} err = coll.Find(M{"a": 1}).Sort("b").One(&result) c.Assert(err, IsNil) c.Assert(result.A, Equals, 1) c.Assert(result.B, Equals, 2) } func (s *S) TestInsertFindOneNil(c *C) { session, err := mgo.Dial("localhost:40002") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Find(nil).One(nil) c.Assert(err, ErrorMatches, "unauthorized.*|not authorized.*") } func (s *S) TestInsertFindOneMap(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1, "b": 2}) c.Assert(err, IsNil) result := make(M) err = coll.Find(M{"a": 1}).One(result) c.Assert(err, IsNil) c.Assert(result["a"], Equals, 1) c.Assert(result["b"], Equals, 2) } func (s *S) TestInsertFindAll(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1, "b": 2}) c.Assert(err, IsNil) err = coll.Insert(M{"a": 3, "b": 4}) c.Assert(err, IsNil) type R struct{ A, B int } var result []R assertResult := func() { c.Assert(len(result), Equals, 2) c.Assert(result[0].A, Equals, 1) c.Assert(result[0].B, Equals, 2) c.Assert(result[1].A, Equals, 3) c.Assert(result[1].B, Equals, 4) } // nil slice err = coll.Find(nil).Sort("a").All(&result) c.Assert(err, IsNil) assertResult() // Previously allocated slice allocd := make([]R, 5) result = allocd err = coll.Find(nil).Sort("a").All(&result) c.Assert(err, IsNil) assertResult() // Ensure result is backed by the originally allocated array c.Assert(&result[0], Equals, &allocd[0]) // Non-pointer slice error f := func() { coll.Find(nil).All(result) } c.Assert(f, Panics, "result 
argument must be a slice address") // Non-slice error f = func() { coll.Find(nil).All(new(int)) } c.Assert(f, Panics, "result argument must be a slice address") } func (s *S) TestFindRef(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() db1 := session.DB("db1") db1col1 := db1.C("col1") db2 := session.DB("db2") db2col1 := db2.C("col1") err = db1col1.Insert(M{"_id": 1, "n": 1}) c.Assert(err, IsNil) err = db1col1.Insert(M{"_id": 2, "n": 2}) c.Assert(err, IsNil) err = db2col1.Insert(M{"_id": 2, "n": 3}) c.Assert(err, IsNil) result := struct{ N int }{} ref1 := &mgo.DBRef{Collection: "col1", Id: 1} ref2 := &mgo.DBRef{Collection: "col1", Id: 2, Database: "db2"} err = db1.FindRef(ref1).One(&result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 1) err = db1.FindRef(ref2).One(&result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 3) err = db2.FindRef(ref1).One(&result) c.Assert(err, Equals, mgo.ErrNotFound) err = db2.FindRef(ref2).One(&result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 3) err = session.FindRef(ref2).One(&result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 3) f := func() { session.FindRef(ref1).One(&result) } c.Assert(f, PanicMatches, "Can't resolve database for &mgo.DBRef{Collection:\"col1\", Id:1, Database:\"\"}") } func (s *S) TestDatabaseAndCollectionNames(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() db1 := session.DB("db1") db1col1 := db1.C("col1") db1col2 := db1.C("col2") db2 := session.DB("db2") db2col1 := db2.C("col3") err = db1col1.Insert(M{"_id": 1}) c.Assert(err, IsNil) err = db1col2.Insert(M{"_id": 1}) c.Assert(err, IsNil) err = db2col1.Insert(M{"_id": 1}) c.Assert(err, IsNil) names, err := session.DatabaseNames() c.Assert(err, IsNil) c.Assert(filterDBs(names), DeepEquals, []string{"db1", "db2"}) // Try to exercise cursor logic. 2.8.0-rc3 still ignores this. 
session.SetBatch(2) names, err = db1.CollectionNames() c.Assert(err, IsNil) c.Assert(names, DeepEquals, []string{"col1", "col2", "system.indexes"}) names, err = db2.CollectionNames() c.Assert(err, IsNil) c.Assert(names, DeepEquals, []string{"col3", "system.indexes"}) } func (s *S) TestSelect(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") coll.Insert(M{"a": 1, "b": 2}) result := struct{ A, B int }{} err = coll.Find(M{"a": 1}).Select(M{"b": 1}).One(&result) c.Assert(err, IsNil) c.Assert(result.A, Equals, 0) c.Assert(result.B, Equals, 2) } func (s *S) TestInlineMap(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") var v, result1 struct { A int M map[string]int ",inline" } v.A = 1 v.M = map[string]int{"b": 2} err = coll.Insert(v) c.Assert(err, IsNil) noId := M{"_id": 0} err = coll.Find(nil).Select(noId).One(&result1) c.Assert(err, IsNil) c.Assert(result1.A, Equals, 1) c.Assert(result1.M, DeepEquals, map[string]int{"b": 2}) var result2 M err = coll.Find(nil).Select(noId).One(&result2) c.Assert(err, IsNil) c.Assert(result2, DeepEquals, M{"a": 1, "b": 2}) } func (s *S) TestUpdate(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { err := coll.Insert(M{"k": n, "n": n}) c.Assert(err, IsNil) } // No changes is a no-op and shouldn't return an error. 
err = coll.Update(M{"k": 42}, M{"$set": M{"n": 42}}) c.Assert(err, IsNil) err = coll.Update(M{"k": 42}, M{"$inc": M{"n": 1}}) c.Assert(err, IsNil) result := make(M) err = coll.Find(M{"k": 42}).One(result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 43) err = coll.Update(M{"k": 47}, M{"k": 47, "n": 47}) c.Assert(err, Equals, mgo.ErrNotFound) err = coll.Find(M{"k": 47}).One(result) c.Assert(err, Equals, mgo.ErrNotFound) } func (s *S) TestUpdateId(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { err := coll.Insert(M{"_id": n, "n": n}) c.Assert(err, IsNil) } err = coll.UpdateId(42, M{"$inc": M{"n": 1}}) c.Assert(err, IsNil) result := make(M) err = coll.FindId(42).One(result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 43) err = coll.UpdateId(47, M{"k": 47, "n": 47}) c.Assert(err, Equals, mgo.ErrNotFound) err = coll.FindId(47).One(result) c.Assert(err, Equals, mgo.ErrNotFound) } func (s *S) TestUpdateNil(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"k": 42, "n": 42}) c.Assert(err, IsNil) err = coll.Update(nil, M{"$inc": M{"n": 1}}) c.Assert(err, IsNil) result := make(M) err = coll.Find(M{"k": 42}).One(result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 43) err = coll.Insert(M{"k": 45, "n": 45}) c.Assert(err, IsNil) _, err = coll.UpdateAll(nil, M{"$inc": M{"n": 1}}) c.Assert(err, IsNil) err = coll.Find(M{"k": 42}).One(result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 44) err = coll.Find(M{"k": 45}).One(result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 46) } func (s *S) TestUpsert(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { err := 
coll.Insert(M{"k": n, "n": n}) c.Assert(err, IsNil) } info, err := coll.Upsert(M{"k": 42}, M{"k": 42, "n": 24}) c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 1) c.Assert(info.UpsertedId, IsNil) result := M{} err = coll.Find(M{"k": 42}).One(result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 24) // Insert with internally created id. info, err = coll.Upsert(M{"k": 47}, M{"k": 47, "n": 47}) c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 0) c.Assert(info.UpsertedId, NotNil) err = coll.Find(M{"k": 47}).One(result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 47) result = M{} err = coll.Find(M{"_id": info.UpsertedId}).One(result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 47) // Insert with provided id. info, err = coll.Upsert(M{"k": 48}, M{"k": 48, "n": 48, "_id": 48}) c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 0) if s.versionAtLeast(2, 6) { c.Assert(info.UpsertedId, Equals, 48) } else { c.Assert(info.UpsertedId, IsNil) // Unfortunate, but that's what Mongo gave us. 
} err = coll.Find(M{"k": 48}).One(result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 48) } func (s *S) TestUpsertId(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { err := coll.Insert(M{"_id": n, "n": n}) c.Assert(err, IsNil) } info, err := coll.UpsertId(42, M{"n": 24}) c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 1) c.Assert(info.UpsertedId, IsNil) result := M{} err = coll.FindId(42).One(result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 24) info, err = coll.UpsertId(47, M{"_id": 47, "n": 47}) c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 0) if s.versionAtLeast(2, 6) { c.Assert(info.UpsertedId, Equals, 47) } else { c.Assert(info.UpsertedId, IsNil) } err = coll.FindId(47).One(result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 47) } func (s *S) TestUpdateAll(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { err := coll.Insert(M{"k": n, "n": n}) c.Assert(err, IsNil) } // Don't actually modify the documents. Should still report 4 matching updates. info, err := coll.UpdateAll(M{"k": M{"$gt": 42}}, M{"$unset": M{"missing": 1}}) c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 4) info, err = coll.UpdateAll(M{"k": M{"$gt": 42}}, M{"$inc": M{"n": 1}}) c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 4) result := make(M) err = coll.Find(M{"k": 42}).One(result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 42) err = coll.Find(M{"k": 43}).One(result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 44) err = coll.Find(M{"k": 44}).One(result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 45) if !s.versionAtLeast(2, 6) { // 2.6 made this invalid. 
info, err = coll.UpdateAll(M{"k": 47}, M{"k": 47, "n": 47}) c.Assert(err, Equals, nil) c.Assert(info.Updated, Equals, 0) } } func (s *S) TestRemove(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { err := coll.Insert(M{"n": n}) c.Assert(err, IsNil) } err = coll.Remove(M{"n": M{"$gt": 42}}) c.Assert(err, IsNil) result := &struct{ N int }{} err = coll.Find(M{"n": 42}).One(result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 42) err = coll.Find(M{"n": 43}).One(result) c.Assert(err, Equals, mgo.ErrNotFound) err = coll.Find(M{"n": 44}).One(result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 44) } func (s *S) TestRemoveId(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"_id": 40}, M{"_id": 41}, M{"_id": 42}) c.Assert(err, IsNil) err = coll.RemoveId(41) c.Assert(err, IsNil) c.Assert(coll.FindId(40).One(nil), IsNil) c.Assert(coll.FindId(41).One(nil), Equals, mgo.ErrNotFound) c.Assert(coll.FindId(42).One(nil), IsNil) } func (s *S) TestRemoveUnsafe(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() session.SetSafe(nil) coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"_id": 40}, M{"_id": 41}, M{"_id": 42}) c.Assert(err, IsNil) err = coll.RemoveId(41) c.Assert(err, IsNil) c.Assert(coll.FindId(40).One(nil), IsNil) c.Assert(coll.FindId(41).One(nil), Equals, mgo.ErrNotFound) c.Assert(coll.FindId(42).One(nil), IsNil) } func (s *S) TestRemoveAll(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { err := coll.Insert(M{"n": n}) c.Assert(err, IsNil) } info, err := coll.RemoveAll(M{"n": M{"$gt": 42}}) c.Assert(err, IsNil) 
c.Assert(info.Updated, Equals, 0) c.Assert(info.Removed, Equals, 4) c.Assert(info.UpsertedId, IsNil) result := &struct{ N int }{} err = coll.Find(M{"n": 42}).One(result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 42) err = coll.Find(M{"n": 43}).One(result) c.Assert(err, Equals, mgo.ErrNotFound) err = coll.Find(M{"n": 44}).One(result) c.Assert(err, Equals, mgo.ErrNotFound) info, err = coll.RemoveAll(nil) c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 0) c.Assert(info.Removed, Equals, 3) c.Assert(info.UpsertedId, IsNil) n, err := coll.Find(nil).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 0) } func (s *S) TestDropDatabase(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() db1 := session.DB("db1") db1.C("col").Insert(M{"_id": 1}) db2 := session.DB("db2") db2.C("col").Insert(M{"_id": 1}) err = db1.DropDatabase() c.Assert(err, IsNil) names, err := session.DatabaseNames() c.Assert(err, IsNil) c.Assert(filterDBs(names), DeepEquals, []string{"db2"}) err = db2.DropDatabase() c.Assert(err, IsNil) names, err = session.DatabaseNames() c.Assert(err, IsNil) c.Assert(filterDBs(names), DeepEquals, []string{}) } func filterDBs(dbs []string) []string { var i int for _, name := range dbs { switch name { case "admin", "local": default: dbs[i] = name i++ } } if len(dbs) == 0 { return []string{} } return dbs[:i] } func (s *S) TestDropCollection(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() db := session.DB("db1") db.C("col1").Insert(M{"_id": 1}) db.C("col2").Insert(M{"_id": 1}) err = db.C("col1").DropCollection() c.Assert(err, IsNil) names, err := db.CollectionNames() c.Assert(err, IsNil) c.Assert(names, DeepEquals, []string{"col2", "system.indexes"}) err = db.C("col2").DropCollection() c.Assert(err, IsNil) names, err = db.CollectionNames() c.Assert(err, IsNil) c.Assert(names, DeepEquals, []string{"system.indexes"}) } func (s *S) TestCreateCollectionCapped(c *C) { session, err := 
mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") info := &mgo.CollectionInfo{ Capped: true, MaxBytes: 1024, MaxDocs: 3, } err = coll.Create(info) c.Assert(err, IsNil) ns := []int{1, 2, 3, 4, 5} for _, n := range ns { err := coll.Insert(M{"n": n}) c.Assert(err, IsNil) } n, err := coll.Find(nil).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 3) } func (s *S) TestCreateCollectionNoIndex(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") info := &mgo.CollectionInfo{ DisableIdIndex: true, } err = coll.Create(info) c.Assert(err, IsNil) err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) indexes, err := coll.Indexes() c.Assert(indexes, HasLen, 0) } func (s *S) TestCreateCollectionForceIndex(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") info := &mgo.CollectionInfo{ ForceIdIndex: true, Capped: true, MaxBytes: 1024, } err = coll.Create(info) c.Assert(err, IsNil) err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) indexes, err := coll.Indexes() c.Assert(indexes, HasLen, 1) } func (s *S) TestIsDupValues(c *C) { c.Assert(mgo.IsDup(nil), Equals, false) c.Assert(mgo.IsDup(&mgo.LastError{Code: 1}), Equals, false) c.Assert(mgo.IsDup(&mgo.QueryError{Code: 1}), Equals, false) c.Assert(mgo.IsDup(&mgo.LastError{Code: 11000}), Equals, true) c.Assert(mgo.IsDup(&mgo.QueryError{Code: 11000}), Equals, true) c.Assert(mgo.IsDup(&mgo.LastError{Code: 11001}), Equals, true) c.Assert(mgo.IsDup(&mgo.QueryError{Code: 11001}), Equals, true) c.Assert(mgo.IsDup(&mgo.LastError{Code: 12582}), Equals, true) c.Assert(mgo.IsDup(&mgo.QueryError{Code: 12582}), Equals, true) lerr := &mgo.LastError{Code: 16460, Err: "error inserting 1 documents to shard ... 
caused by :: E11000 duplicate key error index: ..."} c.Assert(mgo.IsDup(lerr), Equals, true) } func (s *S) TestIsDupPrimary(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"_id": 1}) c.Assert(err, IsNil) err = coll.Insert(M{"_id": 1}) c.Assert(err, ErrorMatches, ".*duplicate key error.*") c.Assert(mgo.IsDup(err), Equals, true) } func (s *S) TestIsDupUnique(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() index := mgo.Index{ Key: []string{"a", "b"}, Unique: true, } coll := session.DB("mydb").C("mycoll") err = coll.EnsureIndex(index) c.Assert(err, IsNil) err = coll.Insert(M{"a": 1, "b": 1}) c.Assert(err, IsNil) err = coll.Insert(M{"a": 1, "b": 1}) c.Assert(err, ErrorMatches, ".*duplicate key error.*") c.Assert(mgo.IsDup(err), Equals, true) } func (s *S) TestIsDupCapped(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") info := &mgo.CollectionInfo{ ForceIdIndex: true, Capped: true, MaxBytes: 1024, } err = coll.Create(info) c.Assert(err, IsNil) err = coll.Insert(M{"_id": 1}) c.Assert(err, IsNil) err = coll.Insert(M{"_id": 1}) // The error was different for capped collections before 2.6. c.Assert(err, ErrorMatches, ".*duplicate key.*") // The issue is reduced by using IsDup. 
c.Assert(mgo.IsDup(err), Equals, true) } func (s *S) TestIsDupFindAndModify(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.EnsureIndex(mgo.Index{Key: []string{"n"}, Unique: true}) c.Assert(err, IsNil) err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) err = coll.Insert(M{"n": 2}) c.Assert(err, IsNil) _, err = coll.Find(M{"n": 1}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}}, bson.M{}) c.Assert(err, ErrorMatches, ".*duplicate key error.*") c.Assert(mgo.IsDup(err), Equals, true) } func (s *S) TestFindAndModify(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 42}) session.SetMode(mgo.Monotonic, true) result := M{} info, err := coll.Find(M{"n": 42}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}}, result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 42) c.Assert(info.Updated, Equals, 1) c.Assert(info.Removed, Equals, 0) c.Assert(info.UpsertedId, IsNil) // A nil result parameter should be acceptable. 
info, err = coll.Find(M{"n": 43}).Apply(mgo.Change{Update: M{"$unset": M{"missing": 1}}}, nil) c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 1) c.Assert(info.Removed, Equals, 0) c.Assert(info.UpsertedId, IsNil) result = M{} info, err = coll.Find(M{"n": 43}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}, ReturnNew: true}, result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 44) c.Assert(info.Updated, Equals, 1) c.Assert(info.Removed, Equals, 0) c.Assert(info.UpsertedId, IsNil) result = M{} info, err = coll.Find(M{"n": 50}).Apply(mgo.Change{Upsert: true, Update: M{"n": 51, "o": 52}}, result) c.Assert(err, IsNil) c.Assert(result["n"], IsNil) c.Assert(info.Updated, Equals, 0) c.Assert(info.Removed, Equals, 0) c.Assert(info.UpsertedId, NotNil) result = M{} info, err = coll.Find(nil).Sort("-n").Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}, ReturnNew: true}, result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 52) c.Assert(info.Updated, Equals, 1) c.Assert(info.Removed, Equals, 0) c.Assert(info.UpsertedId, IsNil) result = M{} info, err = coll.Find(M{"n": 52}).Select(M{"o": 1}).Apply(mgo.Change{Remove: true}, result) c.Assert(err, IsNil) c.Assert(result["n"], IsNil) c.Assert(result["o"], Equals, 52) c.Assert(info.Updated, Equals, 0) c.Assert(info.Removed, Equals, 1) c.Assert(info.UpsertedId, IsNil) result = M{} info, err = coll.Find(M{"n": 60}).Apply(mgo.Change{Remove: true}, result) c.Assert(err, Equals, mgo.ErrNotFound) c.Assert(len(result), Equals, 0) c.Assert(info, IsNil) } func (s *S) TestFindAndModifyBug997828(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": "not-a-number"}) result := make(M) _, err = coll.Find(M{"n": "not-a-number"}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}}, result) c.Assert(err, ErrorMatches, `(exception: )?Cannot apply \$inc .*`) if s.versionAtLeast(2, 1) { qerr, _ := err.(*mgo.QueryError) c.Assert(qerr, NotNil, 
Commentf("err: %#v", err)) if s.versionAtLeast(2, 6) { // Oh, the dance of error codes. :-( c.Assert(qerr.Code, Equals, 16837) } else { c.Assert(qerr.Code, Equals, 10140) } } else { lerr, _ := err.(*mgo.LastError) c.Assert(lerr, NotNil, Commentf("err: %#v", err)) c.Assert(lerr.Code, Equals, 10140) } } func (s *S) TestCountCollection(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42} for _, n := range ns { err := coll.Insert(M{"n": n}) c.Assert(err, IsNil) } n, err := coll.Count() c.Assert(err, IsNil) c.Assert(n, Equals, 3) } func (s *S) TestCountQuery(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42} for _, n := range ns { err := coll.Insert(M{"n": n}) c.Assert(err, IsNil) } n, err := coll.Find(M{"n": M{"$gt": 40}}).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 2) } func (s *S) TestCountQuerySorted(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42} for _, n := range ns { err := coll.Insert(M{"n": n}) c.Assert(err, IsNil) } n, err := coll.Find(M{"n": M{"$gt": 40}}).Sort("n").Count() c.Assert(err, IsNil) c.Assert(n, Equals, 2) } func (s *S) TestCountSkipLimit(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44} for _, n := range ns { err := coll.Insert(M{"n": n}) c.Assert(err, IsNil) } n, err := coll.Find(nil).Skip(1).Limit(3).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 3) n, err = coll.Find(nil).Skip(1).Limit(5).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 4) } func (s *S) TestQueryExplain(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := 
[]int{40, 41, 42} for _, n := range ns { err := coll.Insert(M{"n": n}) c.Assert(err, IsNil) } m := M{} query := coll.Find(nil).Limit(2) err = query.Explain(m) c.Assert(err, IsNil) if m["queryPlanner"] != nil { c.Assert(m["executionStats"].(M)["totalDocsExamined"], Equals, 2) } else { c.Assert(m["cursor"], Equals, "BasicCursor") c.Assert(m["nscanned"], Equals, 2) c.Assert(m["n"], Equals, 2) } n := 0 var result M iter := query.Iter() for iter.Next(&result) { n++ } c.Assert(iter.Close(), IsNil) c.Assert(n, Equals, 2) } func (s *S) TestQuerySetMaxScan(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42} for _, n := range ns { err := coll.Insert(M{"n": n}) c.Assert(err, IsNil) } query := coll.Find(nil).SetMaxScan(2) var result []M err = query.All(&result) c.Assert(err, IsNil) c.Assert(result, HasLen, 2) } func (s *S) TestQuerySetMaxTime(c *C) { if !s.versionAtLeast(2, 6) { c.Skip("SetMaxTime only supported in 2.6+") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") for i := 0; i < 1000; i++ { err := coll.Insert(M{"n": i}) c.Assert(err, IsNil) } query := coll.Find(nil) query.SetMaxTime(1 * time.Millisecond) query.Batch(2) var result []M err = query.All(&result) c.Assert(err, ErrorMatches, "operation exceeded time limit") } func (s *S) TestQueryHint(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") coll.EnsureIndexKey("a") m := M{} err = coll.Find(nil).Hint("a").Explain(m) c.Assert(err, IsNil) if m["queryPlanner"] != nil { m = m["queryPlanner"].(M) m = m["winningPlan"].(M) m = m["inputStage"].(M) c.Assert(m["indexName"], Equals, "a_1") } else { c.Assert(m["indexBounds"], NotNil) c.Assert(m["indexBounds"].(M)["a"], NotNil) } } func (s *S) TestQueryComment(c *C) { session, err := mgo.Dial("localhost:40001") 
c.Assert(err, IsNil) defer session.Close() db := session.DB("mydb") coll := db.C("mycoll") err = db.Run(bson.M{"profile": 2}, nil) c.Assert(err, IsNil) ns := []int{40, 41, 42} for _, n := range ns { err := coll.Insert(M{"n": n}) c.Assert(err, IsNil) } query := coll.Find(bson.M{"n": 41}) query.Comment("some comment") err = query.One(nil) c.Assert(err, IsNil) query = coll.Find(bson.M{"n": 41}) query.Comment("another comment") err = query.One(nil) c.Assert(err, IsNil) n, err := session.DB("mydb").C("system.profile").Find(bson.M{"query.$query.n": 41, "query.$comment": "some comment"}).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 1) } func (s *S) TestFindOneNotFound(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") result := struct{ A, B int }{} err = coll.Find(M{"a": 1}).One(&result) c.Assert(err, Equals, mgo.ErrNotFound) c.Assert(err, ErrorMatches, "not found") c.Assert(err == mgo.ErrNotFound, Equals, true) } func (s *S) TestFindNil(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) c.Assert(err, IsNil) result := struct{ N int }{} err = coll.Find(nil).One(&result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 1) } func (s *S) TestFindId(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"_id": 41, "n": 41}) c.Assert(err, IsNil) err = coll.Insert(M{"_id": 42, "n": 42}) c.Assert(err, IsNil) result := struct{ N int }{} err = coll.FindId(42).One(&result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 42) } func (s *S) TestFindIterAll(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { coll.Insert(M{"n": n}) } session.Refresh() // 
Release socket. mgo.ResetStats() iter := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2).Iter() result := struct{ N int }{} for i := 2; i < 7; i++ { ok := iter.Next(&result) c.Assert(ok, Equals, true) c.Assert(result.N, Equals, ns[i]) if i == 1 { stats := mgo.GetStats() c.Assert(stats.ReceivedDocs, Equals, 2) } } ok := iter.Next(&result) c.Assert(ok, Equals, false) c.Assert(iter.Close(), IsNil) session.Refresh() // Release socket. stats := mgo.GetStats() c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs. c.Assert(stats.ReceivedDocs, Equals, 5) c.Assert(stats.SocketsInUse, Equals, 0) } func (s *S) TestFindIterTwiceWithSameQuery(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") for i := 40; i != 47; i++ { coll.Insert(M{"n": i}) } query := coll.Find(M{}).Sort("n") result1 := query.Skip(1).Iter() result2 := query.Skip(2).Iter() result := struct{ N int }{} ok := result2.Next(&result) c.Assert(ok, Equals, true) c.Assert(result.N, Equals, 42) ok = result1.Next(&result) c.Assert(ok, Equals, true) c.Assert(result.N, Equals, 41) } func (s *S) TestFindIterWithoutResults(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") coll.Insert(M{"n": 42}) iter := coll.Find(M{"n": 0}).Iter() result := struct{ N int }{} ok := iter.Next(&result) c.Assert(ok, Equals, false) c.Assert(iter.Close(), IsNil) c.Assert(result.N, Equals, 0) } func (s *S) TestFindIterLimit(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { coll.Insert(M{"n": n}) } session.Refresh() // Release socket. 
mgo.ResetStats() query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Limit(3) iter := query.Iter() result := struct{ N int }{} for i := 2; i < 5; i++ { ok := iter.Next(&result) c.Assert(ok, Equals, true) c.Assert(result.N, Equals, ns[i]) } ok := iter.Next(&result) c.Assert(ok, Equals, false) c.Assert(iter.Close(), IsNil) session.Refresh() // Release socket. stats := mgo.GetStats() c.Assert(stats.SentOps, Equals, 2) // 1*QUERY_OP + 1*KILL_CURSORS_OP c.Assert(stats.ReceivedOps, Equals, 1) // and its REPLY_OP c.Assert(stats.ReceivedDocs, Equals, 3) c.Assert(stats.SocketsInUse, Equals, 0) } var cursorTimeout = flag.Bool("cursor-timeout", false, "Enable cursor timeout test") func (s *S) TestFindIterCursorTimeout(c *C) { if !*cursorTimeout { c.Skip("-cursor-timeout") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() type Doc struct { Id int "_id" } coll := session.DB("test").C("test") coll.Remove(nil) for i := 0; i < 100; i++ { err = coll.Insert(Doc{i}) c.Assert(err, IsNil) } session.SetBatch(1) iter := coll.Find(nil).Iter() var doc Doc if !iter.Next(&doc) { c.Fatalf("iterator failed to return any documents") } for i := 10; i > 0; i-- { c.Logf("Sleeping... %d minutes to go...", i) time.Sleep(1*time.Minute + 2*time.Second) } // Drain any existing documents that were fetched. if !iter.Next(&doc) { c.Fatalf("iterator with timed out cursor failed to return previously cached document") } if iter.Next(&doc) { c.Fatalf("timed out cursor returned document") } c.Assert(iter.Err(), Equals, mgo.ErrCursor) } func (s *S) TestTooManyItemsLimitBug(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU())) mgo.SetDebug(false) coll := session.DB("mydb").C("mycoll") words := strings.Split("foo bar baz", " ") for i := 0; i < 5; i++ { words = append(words, words...) 
} doc := bson.D{{"words", words}} inserts := 10000 limit := 5000 iters := 0 c.Assert(inserts > limit, Equals, true) for i := 0; i < inserts; i++ { err := coll.Insert(&doc) c.Assert(err, IsNil) } iter := coll.Find(nil).Limit(limit).Iter() for iter.Next(&doc) { if iters%100 == 0 { c.Logf("Seen %d docments", iters) } iters++ } c.Assert(iter.Close(), IsNil) c.Assert(iters, Equals, limit) } func serverCursorsOpen(session *mgo.Session) int { var result struct { Cursors struct { TotalOpen int `bson:"totalOpen"` TimedOut int `bson:"timedOut"` } } err := session.Run("serverStatus", &result) if err != nil { panic(err) } return result.Cursors.TotalOpen } func (s *S) TestFindIterLimitWithMore(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") // Insane amounts of logging otherwise due to the // amount of data being shuffled. mgo.SetDebug(false) defer mgo.SetDebug(true) // Should amount to more than 4MB bson payload, // the default limit per result chunk. const total = 4096 var d struct{ A [1024]byte } docs := make([]interface{}, total) for i := 0; i < total; i++ { docs[i] = &d } err = coll.Insert(docs...) c.Assert(err, IsNil) n, err := coll.Count() c.Assert(err, IsNil) c.Assert(n, Equals, total) // First, try restricting to a single chunk with a negative limit. nresults := 0 iter := coll.Find(nil).Limit(-total).Iter() var discard struct{} for iter.Next(&discard) { nresults++ } if nresults < total/2 || nresults >= total { c.Fatalf("Bad result size with negative limit: %d", nresults) } cursorsOpen := serverCursorsOpen(session) // Try again, with a positive limit. Should reach the end now, // using multiple chunks. nresults = 0 iter = coll.Find(nil).Limit(total).Iter() for iter.Next(&discard) { nresults++ } c.Assert(nresults, Equals, total) // Ensure the cursor used is properly killed. c.Assert(serverCursorsOpen(session), Equals, cursorsOpen) // Edge case, -MinInt == -MinInt. 
nresults = 0 iter = coll.Find(nil).Limit(math.MinInt32).Iter() for iter.Next(&discard) { nresults++ } if nresults < total/2 || nresults >= total { c.Fatalf("Bad result size with MinInt32 limit: %d", nresults) } } func (s *S) TestFindIterLimitWithBatch(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { coll.Insert(M{"n": n}) } // Ping the database to ensure the nonce has been received already. c.Assert(session.Ping(), IsNil) session.Refresh() // Release socket. mgo.ResetStats() query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Limit(3).Batch(2) iter := query.Iter() result := struct{ N int }{} for i := 2; i < 5; i++ { ok := iter.Next(&result) c.Assert(ok, Equals, true) c.Assert(result.N, Equals, ns[i]) if i == 3 { stats := mgo.GetStats() c.Assert(stats.ReceivedDocs, Equals, 2) } } ok := iter.Next(&result) c.Assert(ok, Equals, false) c.Assert(iter.Close(), IsNil) session.Refresh() // Release socket. stats := mgo.GetStats() c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 1*GET_MORE_OP + 1*KILL_CURSORS_OP c.Assert(stats.ReceivedOps, Equals, 2) // and its REPLY_OPs c.Assert(stats.ReceivedDocs, Equals, 3) c.Assert(stats.SocketsInUse, Equals, 0) } func (s *S) TestFindIterSortWithBatch(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { coll.Insert(M{"n": n}) } // Without this, the logic above breaks because Mongo refuses to // return a cursor with an in-memory sort. coll.EnsureIndexKey("n") // Ping the database to ensure the nonce has been received already. c.Assert(session.Ping(), IsNil) session.Refresh() // Release socket. 
mgo.ResetStats() query := coll.Find(M{"n": M{"$lte": 44}}).Sort("-n").Batch(2) iter := query.Iter() ns = []int{46, 45, 44, 43, 42, 41, 40} result := struct{ N int }{} for i := 2; i < len(ns); i++ { c.Logf("i=%d", i) ok := iter.Next(&result) c.Assert(ok, Equals, true) c.Assert(result.N, Equals, ns[i]) if i == 3 { stats := mgo.GetStats() c.Assert(stats.ReceivedDocs, Equals, 2) } } ok := iter.Next(&result) c.Assert(ok, Equals, false) c.Assert(iter.Close(), IsNil) session.Refresh() // Release socket. stats := mgo.GetStats() c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP c.Assert(stats.ReceivedOps, Equals, 3) // and its REPLY_OPs c.Assert(stats.ReceivedDocs, Equals, 5) c.Assert(stats.SocketsInUse, Equals, 0) } // Test tailable cursors in a situation where Next has to sleep to // respect the timeout requested on Tail. func (s *S) TestFindTailTimeoutWithSleep(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() cresult := struct{ ErrMsg string }{} db := session.DB("mydb") err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult) c.Assert(err, IsNil) c.Assert(cresult.ErrMsg, Equals, "") coll := db.C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { coll.Insert(M{"n": n}) } session.Refresh() // Release socket. mgo.ResetStats() timeout := 3 * time.Second query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) iter := query.Tail(timeout) n := len(ns) result := struct{ N int }{} for i := 2; i != n; i++ { ok := iter.Next(&result) c.Assert(ok, Equals, true) c.Assert(iter.Err(), IsNil) c.Assert(iter.Timeout(), Equals, false) c.Assert(result.N, Equals, ns[i]) if i == 3 { // The batch boundary. stats := mgo.GetStats() c.Assert(stats.ReceivedDocs, Equals, 2) } } mgo.ResetStats() // The following call to Next will block. 
go func() { // The internal AwaitData timing of MongoDB is around 2 seconds, // so this should force mgo to sleep at least once by itself to // respect the requested timeout. time.Sleep(timeout + 5e8*time.Nanosecond) session := session.New() defer session.Close() coll := session.DB("mydb").C("mycoll") coll.Insert(M{"n": 47}) }() c.Log("Will wait for Next with N=47...") ok := iter.Next(&result) c.Assert(ok, Equals, true) c.Assert(iter.Err(), IsNil) c.Assert(iter.Timeout(), Equals, false) c.Assert(result.N, Equals, 47) c.Log("Got Next with N=47!") // The following may break because it depends a bit on the internal // timing used by MongoDB's AwaitData logic. If it does, the problem // will be observed as more GET_MORE_OPs than predicted: // 1*QUERY for nonce + 1*GET_MORE_OP on Next + 1*GET_MORE_OP on Next after sleep + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 stats := mgo.GetStats() if s.versionAtLeast(2, 6) { c.Assert(stats.SentOps, Equals, 4) } else { c.Assert(stats.SentOps, Equals, 5) } c.Assert(stats.ReceivedOps, Equals, 4) // REPLY_OPs for 1*QUERY_OP for nonce + 2*GET_MORE_OPs + 1*QUERY_OP c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response c.Log("Will wait for a result which will never come...") started := time.Now() ok = iter.Next(&result) c.Assert(ok, Equals, false) c.Assert(iter.Err(), IsNil) c.Assert(iter.Timeout(), Equals, true) c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) c.Log("Will now reuse the timed out tail cursor...") coll.Insert(M{"n": 48}) ok = iter.Next(&result) c.Assert(ok, Equals, true) c.Assert(iter.Close(), IsNil) c.Assert(iter.Timeout(), Equals, false) c.Assert(result.N, Equals, 48) } // Test tailable cursors in a situation where Next never gets to sleep once // to respect the timeout requested on Tail. 
func (s *S) TestFindTailTimeoutNoSleep(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() cresult := struct{ ErrMsg string }{} db := session.DB("mydb") err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult) c.Assert(err, IsNil) c.Assert(cresult.ErrMsg, Equals, "") coll := db.C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { coll.Insert(M{"n": n}) } session.Refresh() // Release socket. mgo.ResetStats() timeout := 1 * time.Second query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) iter := query.Tail(timeout) n := len(ns) result := struct{ N int }{} for i := 2; i != n; i++ { ok := iter.Next(&result) c.Assert(ok, Equals, true) c.Assert(iter.Err(), IsNil) c.Assert(iter.Timeout(), Equals, false) c.Assert(result.N, Equals, ns[i]) if i == 3 { // The batch boundary. stats := mgo.GetStats() c.Assert(stats.ReceivedDocs, Equals, 2) } } mgo.ResetStats() // The following call to Next will block. go func() { // The internal AwaitData timing of MongoDB is around 2 seconds, // so this item should arrive within the AwaitData threshold. time.Sleep(5e8) session := session.New() defer session.Close() coll := session.DB("mydb").C("mycoll") coll.Insert(M{"n": 47}) }() c.Log("Will wait for Next with N=47...") ok := iter.Next(&result) c.Assert(ok, Equals, true) c.Assert(iter.Err(), IsNil) c.Assert(iter.Timeout(), Equals, false) c.Assert(result.N, Equals, 47) c.Log("Got Next with N=47!") // The following may break because it depends a bit on the internal // timing used by MongoDB's AwaitData logic. 
If it does, the problem // will be observed as more GET_MORE_OPs than predicted: // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 stats := mgo.GetStats() if s.versionAtLeast(2, 6) { c.Assert(stats.SentOps, Equals, 3) } else { c.Assert(stats.SentOps, Equals, 4) } c.Assert(stats.ReceivedOps, Equals, 3) // REPLY_OPs for 1*QUERY_OP for nonce + 1*GET_MORE_OPs and 1*QUERY_OP c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response c.Log("Will wait for a result which will never come...") started := time.Now() ok = iter.Next(&result) c.Assert(ok, Equals, false) c.Assert(iter.Err(), IsNil) c.Assert(iter.Timeout(), Equals, true) c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) c.Log("Will now reuse the timed out tail cursor...") coll.Insert(M{"n": 48}) ok = iter.Next(&result) c.Assert(ok, Equals, true) c.Assert(iter.Close(), IsNil) c.Assert(iter.Timeout(), Equals, false) c.Assert(result.N, Equals, 48) } // Test tailable cursors in a situation where Next never gets to sleep once // to respect the timeout requested on Tail. func (s *S) TestFindTailNoTimeout(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() cresult := struct{ ErrMsg string }{} db := session.DB("mydb") err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult) c.Assert(err, IsNil) c.Assert(cresult.ErrMsg, Equals, "") coll := db.C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { coll.Insert(M{"n": n}) } session.Refresh() // Release socket. mgo.ResetStats() query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) iter := query.Tail(-1) c.Assert(err, IsNil) n := len(ns) result := struct{ N int }{} for i := 2; i != n; i++ { ok := iter.Next(&result) c.Assert(ok, Equals, true) c.Assert(result.N, Equals, ns[i]) if i == 3 { // The batch boundary. 
stats := mgo.GetStats() c.Assert(stats.ReceivedDocs, Equals, 2) } } mgo.ResetStats() // The following call to Next will block. go func() { time.Sleep(5e8) session := session.New() defer session.Close() coll := session.DB("mydb").C("mycoll") coll.Insert(M{"n": 47}) }() c.Log("Will wait for Next with N=47...") ok := iter.Next(&result) c.Assert(ok, Equals, true) c.Assert(iter.Err(), IsNil) c.Assert(iter.Timeout(), Equals, false) c.Assert(result.N, Equals, 47) c.Log("Got Next with N=47!") // The following may break because it depends a bit on the internal // timing used by MongoDB's AwaitData logic. If it does, the problem // will be observed as more GET_MORE_OPs than predicted: // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 stats := mgo.GetStats() if s.versionAtLeast(2, 6) { c.Assert(stats.SentOps, Equals, 3) } else { c.Assert(stats.SentOps, Equals, 4) } c.Assert(stats.ReceivedOps, Equals, 3) // REPLY_OPs for 1*QUERY_OP for nonce + 1*GET_MORE_OPs and 1*QUERY_OP c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response c.Log("Will wait for a result which will never come...") gotNext := make(chan bool) go func() { ok := iter.Next(&result) gotNext <- ok }() select { case ok := <-gotNext: c.Fatalf("Next returned: %v", ok) case <-time.After(3e9): // Good. Should still be sleeping at that point. } // Closing the session should cause Next to return. 
session.Close() select { case ok := <-gotNext: c.Assert(ok, Equals, false) c.Assert(iter.Err(), ErrorMatches, "Closed explicitly") c.Assert(iter.Timeout(), Equals, false) case <-time.After(1e9): c.Fatal("Closing the session did not unblock Next") } } func (s *S) TestIterNextResetsResult(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{1, 2, 3} for _, n := range ns { coll.Insert(M{"n" + strconv.Itoa(n): n}) } query := coll.Find(nil).Sort("$natural") i := 0 var sresult *struct{ N1, N2, N3 int } iter := query.Iter() for iter.Next(&sresult) { switch i { case 0: c.Assert(sresult.N1, Equals, 1) c.Assert(sresult.N2+sresult.N3, Equals, 0) case 1: c.Assert(sresult.N2, Equals, 2) c.Assert(sresult.N1+sresult.N3, Equals, 0) case 2: c.Assert(sresult.N3, Equals, 3) c.Assert(sresult.N1+sresult.N2, Equals, 0) } i++ } c.Assert(iter.Close(), IsNil) i = 0 var mresult M iter = query.Iter() for iter.Next(&mresult) { delete(mresult, "_id") switch i { case 0: c.Assert(mresult, DeepEquals, M{"n1": 1}) case 1: c.Assert(mresult, DeepEquals, M{"n2": 2}) case 2: c.Assert(mresult, DeepEquals, M{"n3": 3}) } i++ } c.Assert(iter.Close(), IsNil) i = 0 var iresult interface{} iter = query.Iter() for iter.Next(&iresult) { mresult, ok := iresult.(bson.M) c.Assert(ok, Equals, true, Commentf("%#v", iresult)) delete(mresult, "_id") switch i { case 0: c.Assert(mresult, DeepEquals, bson.M{"n1": 1}) case 1: c.Assert(mresult, DeepEquals, bson.M{"n2": 2}) case 2: c.Assert(mresult, DeepEquals, bson.M{"n3": 3}) } i++ } c.Assert(iter.Close(), IsNil) } func (s *S) TestFindForOnIter(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { coll.Insert(M{"n": n}) } session.Refresh() // Release socket. 
mgo.ResetStats() query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) iter := query.Iter() i := 2 var result *struct{ N int } err = iter.For(&result, func() error { c.Assert(i < 7, Equals, true) c.Assert(result.N, Equals, ns[i]) if i == 1 { stats := mgo.GetStats() c.Assert(stats.ReceivedDocs, Equals, 2) } i++ return nil }) c.Assert(err, IsNil) session.Refresh() // Release socket. stats := mgo.GetStats() c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs. c.Assert(stats.ReceivedDocs, Equals, 5) c.Assert(stats.SocketsInUse, Equals, 0) } func (s *S) TestFindFor(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { coll.Insert(M{"n": n}) } session.Refresh() // Release socket. mgo.ResetStats() query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) i := 2 var result *struct{ N int } err = query.For(&result, func() error { c.Assert(i < 7, Equals, true) c.Assert(result.N, Equals, ns[i]) if i == 1 { stats := mgo.GetStats() c.Assert(stats.ReceivedDocs, Equals, 2) } i++ return nil }) c.Assert(err, IsNil) session.Refresh() // Release socket. stats := mgo.GetStats() c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs. 
c.Assert(stats.ReceivedDocs, Equals, 5) c.Assert(stats.SocketsInUse, Equals, 0) } func (s *S) TestFindForStopOnError(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { coll.Insert(M{"n": n}) } query := coll.Find(M{"n": M{"$gte": 42}}) i := 2 var result *struct{ N int } err = query.For(&result, func() error { c.Assert(i < 4, Equals, true) c.Assert(result.N, Equals, ns[i]) if i == 3 { return fmt.Errorf("stop!") } i++ return nil }) c.Assert(err, ErrorMatches, "stop!") } func (s *S) TestFindForResetsResult(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{1, 2, 3} for _, n := range ns { coll.Insert(M{"n" + strconv.Itoa(n): n}) } query := coll.Find(nil).Sort("$natural") i := 0 var sresult *struct{ N1, N2, N3 int } err = query.For(&sresult, func() error { switch i { case 0: c.Assert(sresult.N1, Equals, 1) c.Assert(sresult.N2+sresult.N3, Equals, 0) case 1: c.Assert(sresult.N2, Equals, 2) c.Assert(sresult.N1+sresult.N3, Equals, 0) case 2: c.Assert(sresult.N3, Equals, 3) c.Assert(sresult.N1+sresult.N2, Equals, 0) } i++ return nil }) c.Assert(err, IsNil) i = 0 var mresult M err = query.For(&mresult, func() error { delete(mresult, "_id") switch i { case 0: c.Assert(mresult, DeepEquals, M{"n1": 1}) case 1: c.Assert(mresult, DeepEquals, M{"n2": 2}) case 2: c.Assert(mresult, DeepEquals, M{"n3": 3}) } i++ return nil }) c.Assert(err, IsNil) i = 0 var iresult interface{} err = query.For(&iresult, func() error { mresult, ok := iresult.(bson.M) c.Assert(ok, Equals, true, Commentf("%#v", iresult)) delete(mresult, "_id") switch i { case 0: c.Assert(mresult, DeepEquals, bson.M{"n1": 1}) case 1: c.Assert(mresult, DeepEquals, bson.M{"n2": 2}) case 2: c.Assert(mresult, DeepEquals, bson.M{"n3": 3}) } i++ return nil }) c.Assert(err, IsNil) } func (s *S) 
TestFindIterSnapshot(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() // Insane amounts of logging otherwise due to the // amount of data being shuffled. mgo.SetDebug(false) defer mgo.SetDebug(true) coll := session.DB("mydb").C("mycoll") var a [1024000]byte for n := 0; n < 10; n++ { err := coll.Insert(M{"_id": n, "n": n, "a1": &a}) c.Assert(err, IsNil) } query := coll.Find(M{"n": M{"$gt": -1}}).Batch(2).Prefetch(0) query.Snapshot() iter := query.Iter() seen := map[int]bool{} result := struct { Id int "_id" }{} for iter.Next(&result) { if len(seen) == 2 { // Grow all entries so that they have to move. // Backwards so that the order is inverted. for n := 10; n >= 0; n-- { _, err := coll.Upsert(M{"_id": n}, M{"$set": M{"a2": &a}}) c.Assert(err, IsNil) } } if seen[result.Id] { c.Fatalf("seen duplicated key: %d", result.Id) } seen[result.Id] = true } c.Assert(iter.Close(), IsNil) } func (s *S) TestSort(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") coll.Insert(M{"a": 1, "b": 1}) coll.Insert(M{"a": 2, "b": 2}) coll.Insert(M{"a": 2, "b": 1}) coll.Insert(M{"a": 0, "b": 1}) coll.Insert(M{"a": 2, "b": 0}) coll.Insert(M{"a": 0, "b": 2}) coll.Insert(M{"a": 1, "b": 2}) coll.Insert(M{"a": 0, "b": 0}) coll.Insert(M{"a": 1, "b": 0}) query := coll.Find(M{}) query.Sort("-a") // Should be ignored. 
query.Sort("-b", "a") iter := query.Iter() l := make([]int, 18) r := struct{ A, B int }{} for i := 0; i != len(l); i += 2 { ok := iter.Next(&r) c.Assert(ok, Equals, true) c.Assert(err, IsNil) l[i] = r.A l[i+1] = r.B } c.Assert(l, DeepEquals, []int{0, 2, 1, 2, 2, 2, 0, 1, 1, 1, 2, 1, 0, 0, 1, 0, 2, 0}) } func (s *S) TestSortWithBadArgs(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") f1 := func() { coll.Find(nil).Sort("") } f2 := func() { coll.Find(nil).Sort("+") } f3 := func() { coll.Find(nil).Sort("foo", "-") } for _, f := range []func(){f1, f2, f3} { c.Assert(f, PanicMatches, "Sort: empty field name") } } func (s *S) TestSortScoreText(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.EnsureIndex(mgo.Index{ Key: []string{"$text:a", "$text:b"}, }) msg := "text search not enabled" if err != nil && strings.Contains(err.Error(), msg) { c.Skip(msg) } c.Assert(err, IsNil) err = coll.Insert(M{ "a": "none", "b": "twice: foo foo", }) c.Assert(err, IsNil) err = coll.Insert(M{ "a": "just once: foo", "b": "none", }) c.Assert(err, IsNil) err = coll.Insert(M{ "a": "many: foo foo foo", "b": "none", }) c.Assert(err, IsNil) err = coll.Insert(M{ "a": "none", "b": "none", "c": "ignore: foo", }) c.Assert(err, IsNil) query := coll.Find(M{"$text": M{"$search": "foo"}}) query.Select(M{"score": M{"$meta": "textScore"}}) query.Sort("$textScore:score") iter := query.Iter() var r struct{ A, B string } var results []string for iter.Next(&r) { results = append(results, r.A, r.B) } c.Assert(results, DeepEquals, []string{ "many: foo foo foo", "none", "none", "twice: foo foo", "just once: foo", "none", }) } func (s *S) TestPrefetching(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") const total = 600 mgo.SetDebug(false) docs := 
make([]interface{}, total) for i := 0; i != total; i++ { docs[i] = bson.D{{"n", i}} } err = coll.Insert(docs...) c.Assert(err, IsNil) for testi := 0; testi < 5; testi++ { mgo.ResetStats() var iter *mgo.Iter var beforeMore int switch testi { case 0: // The default session value. session.SetBatch(100) iter = coll.Find(M{}).Iter() beforeMore = 75 case 2: // Changing the session value. session.SetBatch(100) session.SetPrefetch(0.27) iter = coll.Find(M{}).Iter() beforeMore = 73 case 1: // Changing via query methods. iter = coll.Find(M{}).Prefetch(0.27).Batch(100).Iter() beforeMore = 73 case 3: // With prefetch on first document. iter = coll.Find(M{}).Prefetch(1.0).Batch(100).Iter() beforeMore = 0 case 4: // Without prefetch. iter = coll.Find(M{}).Prefetch(0).Batch(100).Iter() beforeMore = 100 } pings := 0 for batchi := 0; batchi < len(docs)/100-1; batchi++ { c.Logf("Iterating over %d documents on batch %d", beforeMore, batchi) var result struct{ N int } for i := 0; i < beforeMore; i++ { ok := iter.Next(&result) c.Assert(ok, Equals, true, Commentf("iter.Err: %v", iter.Err())) } beforeMore = 99 c.Logf("Done iterating.") session.Run("ping", nil) // Roundtrip to settle down. pings++ stats := mgo.GetStats() c.Assert(stats.ReceivedDocs, Equals, (batchi+1)*100+pings) c.Logf("Iterating over one more document on batch %d", batchi) ok := iter.Next(&result) c.Assert(ok, Equals, true, Commentf("iter.Err: %v", iter.Err())) c.Logf("Done iterating.") session.Run("ping", nil) // Roundtrip to settle down. 
pings++ stats = mgo.GetStats() c.Assert(stats.ReceivedDocs, Equals, (batchi+2)*100+pings) } } } func (s *S) TestSafeSetting(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() // Check the default safe := session.Safe() c.Assert(safe.W, Equals, 0) c.Assert(safe.WMode, Equals, "") c.Assert(safe.WTimeout, Equals, 0) c.Assert(safe.FSync, Equals, false) c.Assert(safe.J, Equals, false) // Tweak it session.SetSafe(&mgo.Safe{W: 1, WTimeout: 2, FSync: true}) safe = session.Safe() c.Assert(safe.W, Equals, 1) c.Assert(safe.WMode, Equals, "") c.Assert(safe.WTimeout, Equals, 2) c.Assert(safe.FSync, Equals, true) c.Assert(safe.J, Equals, false) // Reset it again. session.SetSafe(&mgo.Safe{}) safe = session.Safe() c.Assert(safe.W, Equals, 0) c.Assert(safe.WMode, Equals, "") c.Assert(safe.WTimeout, Equals, 0) c.Assert(safe.FSync, Equals, false) c.Assert(safe.J, Equals, false) // Ensure safety to something more conservative. session.SetSafe(&mgo.Safe{W: 5, WTimeout: 6, J: true}) safe = session.Safe() c.Assert(safe.W, Equals, 5) c.Assert(safe.WMode, Equals, "") c.Assert(safe.WTimeout, Equals, 6) c.Assert(safe.FSync, Equals, false) c.Assert(safe.J, Equals, true) // Ensure safety to something less conservative won't change it. session.EnsureSafe(&mgo.Safe{W: 4, WTimeout: 7}) safe = session.Safe() c.Assert(safe.W, Equals, 5) c.Assert(safe.WMode, Equals, "") c.Assert(safe.WTimeout, Equals, 6) c.Assert(safe.FSync, Equals, false) c.Assert(safe.J, Equals, true) // But to something more conservative will. session.EnsureSafe(&mgo.Safe{W: 6, WTimeout: 4, FSync: true}) safe = session.Safe() c.Assert(safe.W, Equals, 6) c.Assert(safe.WMode, Equals, "") c.Assert(safe.WTimeout, Equals, 4) c.Assert(safe.FSync, Equals, true) c.Assert(safe.J, Equals, false) // Even more conservative. 
session.EnsureSafe(&mgo.Safe{WMode: "majority", WTimeout: 2}) safe = session.Safe() c.Assert(safe.W, Equals, 0) c.Assert(safe.WMode, Equals, "majority") c.Assert(safe.WTimeout, Equals, 2) c.Assert(safe.FSync, Equals, true) c.Assert(safe.J, Equals, false) // WMode always overrides, whatever it is, but J doesn't. session.EnsureSafe(&mgo.Safe{WMode: "something", J: true}) safe = session.Safe() c.Assert(safe.W, Equals, 0) c.Assert(safe.WMode, Equals, "something") c.Assert(safe.WTimeout, Equals, 2) c.Assert(safe.FSync, Equals, true) c.Assert(safe.J, Equals, false) // EnsureSafe with nil does nothing. session.EnsureSafe(nil) safe = session.Safe() c.Assert(safe.W, Equals, 0) c.Assert(safe.WMode, Equals, "something") c.Assert(safe.WTimeout, Equals, 2) c.Assert(safe.FSync, Equals, true) c.Assert(safe.J, Equals, false) // Changing the safety of a cloned session doesn't touch the original. clone := session.Clone() defer clone.Close() clone.EnsureSafe(&mgo.Safe{WMode: "foo"}) safe = session.Safe() c.Assert(safe.WMode, Equals, "something") } func (s *S) TestSafeInsert(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") // Insert an element with a predefined key. err = coll.Insert(M{"_id": 1}) c.Assert(err, IsNil) mgo.ResetStats() // Session should be safe by default, so inserting it again must fail. err = coll.Insert(M{"_id": 1}) c.Assert(err, ErrorMatches, ".*E11000 duplicate.*") c.Assert(err.(*mgo.LastError).Code, Equals, 11000) // It must have sent two operations (INSERT_OP + getLastError QUERY_OP) stats := mgo.GetStats() if s.versionAtLeast(2, 6) { c.Assert(stats.SentOps, Equals, 1) } else { c.Assert(stats.SentOps, Equals, 2) } mgo.ResetStats() // If we disable safety, though, it won't complain. 
session.SetSafe(nil) err = coll.Insert(M{"_id": 1}) c.Assert(err, IsNil) // Must have sent a single operation this time (just the INSERT_OP) stats = mgo.GetStats() c.Assert(stats.SentOps, Equals, 1) } func (s *S) TestSafeParameters(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") // Tweak the safety parameters to something unachievable. session.SetSafe(&mgo.Safe{W: 4, WTimeout: 100}) err = coll.Insert(M{"_id": 1}) c.Assert(err, ErrorMatches, "timeout|timed out waiting for slaves|Not enough data-bearing nodes|waiting for replication timed out") // :-( if !s.versionAtLeast(2, 6) { // 2.6 turned it into a query error. c.Assert(err.(*mgo.LastError).WTimeout, Equals, true) } } func (s *S) TestQueryErrorOne(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") result := struct { Err string "$err" }{} err = coll.Find(M{"a": 1}).Select(M{"a": M{"b": 1}}).One(&result) c.Assert(err, ErrorMatches, ".*Unsupported projection option:.*") c.Assert(err.(*mgo.QueryError).Message, Matches, ".*Unsupported projection option:.*") if s.versionAtLeast(2, 6) { // Oh, the dance of error codes. 
:-( c.Assert(err.(*mgo.QueryError).Code, Equals, 17287) } else { c.Assert(err.(*mgo.QueryError).Code, Equals, 13097) } // The result should be properly unmarshalled with QueryError c.Assert(result.Err, Matches, ".*Unsupported projection option:.*") } func (s *S) TestQueryErrorNext(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") result := struct { Err string "$err" }{} iter := coll.Find(M{"a": 1}).Select(M{"a": M{"b": 1}}).Iter() ok := iter.Next(&result) c.Assert(ok, Equals, false) err = iter.Close() c.Assert(err, ErrorMatches, ".*Unsupported projection option:.*") c.Assert(err.(*mgo.QueryError).Message, Matches, ".*Unsupported projection option:.*") if s.versionAtLeast(2, 6) { // Oh, the dance of error codes. :-( c.Assert(err.(*mgo.QueryError).Code, Equals, 17287) } else { c.Assert(err.(*mgo.QueryError).Code, Equals, 13097) } c.Assert(iter.Err(), Equals, err) // The result should be properly unmarshalled with QueryError c.Assert(result.Err, Matches, ".*Unsupported projection option:.*") } var indexTests = []struct { index mgo.Index expected M }{{ mgo.Index{ Key: []string{"a"}, Background: true, }, M{ "name": "a_1", "key": M{"a": 1}, "ns": "mydb.mycoll", "background": true, }, }, { mgo.Index{ Key: []string{"a", "-b"}, Unique: true, DropDups: true, }, M{ "name": "a_1_b_-1", "key": M{"a": 1, "b": -1}, "ns": "mydb.mycoll", "unique": true, "dropDups": true, }, }, { mgo.Index{ Key: []string{"@loc_old"}, // Obsolete Min: -500, Max: 500, Bits: 32, }, M{ "name": "loc_old_2d", "key": M{"loc_old": "2d"}, "ns": "mydb.mycoll", "min": -500.0, "max": 500.0, "bits": 32, }, }, { mgo.Index{ Key: []string{"$2d:loc"}, Min: -500, Max: 500, Bits: 32, }, M{ "name": "loc_2d", "key": M{"loc": "2d"}, "ns": "mydb.mycoll", "min": -500.0, "max": 500.0, "bits": 32, }, }, { mgo.Index{ Key: []string{"$2d:loc"}, Minf: -500.1, Maxf: 500.1, Min: 1, // Should be ignored Max: 2, Bits: 32, }, M{ "name": "loc_2d", 
"key": M{"loc": "2d"}, "ns": "mydb.mycoll", "min": -500.1, "max": 500.1, "bits": 32, }, }, { mgo.Index{ Key: []string{"$geoHaystack:loc", "type"}, BucketSize: 1, }, M{ "name": "loc_geoHaystack_type_1", "key": M{"loc": "geoHaystack", "type": 1}, "ns": "mydb.mycoll", "bucketSize": 1.0, }, }, { mgo.Index{ Key: []string{"$text:a", "$text:b"}, Weights: map[string]int{"b": 42}, }, M{ "name": "a_text_b_text", "key": M{"_fts": "text", "_ftsx": 1}, "ns": "mydb.mycoll", "weights": M{"a": 1, "b": 42}, "default_language": "english", "language_override": "language", "textIndexVersion": 2, }, }, { mgo.Index{ Key: []string{"$text:a"}, DefaultLanguage: "portuguese", LanguageOverride: "idioma", }, M{ "name": "a_text", "key": M{"_fts": "text", "_ftsx": 1}, "ns": "mydb.mycoll", "weights": M{"a": 1}, "default_language": "portuguese", "language_override": "idioma", "textIndexVersion": 2, }, }, { mgo.Index{ Key: []string{"$text:$**"}, }, M{ "name": "$**_text", "key": M{"_fts": "text", "_ftsx": 1}, "ns": "mydb.mycoll", "weights": M{"$**": 1}, "default_language": "english", "language_override": "language", "textIndexVersion": 2, }, }, { mgo.Index{ Key: []string{"cn"}, Name: "CustomName", }, M{ "name": "CustomName", "key": M{"cn": 1}, "ns": "mydb.mycoll", }, }} func (s *S) TestEnsureIndex(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") idxs := session.DB("mydb").C("system.indexes") for _, test := range indexTests { err = coll.EnsureIndex(test.index) msg := "text search not enabled" if err != nil && strings.Contains(err.Error(), msg) { continue } c.Assert(err, IsNil) expectedName := test.index.Name if expectedName == "" { expectedName, _ = test.expected["name"].(string) } obtained := M{} err = idxs.Find(M{"name": expectedName}).One(obtained) c.Assert(err, IsNil) delete(obtained, "v") if s.versionAtLeast(2, 7) { // Was deprecated in 2.6, and not being reported by 2.7+. 
delete(test.expected, "dropDups") test.index.DropDups = false } c.Assert(obtained, DeepEquals, test.expected) // The result of Indexes must match closely what was used to create the index. indexes, err := coll.Indexes() c.Assert(err, IsNil) c.Assert(indexes, HasLen, 2) gotIndex := indexes[0] if gotIndex.Name == "_id_" { gotIndex = indexes[1] } wantIndex := test.index if wantIndex.Name == "" { wantIndex.Name = gotIndex.Name } if strings.HasPrefix(wantIndex.Key[0], "@") { wantIndex.Key[0] = "$2d:" + wantIndex.Key[0][1:] } if wantIndex.Minf == 0 && wantIndex.Maxf == 0 { wantIndex.Minf = float64(wantIndex.Min) wantIndex.Maxf = float64(wantIndex.Max) } else { wantIndex.Min = gotIndex.Min wantIndex.Max = gotIndex.Max } if wantIndex.DefaultLanguage == "" { wantIndex.DefaultLanguage = gotIndex.DefaultLanguage } if wantIndex.LanguageOverride == "" { wantIndex.LanguageOverride = gotIndex.LanguageOverride } for name, _ := range gotIndex.Weights { if _, ok := wantIndex.Weights[name]; !ok { if wantIndex.Weights == nil { wantIndex.Weights = make(map[string]int) } wantIndex.Weights[name] = 1 } } c.Assert(gotIndex, DeepEquals, wantIndex) // Drop created index by key or by name if a custom name was used. if test.index.Name == "" { err = coll.DropIndex(test.index.Key...) 
c.Assert(err, IsNil) } else { err = coll.DropIndexName(test.index.Name) c.Assert(err, IsNil) } } } func (s *S) TestEnsureIndexWithBadInfo(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.EnsureIndex(mgo.Index{}) c.Assert(err, ErrorMatches, "invalid index key:.*") err = coll.EnsureIndex(mgo.Index{Key: []string{""}}) c.Assert(err, ErrorMatches, "invalid index key:.*") } func (s *S) TestEnsureIndexWithUnsafeSession(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() session.SetSafe(nil) coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) // Should fail since there are duplicated entries. index := mgo.Index{ Key: []string{"a"}, Unique: true, } err = coll.EnsureIndex(index) c.Assert(err, ErrorMatches, ".*duplicate key error.*") } func (s *S) TestEnsureIndexKey(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.EnsureIndexKey("a") c.Assert(err, IsNil) err = coll.EnsureIndexKey("a", "-b") c.Assert(err, IsNil) sysidx := session.DB("mydb").C("system.indexes") result1 := M{} err = sysidx.Find(M{"name": "a_1"}).One(result1) c.Assert(err, IsNil) result2 := M{} err = sysidx.Find(M{"name": "a_1_b_-1"}).One(result2) c.Assert(err, IsNil) delete(result1, "v") expected1 := M{ "name": "a_1", "key": M{"a": 1}, "ns": "mydb.mycoll", } c.Assert(result1, DeepEquals, expected1) delete(result2, "v") expected2 := M{ "name": "a_1_b_-1", "key": M{"a": 1, "b": -1}, "ns": "mydb.mycoll", } c.Assert(result2, DeepEquals, expected2) } func (s *S) TestEnsureIndexDropIndex(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.EnsureIndexKey("a") c.Assert(err, IsNil) err = coll.EnsureIndexKey("-b") 
c.Assert(err, IsNil) err = coll.DropIndex("-b") c.Assert(err, IsNil) sysidx := session.DB("mydb").C("system.indexes") err = sysidx.Find(M{"name": "a_1"}).One(nil) c.Assert(err, IsNil) err = sysidx.Find(M{"name": "b_1"}).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) err = coll.DropIndex("a") c.Assert(err, IsNil) err = sysidx.Find(M{"name": "a_1"}).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) err = coll.DropIndex("a") c.Assert(err, ErrorMatches, "index not found.*") } func (s *S) TestEnsureIndexDropIndexName(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.EnsureIndexKey("a") c.Assert(err, IsNil) err = coll.EnsureIndex(mgo.Index{Key: []string{"b"}, Name: "a"}) c.Assert(err, IsNil) err = coll.DropIndexName("a") c.Assert(err, IsNil) sysidx := session.DB("mydb").C("system.indexes") err = sysidx.Find(M{"name": "a_1"}).One(nil) c.Assert(err, IsNil) err = sysidx.Find(M{"name": "a"}).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) err = coll.DropIndexName("a_1") c.Assert(err, IsNil) err = sysidx.Find(M{"name": "a_1"}).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) err = coll.DropIndexName("a_1") c.Assert(err, ErrorMatches, "index not found.*") } func (s *S) TestEnsureIndexCaching(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.EnsureIndexKey("a") c.Assert(err, IsNil) mgo.ResetStats() // Second EnsureIndex should be cached and do nothing. err = coll.EnsureIndexKey("a") c.Assert(err, IsNil) stats := mgo.GetStats() c.Assert(stats.SentOps, Equals, 0) // Resetting the cache should make it contact the server again. session.ResetIndexCache() err = coll.EnsureIndexKey("a") c.Assert(err, IsNil) stats = mgo.GetStats() c.Assert(stats.SentOps > 0, Equals, true) // Dropping the index should also drop the cached index key. 
err = coll.DropIndex("a") c.Assert(err, IsNil) mgo.ResetStats() err = coll.EnsureIndexKey("a") c.Assert(err, IsNil) stats = mgo.GetStats() c.Assert(stats.SentOps > 0, Equals, true) } func (s *S) TestEnsureIndexGetIndexes(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.EnsureIndexKey("-b") c.Assert(err, IsNil) err = coll.EnsureIndexKey("a") c.Assert(err, IsNil) // Obsolete. err = coll.EnsureIndexKey("@c") c.Assert(err, IsNil) err = coll.EnsureIndexKey("$2d:d") c.Assert(err, IsNil) // Try to exercise cursor logic. 2.8.0-rc3 still ignores this. session.SetBatch(2) indexes, err := coll.Indexes() c.Assert(err, IsNil) c.Assert(indexes[0].Name, Equals, "_id_") c.Assert(indexes[1].Name, Equals, "a_1") c.Assert(indexes[1].Key, DeepEquals, []string{"a"}) c.Assert(indexes[2].Name, Equals, "b_-1") c.Assert(indexes[2].Key, DeepEquals, []string{"-b"}) c.Assert(indexes[3].Name, Equals, "c_2d") c.Assert(indexes[3].Key, DeepEquals, []string{"$2d:c"}) c.Assert(indexes[4].Name, Equals, "d_2d") c.Assert(indexes[4].Key, DeepEquals, []string{"$2d:d"}) } func (s *S) TestEnsureIndexNameCaching(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"}) c.Assert(err, IsNil) mgo.ResetStats() // Second EnsureIndex should be cached and do nothing. err = coll.EnsureIndexKey("a") c.Assert(err, IsNil) err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"}) c.Assert(err, IsNil) stats := mgo.GetStats() c.Assert(stats.SentOps, Equals, 0) // Resetting the cache should make it contact the server again. session.ResetIndexCache() err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"}) c.Assert(err, IsNil) stats = mgo.GetStats() c.Assert(stats.SentOps > 0, Equals, true) // Dropping the index should also drop the cached index key. 
err = coll.DropIndexName("custom") c.Assert(err, IsNil) mgo.ResetStats() err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"}) c.Assert(err, IsNil) stats = mgo.GetStats() c.Assert(stats.SentOps > 0, Equals, true) } func (s *S) TestEnsureIndexEvalGetIndexes(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({b: -1})"}}, nil) c.Assert(err, IsNil) err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({a: 1})"}}, nil) c.Assert(err, IsNil) err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({c: -1, e: 1})"}}, nil) c.Assert(err, IsNil) err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({d: '2d'})"}}, nil) c.Assert(err, IsNil) indexes, err := coll.Indexes() c.Assert(err, IsNil) c.Assert(indexes[0].Name, Equals, "_id_") c.Assert(indexes[1].Name, Equals, "a_1") c.Assert(indexes[1].Key, DeepEquals, []string{"a"}) c.Assert(indexes[2].Name, Equals, "b_-1") c.Assert(indexes[2].Key, DeepEquals, []string{"-b"}) c.Assert(indexes[3].Name, Equals, "c_-1_e_1") c.Assert(indexes[3].Key, DeepEquals, []string{"-c", "e"}) if s.versionAtLeast(2, 2) { c.Assert(indexes[4].Name, Equals, "d_2d") c.Assert(indexes[4].Key, DeepEquals, []string{"$2d:d"}) } else { c.Assert(indexes[4].Name, Equals, "d_") c.Assert(indexes[4].Key, DeepEquals, []string{"$2d:d"}) } } var testTTL = flag.Bool("test-ttl", false, "test TTL collections (may take 1 minute)") func (s *S) TestEnsureIndexExpireAfter(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() session.SetSafe(nil) coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1, "t": time.Now().Add(-120 * time.Second)}) c.Assert(err, IsNil) err = coll.Insert(M{"n": 2, "t": time.Now()}) c.Assert(err, IsNil) // Should fail since there are duplicated entries. 
index := mgo.Index{ Key: []string{"t"}, ExpireAfter: 1 * time.Minute, } err = coll.EnsureIndex(index) c.Assert(err, IsNil) indexes, err := coll.Indexes() c.Assert(err, IsNil) c.Assert(indexes[1].Name, Equals, "t_1") c.Assert(indexes[1].ExpireAfter, Equals, 1*time.Minute) if *testTTL { worked := false stop := time.Now().Add(70 * time.Second) for time.Now().Before(stop) { n, err := coll.Count() c.Assert(err, IsNil) if n == 1 { worked = true break } c.Assert(n, Equals, 2) c.Logf("Still has 2 entries...") time.Sleep(1 * time.Second) } if !worked { c.Fatalf("TTL index didn't work") } } } func (s *S) TestDistinct(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { coll.Insert(M{"n": i}) } var result []int err = coll.Find(M{"n": M{"$gt": 2}}).Sort("n").Distinct("n", &result) sort.IntSlice(result).Sort() c.Assert(result, DeepEquals, []int{3, 4, 6}) } func (s *S) TestMapReduce(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { coll.Insert(M{"n": i}) } job := &mgo.MapReduce{ Map: "function() { emit(this.n, 1); }", Reduce: "function(key, values) { return Array.sum(values); }", } var result []struct { Id int "_id" Value int } info, err := coll.Find(M{"n": M{"$gt": 2}}).MapReduce(job, &result) c.Assert(err, IsNil) c.Assert(info.InputCount, Equals, 4) c.Assert(info.EmitCount, Equals, 4) c.Assert(info.OutputCount, Equals, 3) c.Assert(info.VerboseTime, IsNil) expected := map[int]int{3: 1, 4: 2, 6: 1} for _, item := range result { c.Logf("Item: %#v", &item) c.Assert(item.Value, Equals, expected[item.Id]) expected[item.Id] = -1 } } func (s *S) TestMapReduceFinalize(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") for _, i := range []int{1, 4, 6, 
2, 2, 3, 4} { coll.Insert(M{"n": i}) } job := &mgo.MapReduce{ Map: "function() { emit(this.n, 1) }", Reduce: "function(key, values) { return Array.sum(values) }", Finalize: "function(key, count) { return {count: count} }", } var result []struct { Id int "_id" Value struct{ Count int } } _, err = coll.Find(nil).MapReduce(job, &result) c.Assert(err, IsNil) expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1} for _, item := range result { c.Logf("Item: %#v", &item) c.Assert(item.Value.Count, Equals, expected[item.Id]) expected[item.Id] = -1 } } func (s *S) TestMapReduceToCollection(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { coll.Insert(M{"n": i}) } job := &mgo.MapReduce{ Map: "function() { emit(this.n, 1); }", Reduce: "function(key, values) { return Array.sum(values); }", Out: "mr", } info, err := coll.Find(nil).MapReduce(job, nil) c.Assert(err, IsNil) c.Assert(info.InputCount, Equals, 7) c.Assert(info.EmitCount, Equals, 7) c.Assert(info.OutputCount, Equals, 5) c.Assert(info.Collection, Equals, "mr") c.Assert(info.Database, Equals, "mydb") expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1} var item *struct { Id int "_id" Value int } mr := session.DB("mydb").C("mr") iter := mr.Find(nil).Iter() for iter.Next(&item) { c.Logf("Item: %#v", &item) c.Assert(item.Value, Equals, expected[item.Id]) expected[item.Id] = -1 } c.Assert(iter.Close(), IsNil) } func (s *S) TestMapReduceToOtherDb(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { coll.Insert(M{"n": i}) } job := &mgo.MapReduce{ Map: "function() { emit(this.n, 1); }", Reduce: "function(key, values) { return Array.sum(values); }", Out: bson.D{{"replace", "mr"}, {"db", "otherdb"}}, } info, err := coll.Find(nil).MapReduce(job, nil) c.Assert(err, IsNil) 
c.Assert(info.InputCount, Equals, 7) c.Assert(info.EmitCount, Equals, 7) c.Assert(info.OutputCount, Equals, 5) c.Assert(info.Collection, Equals, "mr") c.Assert(info.Database, Equals, "otherdb") expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1} var item *struct { Id int "_id" Value int } mr := session.DB("otherdb").C("mr") iter := mr.Find(nil).Iter() for iter.Next(&item) { c.Logf("Item: %#v", &item) c.Assert(item.Value, Equals, expected[item.Id]) expected[item.Id] = -1 } c.Assert(iter.Close(), IsNil) } func (s *S) TestMapReduceOutOfOrder(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { coll.Insert(M{"n": i}) } job := &mgo.MapReduce{ Map: "function() { emit(this.n, 1); }", Reduce: "function(key, values) { return Array.sum(values); }", Out: bson.M{"a": "a", "z": "z", "replace": "mr", "db": "otherdb", "b": "b", "y": "y"}, } info, err := coll.Find(nil).MapReduce(job, nil) c.Assert(err, IsNil) c.Assert(info.Collection, Equals, "mr") c.Assert(info.Database, Equals, "otherdb") } func (s *S) TestMapReduceScope(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") coll.Insert(M{"n": 1}) job := &mgo.MapReduce{ Map: "function() { emit(this.n, x); }", Reduce: "function(key, values) { return Array.sum(values); }", Scope: M{"x": 42}, } var result []bson.M _, err = coll.Find(nil).MapReduce(job, &result) c.Assert(len(result), Equals, 1) c.Assert(result[0]["value"], Equals, 42.0) } func (s *S) TestMapReduceVerbose(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") for i := 0; i < 100; i++ { err = coll.Insert(M{"n": i}) c.Assert(err, IsNil) } job := &mgo.MapReduce{ Map: "function() { emit(this.n, 1); }", Reduce: "function(key, values) { return Array.sum(values); }", Verbose: true, } info, 
err := coll.Find(nil).MapReduce(job, nil) c.Assert(err, IsNil) c.Assert(info.VerboseTime, NotNil) } func (s *S) TestMapReduceLimit(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") for _, i := range []int{1, 4, 6, 2, 2, 3, 4} { coll.Insert(M{"n": i}) } job := &mgo.MapReduce{ Map: "function() { emit(this.n, 1); }", Reduce: "function(key, values) { return Array.sum(values); }", } var result []bson.M _, err = coll.Find(nil).Limit(3).MapReduce(job, &result) c.Assert(err, IsNil) c.Assert(len(result), Equals, 3) } func (s *S) TestBuildInfo(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() info, err := session.BuildInfo() c.Assert(err, IsNil) var v []int for i, a := range strings.Split(info.Version, ".") { for _, token := range []string{"-rc", "-pre"} { if i == 2 && strings.Contains(a, token) { a = a[:strings.Index(a, token)] info.VersionArray[len(info.VersionArray)-1] = 0 } } n, err := strconv.Atoi(a) c.Assert(err, IsNil) v = append(v, n) } for len(v) < 4 { v = append(v, 0) } c.Assert(info.VersionArray, DeepEquals, v) c.Assert(info.GitVersion, Matches, "[a-z0-9]+") c.Assert(info.SysInfo, Matches, ".*[0-9:]+.*") if info.Bits != 32 && info.Bits != 64 { c.Fatalf("info.Bits is %d", info.Bits) } if info.MaxObjectSize < 8192 { c.Fatalf("info.MaxObjectSize seems too small: %d", info.MaxObjectSize) } } func (s *S) TestZeroTimeRoundtrip(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() var d struct{ T time.Time } conn := session.DB("mydb").C("mycoll") err = conn.Insert(d) c.Assert(err, IsNil) var result bson.M err = conn.Find(nil).One(&result) c.Assert(err, IsNil) t, isTime := result["t"].(time.Time) c.Assert(isTime, Equals, true) c.Assert(t, Equals, time.Time{}) } func (s *S) TestFsyncLock(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() clone := session.Clone() 
defer clone.Close() err = session.FsyncLock() c.Assert(err, IsNil) done := make(chan time.Time) go func() { time.Sleep(3e9) now := time.Now() err := session.FsyncUnlock() c.Check(err, IsNil) done <- now }() err = clone.DB("mydb").C("mycoll").Insert(bson.M{"n": 1}) unlocked := time.Now() unlocking := <-done c.Assert(err, IsNil) c.Assert(unlocked.After(unlocking), Equals, true) c.Assert(unlocked.Sub(unlocking) < 1e9, Equals, true) } func (s *S) TestFsync(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() // Not much to do here. Just a smoke check. err = session.Fsync(false) c.Assert(err, IsNil) err = session.Fsync(true) c.Assert(err, IsNil) } func (s *S) TestRepairCursor(c *C) { if !s.versionAtLeast(2, 7) { c.Skip("RepairCursor only works on 2.7+") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() session.SetBatch(2) coll := session.DB("mydb").C("mycoll3") err = coll.DropCollection() ns := []int{0, 10, 20, 30, 40, 50} for _, n := range ns { coll.Insert(M{"n": n}) } repairIter := coll.Repair() c.Assert(repairIter.Err(), IsNil) result := struct{ N int }{} resultCounts := map[int]int{} for repairIter.Next(&result) { resultCounts[result.N]++ } c.Assert(repairIter.Next(&result), Equals, false) c.Assert(repairIter.Err(), IsNil) c.Assert(repairIter.Close(), IsNil) // Verify that the results of the repair cursor are valid. // The repair cursor can return multiple copies // of the same document, so to check correctness we only // need to verify that at least 1 of each document was returned. 
for _, key := range ns { c.Assert(resultCounts[key] > 0, Equals, true) } } func (s *S) TestPipeIter(c *C) { if !s.versionAtLeast(2, 1) { c.Skip("Pipe only works on 2.1+") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { coll.Insert(M{"n": n}) } pipe := coll.Pipe([]M{{"$match": M{"n": M{"$gte": 42}}}}) // Ensure cursor logic is working by forcing a small batch. pipe.Batch(2) // Smoke test for AllowDiskUse. pipe.AllowDiskUse() iter := pipe.Iter() result := struct{ N int }{} for i := 2; i < 7; i++ { ok := iter.Next(&result) c.Assert(ok, Equals, true) c.Assert(result.N, Equals, ns[i]) } c.Assert(iter.Next(&result), Equals, false) c.Assert(iter.Close(), IsNil) } func (s *S) TestPipeAll(c *C) { if !s.versionAtLeast(2, 1) { c.Skip("Pipe only works on 2.1+") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { err := coll.Insert(M{"n": n}) c.Assert(err, IsNil) } var result []struct{ N int } err = coll.Pipe([]M{{"$match": M{"n": M{"$gte": 42}}}}).All(&result) c.Assert(err, IsNil) for i := 2; i < 7; i++ { c.Assert(result[i-2].N, Equals, ns[i]) } } func (s *S) TestPipeOne(c *C) { if !s.versionAtLeast(2, 1) { c.Skip("Pipe only works on 2.1+") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") coll.Insert(M{"a": 1, "b": 2}) result := struct{ A, B int }{} pipe := coll.Pipe([]M{{"$project": M{"a": 1, "b": M{"$add": []interface{}{"$b", 1}}}}}) err = pipe.One(&result) c.Assert(err, IsNil) c.Assert(result.A, Equals, 1) c.Assert(result.B, Equals, 3) pipe = coll.Pipe([]M{{"$match": M{"a": 2}}}) err = pipe.One(&result) c.Assert(err, Equals, mgo.ErrNotFound) } func (s *S) TestPipeExplain(c *C) { if !s.versionAtLeast(2, 1) { c.Skip("Pipe 
only works on 2.1+") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") coll.Insert(M{"a": 1, "b": 2}) pipe := coll.Pipe([]M{{"$project": M{"a": 1, "b": M{"$add": []interface{}{"$b", 1}}}}}) // The explain command result changes across versions. var result struct{ Ok int } err = pipe.Explain(&result) c.Assert(err, IsNil) c.Assert(result.Ok, Equals, 1) } func (s *S) TestBatch1Bug(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") for i := 0; i < 3; i++ { err := coll.Insert(M{"n": i}) c.Assert(err, IsNil) } var ns []struct{ N int } err = coll.Find(nil).Batch(1).All(&ns) c.Assert(err, IsNil) c.Assert(len(ns), Equals, 3) session.SetBatch(1) err = coll.Find(nil).All(&ns) c.Assert(err, IsNil) c.Assert(len(ns), Equals, 3) } func (s *S) TestInterfaceIterBug(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") for i := 0; i < 3; i++ { err := coll.Insert(M{"n": i}) c.Assert(err, IsNil) } var result interface{} i := 0 iter := coll.Find(nil).Sort("n").Iter() for iter.Next(&result) { c.Assert(result.(bson.M)["n"], Equals, i) i++ } c.Assert(iter.Close(), IsNil) } func (s *S) TestFindIterCloseKillsCursor(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() cursors := serverCursorsOpen(session) coll := session.DB("mydb").C("mycoll") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { err = coll.Insert(M{"n": n}) c.Assert(err, IsNil) } iter := coll.Find(nil).Batch(2).Iter() c.Assert(iter.Next(bson.M{}), Equals, true) c.Assert(iter.Close(), IsNil) c.Assert(serverCursorsOpen(session), Equals, cursors) } func (s *S) TestLogReplay(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") for i := 0; i < 5; i++ { err = 
coll.Insert(M{"ts": time.Now()}) c.Assert(err, IsNil) } iter := coll.Find(nil).LogReplay().Iter() if s.versionAtLeast(2, 6) { // This used to fail in 2.4. Now it's just a smoke test. c.Assert(iter.Err(), IsNil) } else { c.Assert(iter.Next(bson.M{}), Equals, false) c.Assert(iter.Err(), ErrorMatches, "no ts field in query") } } func (s *S) TestSetCursorTimeout(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 42}) // This is just a smoke test. Won't wait 10 minutes for an actual timeout. session.SetCursorTimeout(0) var result struct{ N int } iter := coll.Find(nil).Iter() c.Assert(iter.Next(&result), Equals, true) c.Assert(result.N, Equals, 42) c.Assert(iter.Next(&result), Equals, false) } func (s *S) TestNewIterNoServer(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() data, err := bson.Marshal(bson.M{"a": 1}) coll := session.DB("mydb").C("mycoll") iter := coll.NewIter(nil, []bson.Raw{{3, data}}, 42, nil) var result struct{ A int } ok := iter.Next(&result) c.Assert(ok, Equals, true) c.Assert(result.A, Equals, 1) ok = iter.Next(&result) c.Assert(ok, Equals, false) c.Assert(iter.Err(), ErrorMatches, "server not available") } func (s *S) TestNewIterNoServerPresetErr(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() data, err := bson.Marshal(bson.M{"a": 1}) coll := session.DB("mydb").C("mycoll") iter := coll.NewIter(nil, []bson.Raw{{3, data}}, 42, fmt.Errorf("my error")) var result struct{ A int } ok := iter.Next(&result) c.Assert(ok, Equals, true) c.Assert(result.A, Equals, 1) ok = iter.Next(&result) c.Assert(ok, Equals, false) c.Assert(iter.Err(), ErrorMatches, "my error") } // -------------------------------------------------------------------------- // Some benchmarks that require a running database. 
func (s *S) BenchmarkFindIterRaw(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") doc := bson.D{ {"f2", "a short string"}, {"f3", bson.D{{"1", "one"}, {"2", 2.0}}}, {"f4", []string{"a", "b", "c", "d", "e", "f", "g"}}, } for i := 0; i < c.N+1; i++ { err := coll.Insert(doc) c.Assert(err, IsNil) } session.SetBatch(c.N) var raw bson.Raw iter := coll.Find(nil).Iter() iter.Next(&raw) c.ResetTimer() i := 0 for iter.Next(&raw) { i++ } c.StopTimer() c.Assert(iter.Err(), IsNil) c.Assert(i, Equals, c.N) } charm-2.1.1/src/gopkg.in/mgo.v2/testserver/0000775000175000017500000000000012672604565017431 5ustar marcomarcocharm-2.1.1/src/gopkg.in/mgo.v2/testserver/export_test.go0000664000175000017500000000023312672604565022336 0ustar marcomarcopackage testserver import ( "os" ) func (ts *TestServer) ProcessTest() *os.Process { if ts.server == nil { return nil } return ts.server.Process } charm-2.1.1/src/gopkg.in/mgo.v2/testserver/testserver.go0000664000175000017500000000713412672604565022173 0ustar marcomarco// WARNING: This package was replaced by mgo.v2/dbtest. package testserver import ( "bytes" "fmt" "net" "os" "os/exec" "strconv" "time" "gopkg.in/mgo.v2" "gopkg.in/tomb.v2" ) // WARNING: This package was replaced by mgo.v2/dbtest. type TestServer struct { session *mgo.Session output bytes.Buffer server *exec.Cmd dbpath string host string tomb tomb.Tomb } // WARNING: This package was replaced by mgo.v2/dbtest. 
func (ts *TestServer) SetPath(dbpath string) { ts.dbpath = dbpath } func (ts *TestServer) start() { if ts.server != nil { panic("TestServer already started") } if ts.dbpath == "" { panic("TestServer.SetPath must be called before using the server") } mgo.SetStats(true) l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { panic("unable to listen on a local address: " + err.Error()) } addr := l.Addr().(*net.TCPAddr) l.Close() ts.host = addr.String() args := []string{ "--dbpath", ts.dbpath, "--bind_ip", "127.0.0.1", "--port", strconv.Itoa(addr.Port), "--nssize", "1", "--noprealloc", "--smallfiles", "--nojournal", } ts.tomb = tomb.Tomb{} ts.server = exec.Command("mongod", args...) ts.server.Stdout = &ts.output ts.server.Stderr = &ts.output err = ts.server.Start() if err != nil { panic(err) } ts.tomb.Go(ts.monitor) ts.Wipe() } func (ts *TestServer) monitor() error { ts.server.Process.Wait() if ts.tomb.Alive() { // Present some debugging information. fmt.Fprintf(os.Stderr, "---- mongod process died unexpectedly:\n") fmt.Fprintf(os.Stderr, "%s", ts.output.Bytes()) fmt.Fprintf(os.Stderr, "---- mongod processes running right now:\n") cmd := exec.Command("/bin/sh", "-c", "ps auxw | grep mongod") cmd.Stdout = os.Stderr cmd.Stderr = os.Stderr cmd.Run() fmt.Fprintf(os.Stderr, "----------------------------------------\n") panic("mongod process died unexpectedly") } return nil } // WARNING: This package was replaced by mgo.v2/dbtest. func (ts *TestServer) Stop() { if ts.session != nil { ts.checkSessions() if ts.session != nil { ts.session.Close() ts.session = nil } } if ts.server != nil { ts.tomb.Kill(nil) ts.server.Process.Kill() select { case <-ts.tomb.Dead(): case <-time.After(5 * time.Second): panic("timeout waiting for mongod process to die") } ts.server = nil } } // WARNING: This package was replaced by mgo.v2/dbtest. 
func (ts *TestServer) Session() *mgo.Session { if ts.server == nil { ts.start() } if ts.session == nil { mgo.ResetStats() var err error ts.session, err = mgo.Dial(ts.host + "/test") if err != nil { panic(err) } } return ts.session.Copy() } // WARNING: This package was replaced by mgo.v2/dbtest. func (ts *TestServer) checkSessions() { if check := os.Getenv("CHECK_SESSIONS"); check == "0" || ts.server == nil || ts.session == nil { return } ts.session.Close() ts.session = nil for i := 0; i < 100; i++ { stats := mgo.GetStats() if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 { return } time.Sleep(100 * time.Millisecond) } panic("There are mgo sessions still alive.") } // WARNING: This package was replaced by mgo.v2/dbtest. func (ts *TestServer) Wipe() { if ts.server == nil || ts.session == nil { return } ts.checkSessions() sessionUnset := ts.session == nil session := ts.Session() defer session.Close() if sessionUnset { ts.session.Close() ts.session = nil } names, err := session.DatabaseNames() if err != nil { panic(err) } for _, name := range names { switch name { case "admin", "local", "config": default: err = session.DB(name).DropDatabase() if err != nil { panic(err) } } } } charm-2.1.1/src/gopkg.in/mgo.v2/testserver/testserver_test.go0000664000175000017500000000421412672604565023226 0ustar marcomarcopackage testserver_test import ( "os" "testing" "time" . 
"gopkg.in/check.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/testserver" ) type M map[string]interface{} func TestAll(t *testing.T) { TestingT(t) } type S struct { oldCheckSessions string } var _ = Suite(&S{}) func (s *S) SetUpTest(c *C) { s.oldCheckSessions = os.Getenv("CHECK_SESSIONS") os.Setenv("CHECK_SESSIONS", "") } func (s *S) TearDownTest(c *C) { os.Setenv("CHECK_SESSIONS", s.oldCheckSessions) } func (s *S) TestWipeData(c *C) { var server testserver.TestServer server.SetPath(c.MkDir()) defer server.Stop() session := server.Session() err := session.DB("mydb").C("mycoll").Insert(M{"a": 1}) session.Close() c.Assert(err, IsNil) server.Wipe() session = server.Session() names, err := session.DatabaseNames() session.Close() c.Assert(err, IsNil) for _, name := range names { if name != "local" && name != "admin" { c.Fatalf("Wipe should have removed this database: %s", name) } } } func (s *S) TestStop(c *C) { var server testserver.TestServer server.SetPath(c.MkDir()) defer server.Stop() // Server should not be running. process := server.ProcessTest() c.Assert(process, IsNil) session := server.Session() addr := session.LiveServers()[0] session.Close() // Server should be running now. process = server.ProcessTest() p, err := os.FindProcess(process.Pid) c.Assert(err, IsNil) p.Release() server.Stop() // Server should not be running anymore. session, err = mgo.DialWithTimeout(addr, 500*time.Millisecond) if session != nil { session.Close() c.Fatalf("Stop did not stop the server") } } func (s *S) TestCheckSessions(c *C) { var server testserver.TestServer server.SetPath(c.MkDir()) defer server.Stop() session := server.Session() defer session.Close() c.Assert(server.Wipe, PanicMatches, "There are mgo sessions still alive.") } func (s *S) TestCheckSessionsDisabled(c *C) { var server testserver.TestServer server.SetPath(c.MkDir()) defer server.Stop() os.Setenv("CHECK_SESSIONS", "0") // Should not panic, although it looks to Wipe like this session will leak. 
session := server.Session() defer session.Close() server.Wipe() } charm-2.1.1/src/gopkg.in/mgo.v2/session.go0000664000175000017500000040610312672604565017241 0ustar marcomarco// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo import ( "crypto/md5" "encoding/hex" "errors" "fmt" "math" "net" "net/url" "reflect" "sort" "strconv" "strings" "sync" "time" "gopkg.in/mgo.v2/bson" ) type Mode int const ( // Relevant documentation on read preference modes: // // http://docs.mongodb.org/manual/reference/read-preference/ // Primary Mode = 2 // Default mode. All operations read from the current replica set primary. 
PrimaryPreferred Mode = 3 // Read from the primary if available. Read from the secondary otherwise. Secondary Mode = 4 // Read from one of the nearest secondary members of the replica set. SecondaryPreferred Mode = 5 // Read from one of the nearest secondaries if available. Read from primary otherwise. Nearest Mode = 6 // Read from one of the nearest members, irrespective of it being primary or secondary. // Read preference modes are specific to mgo: Eventual Mode = 0 // Same as Nearest, but may change servers between reads. Monotonic Mode = 1 // Same as SecondaryPreferred before first write. Same as Primary after first write. Strong Mode = 2 // Same as Primary. ) // When changing the Session type, check if newSession and copySession // need to be updated too. // Session represents a communication session with the database. // // All Session methods are concurrency-safe and may be called from multiple // goroutines. In all session modes but Eventual, using the session from // multiple goroutines will cause them to share the same underlying socket. // See the documentation on Session.SetMode for more details. type Session struct { m sync.RWMutex cluster_ *mongoCluster slaveSocket *mongoSocket masterSocket *mongoSocket slaveOk bool consistency Mode queryConfig query safeOp *queryOp syncTimeout time.Duration sockTimeout time.Duration defaultdb string sourcedb string dialCred *Credential creds []Credential poolLimit int } type Database struct { Session *Session Name string } type Collection struct { Database *Database Name string // "collection" FullName string // "db.collection" } type Query struct { m sync.Mutex session *Session query // Enables default settings in session. 
} type query struct { op queryOp prefetch float64 limit int32 } type getLastError struct { CmdName int "getLastError,omitempty" W interface{} "w,omitempty" WTimeout int "wtimeout,omitempty" FSync bool "fsync,omitempty" J bool "j,omitempty" } type Iter struct { m sync.Mutex gotReply sync.Cond session *Session server *mongoServer docData queue err error op getMoreOp prefetch float64 limit int32 docsToReceive int docsBeforeMore int timeout time.Duration timedout bool } var ( ErrNotFound = errors.New("not found") ErrCursor = errors.New("invalid cursor") ) const defaultPrefetch = 0.25 // Dial establishes a new session to the cluster identified by the given seed // server(s). The session will enable communication with all of the servers in // the cluster, so the seed servers are used only to find out about the cluster // topology. // // Dial will timeout after 10 seconds if a server isn't reached. The returned // session will timeout operations after one minute by default if servers // aren't available. To customize the timeout, see DialWithTimeout, // SetSyncTimeout, and SetSocketTimeout. // // This method is generally called just once for a given cluster. Further // sessions to the same cluster are then established using the New or Copy // methods on the obtained session. This will make them share the underlying // cluster, and manage the pool of connections appropriately. // // Once the session is not useful anymore, Close must be called to release the // resources appropriately. // // The seed servers must be provided in the following format: // // [mongodb://][user:pass@]host1[:port1][,host2[:port2],...][/database][?options] // // For example, it may be as simple as: // // localhost // // Or more involved like: // // mongodb://myuser:mypass@localhost:40001,otherhost:40001/mydb // // If the port number is not provided for a server, it defaults to 27017. 
// // The username and password provided in the URL will be used to authenticate // into the database named after the slash at the end of the host names, or // into the "admin" database if none is provided. The authentication information // will persist in sessions obtained through the New method as well. // // The following connection options are supported after the question mark: // // connect=direct // // Disables the automatic replica set server discovery logic, and // forces the use of servers provided only (even if secondaries). // Note that to talk to a secondary the consistency requirements // must be relaxed to Monotonic or Eventual via SetMode. // // // authSource= // // Informs the database used to establish credentials and privileges // with a MongoDB server. Defaults to the database name provided via // the URL path, and "admin" if that's unset. // // // authMechanism= // // Defines the protocol for credential negotiation. Defaults to "MONGODB-CR", // which is the default username/password challenge-response mechanism. // // // gssapiServiceName= // // Defines the service name to use when authenticating with the GSSAPI // mechanism. Defaults to "mongodb". // // maxPoolSize= // // Defines the per-server socket pool limit. Defaults to 4096. // See Session.SetPoolLimit for details. // // // Relevant documentation: // // http://docs.mongodb.org/manual/reference/connection-string/ // func Dial(url string) (*Session, error) { session, err := DialWithTimeout(url, 10*time.Second) if err == nil { session.SetSyncTimeout(1 * time.Minute) session.SetSocketTimeout(1 * time.Minute) } return session, err } // DialWithTimeout works like Dial, but uses timeout as the amount of time to // wait for a server to respond when first connecting and also on follow up // operations in the session. If timeout is zero, the call may block // forever waiting for a connection to be made. // // See SetSyncTimeout for customizing the timeout for the session. 
func DialWithTimeout(url string, timeout time.Duration) (*Session, error) { info, err := ParseURL(url) if err != nil { return nil, err } info.Timeout = timeout return DialWithInfo(info) } // ParseURL parses a MongoDB URL as accepted by the Dial function and returns // a value suitable for providing into DialWithInfo. // // See Dial for more details on the format of url. func ParseURL(url string) (*DialInfo, error) { uinfo, err := extractURL(url) if err != nil { return nil, err } direct := false mechanism := "" service := "" source := "" setName := "" poolLimit := 0 for k, v := range uinfo.options { switch k { case "authSource": source = v case "authMechanism": mechanism = v case "gssapiServiceName": service = v case "replicaSet": setName = v case "maxPoolSize": poolLimit, err = strconv.Atoi(v) if err != nil { return nil, errors.New("bad value for maxPoolSize: " + v) } case "connect": if v == "direct" { direct = true break } if v == "replicaSet" { break } fallthrough default: return nil, errors.New("unsupported connection URL option: " + k + "=" + v) } } info := DialInfo{ Addrs: uinfo.addrs, Direct: direct, Database: uinfo.db, Username: uinfo.user, Password: uinfo.pass, Mechanism: mechanism, Service: service, Source: source, PoolLimit: poolLimit, ReplicaSetName: setName, } return &info, nil } // DialInfo holds options for establishing a session with a MongoDB cluster. // To use a URL, see the Dial function. type DialInfo struct { // Addrs holds the addresses for the seed servers. Addrs []string // Direct informs whether to establish connections only with the // specified seed servers, or to obtain information for the whole // cluster and establish connections with further servers too. Direct bool // Timeout is the amount of time to wait for a server to respond when // first connecting and on follow up operations in the session. If // timeout is zero, the call may block forever waiting for a connection // to be established. 
Timeout time.Duration // FailFast will cause connection and query attempts to fail faster when // the server is unavailable, instead of retrying until the configured // timeout period. Note that an unavailable server may silently drop // packets instead of rejecting them, in which case it's impossible to // distinguish it from a slow server, so the timeout stays relevant. FailFast bool // Database is the default database name used when the Session.DB method // is called with an empty name, and is also used during the intial // authentication if Source is unset. Database string // ReplicaSetName, if specified, will prevent the obtained session from // communicating with any server which is not part of a replica set // with the given name. The default is to communicate with any server // specified or discovered via the servers contacted. ReplicaSetName string // Source is the database used to establish credentials and privileges // with a MongoDB server. Defaults to the value of Database, if that is // set, or "admin" otherwise. Source string // Service defines the service name to use when authenticating with the GSSAPI // mechanism. Defaults to "mongodb". Service string // ServiceHost defines which hostname to use when authenticating // with the GSSAPI mechanism. If not specified, defaults to the MongoDB // server's address. ServiceHost string // Mechanism defines the protocol for credential negotiation. // Defaults to "MONGODB-CR". Mechanism string // Username and Password inform the credentials for the initial authentication // done on the database defined by the Source field. See Session.Login. Username string Password string // PoolLimit defines the per-server socket pool limit. Defaults to 4096. // See Session.SetPoolLimit for details. PoolLimit int // DialServer optionally specifies the dial function for establishing // connections with the MongoDB servers. DialServer func(addr *ServerAddr) (net.Conn, error) // WARNING: This field is obsolete. 
See DialServer above. Dial func(addr net.Addr) (net.Conn, error) } // mgo.v3: Drop DialInfo.Dial. // ServerAddr represents the address for establishing a connection to an // individual MongoDB server. type ServerAddr struct { str string tcp *net.TCPAddr } // String returns the address that was provided for the server before resolution. func (addr *ServerAddr) String() string { return addr.str } // TCPAddr returns the resolved TCP address for the server. func (addr *ServerAddr) TCPAddr() *net.TCPAddr { return addr.tcp } // DialWithInfo establishes a new session to the cluster identified by info. func DialWithInfo(info *DialInfo) (*Session, error) { addrs := make([]string, len(info.Addrs)) for i, addr := range info.Addrs { p := strings.LastIndexAny(addr, "]:") if p == -1 || addr[p] != ':' { // XXX This is untested. The test suite doesn't use the standard port. addr += ":27017" } addrs[i] = addr } cluster := newCluster(addrs, info.Direct, info.FailFast, dialer{info.Dial, info.DialServer}, info.ReplicaSetName) session := newSession(Eventual, cluster, info.Timeout) session.defaultdb = info.Database if session.defaultdb == "" { session.defaultdb = "test" } session.sourcedb = info.Source if session.sourcedb == "" { session.sourcedb = info.Database if session.sourcedb == "" { session.sourcedb = "admin" } } if info.Username != "" { source := session.sourcedb if info.Source == "" && (info.Mechanism == "GSSAPI" || info.Mechanism == "PLAIN" || info.Mechanism == "MONGODB-X509") { source = "$external" } session.dialCred = &Credential{ Username: info.Username, Password: info.Password, Mechanism: info.Mechanism, Service: info.Service, ServiceHost: info.ServiceHost, Source: source, } session.creds = []Credential{*session.dialCred} } if info.PoolLimit > 0 { session.poolLimit = info.PoolLimit } cluster.Release() // People get confused when we return a session that is not actually // established to any servers yet (e.g. what if url was wrong). 
So, // ping the server to ensure there's someone there, and abort if it // fails. if err := session.Ping(); err != nil { session.Close() return nil, err } session.SetMode(Strong, true) return session, nil } func isOptSep(c rune) bool { return c == ';' || c == '&' } type urlInfo struct { addrs []string user string pass string db string options map[string]string } func extractURL(s string) (*urlInfo, error) { if strings.HasPrefix(s, "mongodb://") { s = s[10:] } info := &urlInfo{options: make(map[string]string)} if c := strings.Index(s, "?"); c != -1 { for _, pair := range strings.FieldsFunc(s[c+1:], isOptSep) { l := strings.SplitN(pair, "=", 2) if len(l) != 2 || l[0] == "" || l[1] == "" { return nil, errors.New("connection option must be key=value: " + pair) } info.options[l[0]] = l[1] } s = s[:c] } if c := strings.Index(s, "@"); c != -1 { pair := strings.SplitN(s[:c], ":", 2) if len(pair) > 2 || pair[0] == "" { return nil, errors.New("credentials must be provided as user:pass@host") } var err error info.user, err = url.QueryUnescape(pair[0]) if err != nil { return nil, fmt.Errorf("cannot unescape username in URL: %q", pair[0]) } if len(pair) > 1 { info.pass, err = url.QueryUnescape(pair[1]) if err != nil { return nil, fmt.Errorf("cannot unescape password in URL") } } s = s[c+1:] } if c := strings.Index(s, "/"); c != -1 { info.db = s[c+1:] s = s[:c] } info.addrs = strings.Split(s, ",") return info, nil } func newSession(consistency Mode, cluster *mongoCluster, timeout time.Duration) (session *Session) { cluster.Acquire() session = &Session{ cluster_: cluster, syncTimeout: timeout, sockTimeout: timeout, poolLimit: 4096, } debugf("New session %p on cluster %p", session, cluster) session.SetMode(consistency, true) session.SetSafe(&Safe{}) session.queryConfig.prefetch = defaultPrefetch return session } func copySession(session *Session, keepCreds bool) (s *Session) { cluster := session.cluster() cluster.Acquire() if session.masterSocket != nil { 
session.masterSocket.Acquire() } if session.slaveSocket != nil { session.slaveSocket.Acquire() } var creds []Credential if keepCreds { creds = make([]Credential, len(session.creds)) copy(creds, session.creds) } else if session.dialCred != nil { creds = []Credential{*session.dialCred} } scopy := *session scopy.m = sync.RWMutex{} scopy.creds = creds s = &scopy debugf("New session %p on cluster %p (copy from %p)", s, cluster, session) return s } // LiveServers returns a list of server addresses which are // currently known to be alive. func (s *Session) LiveServers() (addrs []string) { s.m.RLock() addrs = s.cluster().LiveServers() s.m.RUnlock() return addrs } // DB returns a value representing the named database. If name // is empty, the database name provided in the dialed URL is // used instead. If that is also empty, "test" is used as a // fallback in a way equivalent to the mongo shell. // // Creating this value is a very lightweight operation, and // involves no network communication. func (s *Session) DB(name string) *Database { if name == "" { name = s.defaultdb } return &Database{s, name} } // C returns a value representing the named collection. // // Creating this value is a very lightweight operation, and // involves no network communication. func (db *Database) C(name string) *Collection { return &Collection{db, name, db.Name + "." + name} } // With returns a copy of db that uses session s. func (db *Database) With(s *Session) *Database { newdb := *db newdb.Session = s return &newdb } // With returns a copy of c that uses session s. func (c *Collection) With(s *Session) *Collection { newdb := *c.Database newdb.Session = s newc := *c newc.Database = &newdb return &newc } // GridFS returns a GridFS value representing collections in db that // follow the standard GridFS specification. // The provided prefix (sometimes known as root) will determine which // collections to use, and is usually set to "fs" when there is a // single GridFS in the database. 
// // See the GridFS Create, Open, and OpenId methods for more details. // // Relevant documentation: // // http://www.mongodb.org/display/DOCS/GridFS // http://www.mongodb.org/display/DOCS/GridFS+Tools // http://www.mongodb.org/display/DOCS/GridFS+Specification // func (db *Database) GridFS(prefix string) *GridFS { return newGridFS(db, prefix) } // Run issues the provided command on the db database and unmarshals // its result in the respective argument. The cmd argument may be either // a string with the command name itself, in which case an empty document of // the form bson.M{cmd: 1} will be used, or it may be a full command document. // // Note that MongoDB considers the first marshalled key as the command // name, so when providing a command with options, it's important to // use an ordering-preserving document, such as a struct value or an // instance of bson.D. For instance: // // db.Run(bson.D{{"create", "mycollection"}, {"size", 1024}}) // // For privilleged commands typically run on the "admin" database, see // the Run method in the Session type. // // Relevant documentation: // // http://www.mongodb.org/display/DOCS/Commands // http://www.mongodb.org/display/DOCS/List+of+Database+CommandSkips // func (db *Database) Run(cmd interface{}, result interface{}) error { socket, err := db.Session.acquireSocket(true) if err != nil { return err } defer socket.Release() // This is an optimized form of db.C("$cmd").Find(cmd).One(result). return db.run(socket, cmd, result) } // Credential holds details to authenticate with a MongoDB server. type Credential struct { // Username and Password hold the basic details for authentication. // Password is optional with some authentication mechanisms. Username string Password string // Source is the database used to establish credentials and privileges // with a MongoDB server. Defaults to the default database provided // during dial, or "admin" if that was unset. 
Source string // Service defines the service name to use when authenticating with the GSSAPI // mechanism. Defaults to "mongodb". Service string // ServiceHost defines which hostname to use when authenticating // with the GSSAPI mechanism. If not specified, defaults to the MongoDB // server's address. ServiceHost string // Mechanism defines the protocol for credential negotiation. // Defaults to "MONGODB-CR". Mechanism string } // Login authenticates with MongoDB using the provided credential. The // authentication is valid for the whole session and will stay valid until // Logout is explicitly called for the same database, or the session is // closed. func (db *Database) Login(user, pass string) error { return db.Session.Login(&Credential{Username: user, Password: pass, Source: db.Name}) } // Login authenticates with MongoDB using the provided credential. The // authentication is valid for the whole session and will stay valid until // Logout is explicitly called for the same database, or the session is // closed. func (s *Session) Login(cred *Credential) error { socket, err := s.acquireSocket(true) if err != nil { return err } defer socket.Release() credCopy := *cred if cred.Source == "" { if cred.Mechanism == "GSSAPI" { credCopy.Source = "$external" } else { credCopy.Source = s.sourcedb } } err = socket.Login(credCopy) if err != nil { return err } s.m.Lock() s.creds = append(s.creds, credCopy) s.m.Unlock() return nil } func (s *Session) socketLogin(socket *mongoSocket) error { for _, cred := range s.creds { if err := socket.Login(cred); err != nil { return err } } return nil } // Logout removes any established authentication credentials for the database. 
func (db *Database) Logout() {
	session := db.Session
	dbname := db.Name
	session.m.Lock()
	found := false
	for i, cred := range session.creds {
		if cred.Source == dbname {
			// Remove this credential from the slice, preserving order.
			copy(session.creds[i:], session.creds[i+1:])
			session.creds = session.creds[:len(session.creds)-1]
			found = true
			break
		}
	}
	if found {
		// Also log out of any sockets currently reserved by the session.
		if session.masterSocket != nil {
			session.masterSocket.Logout(dbname)
		}
		if session.slaveSocket != nil {
			session.slaveSocket.Logout(dbname)
		}
	}
	session.m.Unlock()
}

// LogoutAll removes all established authentication credentials for the session.
func (s *Session) LogoutAll() {
	s.m.Lock()
	for _, cred := range s.creds {
		if s.masterSocket != nil {
			s.masterSocket.Logout(cred.Source)
		}
		if s.slaveSocket != nil {
			s.slaveSocket.Logout(cred.Source)
		}
	}
	// Drop all recorded credentials but keep the slice's capacity.
	s.creds = s.creds[0:0]
	s.m.Unlock()
}

// User represents a MongoDB user.
//
// Relevant documentation:
//
//     http://docs.mongodb.org/manual/reference/privilege-documents/
//     http://docs.mongodb.org/manual/reference/user-privileges/
//
type User struct {
	// Username is how the user identifies itself to the system.
	Username string `bson:"user"`

	// Password is the plaintext password for the user. If set,
	// the UpsertUser method will hash it into PasswordHash and
	// unset it before the user is added to the database.
	Password string `bson:",omitempty"`

	// PasswordHash is the MD5 hash of Username+":mongo:"+Password.
	PasswordHash string `bson:"pwd,omitempty"`

	// CustomData holds arbitrary data admins decide to associate
	// with this user, such as the full name or employee id.
	CustomData interface{} `bson:"customData,omitempty"`

	// Roles indicates the set of roles the user will be provided.
	// See the Role constants.
	Roles []Role `bson:"roles"`

	// OtherDBRoles allows assigning roles in other databases from
	// user documents inserted in the admin database. This field
	// only works in the admin database.
	OtherDBRoles map[string][]Role `bson:"otherDBRoles,omitempty"`

	// UserSource indicates where to look for this user's credentials.
	// It may be set to a database name, or to "$external" for
	// consulting an external resource such as Kerberos. UserSource
	// must not be set if Password or PasswordHash are present.
	//
	// WARNING: This setting was only ever supported in MongoDB 2.4,
	// and is now obsolete.
	UserSource string `bson:"userSource,omitempty"`
}

// Role is the name of a MongoDB role assignable to a User.
type Role string

const (
	// Relevant documentation:
	//
	//     http://docs.mongodb.org/manual/reference/user-privileges/
	//
	RoleRoot         Role = "root"
	RoleRead         Role = "read"
	RoleReadAny      Role = "readAnyDatabase"
	RoleReadWrite    Role = "readWrite"
	RoleReadWriteAny Role = "readWriteAnyDatabase"
	RoleDBAdmin      Role = "dbAdmin"
	RoleDBAdminAny   Role = "dbAdminAnyDatabase"
	RoleUserAdmin    Role = "userAdmin"
	RoleUserAdminAny Role = "userAdminAnyDatabase"
	RoleClusterAdmin Role = "clusterAdmin"
)

// UpsertUser updates the authentication credentials and the roles for
// a MongoDB user within the db database. If the named user doesn't exist
// it will be created.
//
// This method should only be used from MongoDB 2.4 and on. For older
// MongoDB releases, use the obsolete AddUser method instead.
//
// Relevant documentation:
//
//     http://docs.mongodb.org/manual/reference/user-privileges/
//     http://docs.mongodb.org/manual/reference/privilege-documents/
//
func (db *Database) UpsertUser(user *User) error {
	// Validate mutually exclusive fields up front.
	if user.Username == "" {
		return fmt.Errorf("user has no Username")
	}
	if (user.Password != "" || user.PasswordHash != "") && user.UserSource != "" {
		return fmt.Errorf("user has both Password/PasswordHash and UserSource set")
	}
	if len(user.OtherDBRoles) > 0 && db.Name != "admin" && db.Name != "$external" {
		return fmt.Errorf("user with OtherDBRoles is only supported in the admin or $external databases")
	}

	// Attempt to run this using 2.6+ commands.
	rundb := db
	if user.UserSource != "" {
		// Compatibility logic for the userSource field of MongoDB <= 2.4.X
		rundb = db.Session.DB(user.UserSource)
	}
	err := rundb.runUserCmd("updateUser", user)
	// retry with createUser when isAuthError in order to enable the "localhost exception"
	if isNotFound(err) || isAuthError(err) {
		return rundb.runUserCmd("createUser", user)
	}
	if !isNoCmd(err) {
		return err
	}

	// Command does not exist. Fallback to pre-2.6 behavior.
	var set, unset bson.D
	if user.Password != "" {
		// Legacy credential digest: MD5 of "<user>:mongo:<password>".
		psum := md5.New()
		psum.Write([]byte(user.Username + ":mongo:" + user.Password))
		set = append(set, bson.DocElem{"pwd", hex.EncodeToString(psum.Sum(nil))})
		unset = append(unset, bson.DocElem{"userSource", 1})
	} else if user.PasswordHash != "" {
		set = append(set, bson.DocElem{"pwd", user.PasswordHash})
		unset = append(unset, bson.DocElem{"userSource", 1})
	}
	// pwd and userSource are mutually exclusive in the stored document,
	// so setting one always unsets the other.
	if user.UserSource != "" {
		set = append(set, bson.DocElem{"userSource", user.UserSource})
		unset = append(unset, bson.DocElem{"pwd", 1})
	}
	if user.Roles != nil || user.OtherDBRoles != nil {
		set = append(set, bson.DocElem{"roles", user.Roles})
		if len(user.OtherDBRoles) > 0 {
			set = append(set, bson.DocElem{"otherDBRoles", user.OtherDBRoles})
		} else {
			unset = append(unset, bson.DocElem{"otherDBRoles", 1})
		}
	}
	users := db.C("system.users")
	err = users.Update(bson.D{{"user", user.Username}}, bson.D{{"$unset", unset}, {"$set", set}})
	if err == ErrNotFound {
		// No existing document: insert a fresh one instead.
		set = append(set, bson.DocElem{"user", user.Username})
		if user.Roles == nil && user.OtherDBRoles == nil {
			// Roles must be sent, as it's the way MongoDB distinguishes
			// old-style documents from new-style documents in pre-2.6.
			set = append(set, bson.DocElem{"roles", user.Roles})
		}
		err = users.Insert(set)
	}
	return err
}

// isNoCmd reports whether err indicates the server does not know the
// command, which is how pre-2.6 servers are detected so callers can fall
// back to legacy behavior.
func isNoCmd(err error) bool {
	e, ok := err.(*QueryError)
	return ok && (e.Code == 59 || e.Code == 13390 || strings.HasPrefix(e.Message, "no such cmd:"))
}

// isNotFound reports whether err is the server's error code 11
// (user not found, per its use for retrying with createUser).
func isNotFound(err error) bool {
	e, ok := err.(*QueryError)
	return ok && e.Code == 11
}

// isAuthError reports whether err is the server's error code 13
// (unauthorized).
func isAuthError(err error) bool {
	e, ok := err.(*QueryError)
	return ok && e.Code == 13
}

// runUserCmd issues the 2.6+ user management command cmdName
// ("createUser", "updateUser", ...) for the given user against db.
func (db *Database) runUserCmd(cmdName string, user *User) error {
	cmd := make(bson.D, 0, 16)
	cmd = append(cmd, bson.DocElem{cmdName, user.Username})
	if user.Password != "" {
		cmd = append(cmd, bson.DocElem{"pwd", user.Password})
	}
	var roles []interface{}
	for _, role := range user.Roles {
		roles = append(roles, role)
	}
	// Roles on other databases are expressed as {role, db} subdocuments.
	for db, dbroles := range user.OtherDBRoles {
		for _, role := range dbroles {
			roles = append(roles, bson.D{{"role", role}, {"db", db}})
		}
	}
	if roles != nil || user.Roles != nil || cmdName == "createUser" {
		cmd = append(cmd, bson.DocElem{"roles", roles})
	}
	err := db.Run(cmd, nil)
	if !isNoCmd(err) && user.UserSource != "" && (user.UserSource != "$external" || db.Name != "$external") {
		return fmt.Errorf("MongoDB 2.6+ does not support the UserSource setting")
	}
	return err
}

// AddUser creates or updates the authentication credentials of user within
// the db database.
//
// WARNING: This method is obsolete and should only be used with MongoDB 2.2
// or earlier. For MongoDB 2.4 and on, use UpsertUser instead.
func (db *Database) AddUser(username, password string, readOnly bool) error {
	// Try to emulate the old behavior on 2.6+
	user := &User{Username: username, Password: password}
	if db.Name == "admin" {
		if readOnly {
			user.Roles = []Role{RoleReadAny}
		} else {
			user.Roles = []Role{RoleReadWriteAny}
		}
	} else {
		if readOnly {
			user.Roles = []Role{RoleRead}
		} else {
			user.Roles = []Role{RoleReadWrite}
		}
	}
	err := db.runUserCmd("updateUser", user)
	if isNotFound(err) {
		return db.runUserCmd("createUser", user)
	}
	if !isNoCmd(err) {
		return err
	}

	// Command doesn't exist. Fallback to pre-2.6 behavior.
	psum := md5.New()
	psum.Write([]byte(username + ":mongo:" + password))
	digest := hex.EncodeToString(psum.Sum(nil))
	c := db.C("system.users")
	_, err = c.Upsert(bson.M{"user": username}, bson.M{"$set": bson.M{"user": username, "pwd": digest, "readOnly": readOnly}})
	return err
}

// RemoveUser removes the authentication credentials of user from the database.
func (db *Database) RemoveUser(user string) error {
	err := db.Run(bson.D{{"dropUser", user}}, nil)
	if isNoCmd(err) {
		// Pre-2.6 server: remove the document from system.users directly.
		users := db.C("system.users")
		return users.Remove(bson.M{"user": user})
	}
	if isNotFound(err) {
		return ErrNotFound
	}
	return err
}

// indexSpec is the wire representation of an index, as sent to
// createIndexes or stored in the system.indexes collection.
type indexSpec struct {
	Name, NS         string
	Key              bson.D
	Unique           bool ",omitempty"
	DropDups         bool "dropDups,omitempty"
	Background       bool ",omitempty"
	Sparse           bool ",omitempty"
	Bits             int ",omitempty"
	Min, Max         float64 ",omitempty"
	BucketSize       float64 "bucketSize,omitempty"
	ExpireAfter      int "expireAfterSeconds,omitempty"
	Weights          bson.D ",omitempty"
	DefaultLanguage  string "default_language,omitempty"
	LanguageOverride string "language_override,omitempty"
	TextIndexVersion int "textIndexVersion,omitempty"
}

// Index describes an index to be ensured or one read back from the server.
type Index struct {
	Key        []string // Index key fields; prefix name with dash (-) for descending order
	Unique     bool     // Prevent two documents from having the same index key
	DropDups   bool     // Drop documents with the same index key as a previously indexed one
	Background bool     // Build index in background and return immediately
	Sparse     bool     // Only index documents containing the Key fields

	// If ExpireAfter is defined the server will periodically delete
	// documents with indexed time.Time older than the provided delta.
	ExpireAfter time.Duration

	// Name holds the stored index name. On creation if this field is unset it is
	// computed by EnsureIndex based on the index key.
	Name string

	// Properties for spatial indexes.
	//
	// Min and Max were improperly typed as int when they should have been
	// floats. To preserve backwards compatibility they are still typed as
	// int and the following two fields enable reading and writing the same
	// fields as float numbers. In mgo.v3, these fields will be dropped and
	// Min/Max will become floats.
	Min, Max   int
	Minf, Maxf float64
	BucketSize float64
	Bits       int

	// Properties for text indexes.
	DefaultLanguage  string
	LanguageOverride string

	// Weights defines the significance of provided fields relative to other
	// fields in a text index. The score for a given word in a document is derived
	// from the weighted sum of the frequency for each of the indexed fields in
	// that document. The default field weight is 1.
	Weights map[string]int
}

// mgo.v3: Drop Minf and Maxf and transform Min and Max to floats.

// indexKeyInfo holds the result of parsing a []string index key: the
// computed index name, the server-side key document, and text-index
// weights, if any.
type indexKeyInfo struct {
	name    string
	key     bson.D
	weights bson.D
}

// parseIndexKey translates the user-facing key format (e.g. "-time",
// "$text:summary", "@loc") into an indexKeyInfo, computing the canonical
// index name along the way.
func parseIndexKey(key []string) (*indexKeyInfo, error) {
	var keyInfo indexKeyInfo
	isText := false
	var order interface{}
	for _, field := range key {
		raw := field
		if keyInfo.name != "" {
			keyInfo.name += "_"
		}
		var kind string
		if field != "" {
			if field[0] == '$' {
				// "$<kind>:<field>" prefix selects a special index kind.
				if c := strings.Index(field, ":"); c > 1 && c < len(field)-1 {
					kind = field[1:c]
					field = field[c+1:]
					keyInfo.name += field + "_" + kind
				} else {
					// Malformed "$..." spec; sentinel triggers the error below.
					field = "\x00"
				}
			}
			switch field[0] {
			case 0:
				// Logic above failed. Reset and error.
				field = ""
			case '@':
				order = "2d"
				field = field[1:]
				// The shell used to render this field as key_ instead of key_2d,
				// and mgo followed suit. This has been fixed in recent server
				// releases, and mgo followed as well.
				keyInfo.name += field + "_2d"
			case '-':
				order = -1
				field = field[1:]
				keyInfo.name += field + "_-1"
			case '+':
				field = field[1:]
				fallthrough
			default:
				if kind == "" {
					order = 1
					keyInfo.name += field + "_1"
				} else {
					order = kind
				}
			}
		}
		if field == "" || kind != "" && order != kind {
			return nil, fmt.Errorf(`invalid index key: want "[$:][-]", got %q`, raw)
		}
		if kind == "text" {
			// All text fields share a single {_fts, _ftsx} key entry;
			// the individual fields go into the weights document.
			if !isText {
				keyInfo.key = append(keyInfo.key, bson.DocElem{"_fts", "text"}, bson.DocElem{"_ftsx", 1})
				isText = true
			}
			keyInfo.weights = append(keyInfo.weights, bson.DocElem{field, 1})
		} else {
			keyInfo.key = append(keyInfo.key, bson.DocElem{field, order})
		}
	}
	if keyInfo.name == "" {
		return nil, errors.New("invalid index key: no fields provided")
	}
	return &keyInfo, nil
}

// EnsureIndexKey ensures an index with the given key exists, creating it
// if necessary.
//
// This example:
//
//     err := collection.EnsureIndexKey("a", "b")
//
// Is equivalent to:
//
//     err := collection.EnsureIndex(mgo.Index{Key: []string{"a", "b"}})
//
// See the EnsureIndex method for more details.
func (c *Collection) EnsureIndexKey(key ...string) error {
	return c.EnsureIndex(Index{Key: key})
}

// EnsureIndex ensures an index with the given key exists, creating it with
// the provided parameters if necessary. EnsureIndex does not modify a previously
// existent index with a matching key. The old index must be dropped first instead.
//
// Once EnsureIndex returns successfully, following requests for the same index
// will not contact the server unless Collection.DropIndex is used to drop the
// same index, or Session.ResetIndexCache is called.
//
// For example:
//
//     index := Index{
//         Key: []string{"lastname", "firstname"},
//         Unique: true,
//         DropDups: true,
//         Background: true, // See notes.
//         Sparse: true,
//     }
//     err := collection.EnsureIndex(index)
//
// The Key value determines which fields compose the index. The index ordering
// will be ascending by default. To obtain an index with a descending order,
// the field name should be prefixed by a dash (e.g. []string{"-time"}). It can
// also be optionally prefixed by an index kind, as in "$text:summary" or
// "$2d:-point". The key string format is:
//
//     [$<kind>:][-]<field name>
//
// If the Unique field is true, the index must necessarily contain only a single
// document per Key. With DropDups set to true, documents with the same key
// as a previously indexed one will be dropped rather than an error returned.
//
// If Background is true, other connections will be allowed to proceed using
// the collection without the index while it's being built. Note that the
// session executing EnsureIndex will be blocked for as long as it takes for
// the index to be built.
//
// If Sparse is true, only documents containing the provided Key fields will be
// included in the index. When using a sparse index for sorting, only indexed
// documents will be returned.
//
// If ExpireAfter is non-zero, the server will periodically scan the collection
// and remove documents containing an indexed time.Time field with a value
// older than ExpireAfter. See the documentation for details:
//
//     http://docs.mongodb.org/manual/tutorial/expire-data
//
// Other kinds of indexes are also supported through that API. Here is an example:
//
//     index := Index{
//         Key: []string{"$2d:loc"},
//         Bits: 26,
//     }
//     err := collection.EnsureIndex(index)
//
// The example above requests the creation of a "2d" index for the "loc" field.
//
// The 2D index bounds may be changed using the Min and Max attributes of the
// Index value. The default bound setting of (-180, 180) is suitable for
// latitude/longitude pairs.
//
// The Bits parameter sets the precision of the 2D geohash values. If not
// provided, 26 bits are used, which is roughly equivalent to 1 foot of
// precision for the default (-180, 180) index bounds.
//
// Relevant documentation:
//
//     http://www.mongodb.org/display/DOCS/Indexes
//     http://www.mongodb.org/display/DOCS/Indexing+Advice+and+FAQ
//     http://www.mongodb.org/display/DOCS/Indexing+as+a+Background+Operation
//     http://www.mongodb.org/display/DOCS/Geospatial+Indexing
//     http://www.mongodb.org/display/DOCS/Multikeys
//
func (c *Collection) EnsureIndex(index Index) error {
	keyInfo, err := parseIndexKey(index.Key)
	if err != nil {
		return err
	}
	session := c.Database.Session
	// Index creations are cached per cluster; skip the server round-trip
	// if this exact index was already ensured.
	cacheKey := c.FullName + "\x00" + keyInfo.name
	if session.cluster().HasCachedIndex(cacheKey) {
		return nil
	}
	spec := indexSpec{
		Name:             keyInfo.name,
		NS:               c.FullName,
		Key:              keyInfo.key,
		Unique:           index.Unique,
		DropDups:         index.DropDups,
		Background:       index.Background,
		Sparse:           index.Sparse,
		Bits:             index.Bits,
		Min:              index.Minf,
		Max:              index.Maxf,
		BucketSize:       index.BucketSize,
		ExpireAfter:      int(index.ExpireAfter / time.Second),
		Weights:          keyInfo.weights,
		DefaultLanguage:  index.DefaultLanguage,
		LanguageOverride: index.LanguageOverride,
	}
	// Fall back to the legacy int-typed Min/Max when the float fields are unset.
	if spec.Min == 0 && spec.Max == 0 {
		spec.Min = float64(index.Min)
		spec.Max = float64(index.Max)
	}
	if index.Name != "" {
		spec.Name = index.Name
	}
NextField:
	for name, weight := range index.Weights {
		for i, elem := range spec.Weights {
			if elem.Name == name {
				spec.Weights[i].Value = weight
				continue NextField
			}
		}
		panic("weight provided for field that is not part of index key: " + name)
	}
	// Use a Strong, safe clone so the creation hits the primary and
	// errors are reported, regardless of this session's settings.
	cloned := session.Clone()
	defer cloned.Close()
	cloned.SetMode(Strong, false)
	cloned.EnsureSafe(&Safe{})
	db := c.Database.With(cloned)
	// Try with a command first.
	err = db.Run(bson.D{{"createIndexes", c.Name}, {"indexes", []indexSpec{spec}}}, nil)
	if isNoCmd(err) {
		// Command not yet supported. Insert into the indexes collection instead.
		err = db.C("system.indexes").Insert(&spec)
	}
	if err == nil {
		session.cluster().CacheIndex(cacheKey, true)
	}
	return err
}

// DropIndex drops the index with the provided key from the c collection.
//
// See EnsureIndex for details on the accepted key variants.
//
// For example:
//
//     err1 := collection.DropIndex("firstField", "-secondField")
//     err2 := collection.DropIndex("customIndexName")
//
func (c *Collection) DropIndex(key ...string) error {
	keyInfo, err := parseIndexKey(key)
	if err != nil {
		return err
	}
	session := c.Database.Session
	// Invalidate the ensured-index cache so a later EnsureIndex
	// contacts the server again.
	cacheKey := c.FullName + "\x00" + keyInfo.name
	session.cluster().CacheIndex(cacheKey, false)
	// Use a Strong clone so the drop is issued to the primary.
	session = session.Clone()
	defer session.Close()
	session.SetMode(Strong, false)
	db := c.Database.With(session)
	result := struct {
		ErrMsg string
		Ok     bool
	}{}
	err = db.Run(bson.D{{"dropIndexes", c.Name}, {"index", keyInfo.name}}, &result)
	if err != nil {
		return err
	}
	if !result.Ok {
		return errors.New(result.ErrMsg)
	}
	return nil
}

// DropIndexName removes the index with the provided index name.
//
// For example:
//
//     err := collection.DropIndexName("customIndexName")
//
func (c *Collection) DropIndexName(name string) error {
	session := c.Database.Session
	// Use a Strong clone so the lookup and the drop hit the primary.
	session = session.Clone()
	defer session.Close()
	session.SetMode(Strong, false)
	c = c.With(session)
	indexes, err := c.Indexes()
	if err != nil {
		return err
	}
	var index Index
	for _, idx := range indexes {
		if idx.Name == name {
			index = idx
			break
		}
	}
	if index.Name != "" {
		// Known index: also invalidate the ensured-index cache entry.
		keyInfo, err := parseIndexKey(index.Key)
		if err != nil {
			return err
		}
		cacheKey := c.FullName + "\x00" + keyInfo.name
		session.cluster().CacheIndex(cacheKey, false)
	}
	result := struct {
		ErrMsg string
		Ok     bool
	}{}
	err = c.Database.Run(bson.D{{"dropIndexes", c.Name}, {"index", name}}, &result)
	if err != nil {
		return err
	}
	if !result.Ok {
		return errors.New(result.ErrMsg)
	}
	return nil
}

// Indexes returns a list of all indexes for the collection.
//
// For example, this snippet would drop all available indexes:
//
//     indexes, err := collection.Indexes()
//     if err != nil {
//         return err
//     }
//     for _, index := range indexes {
//         err = collection.DropIndex(index.Key...)
//         if err != nil {
//             return err
//         }
//     }
//
// See the EnsureIndex method for more details on indexes.
func (c *Collection) Indexes() (indexes []Index, err error) {
	// Clone session and set it to Monotonic mode so that the server
	// used for the query may be safely obtained afterwards, if
	// necessary for iteration when a cursor is received.
	session := c.Database.Session
	cloned := session.Clone()
	cloned.SetMode(Monotonic, false)
	defer cloned.Close()
	batchSize := int(cloned.queryConfig.op.limit)
	// Try with a command.
	var result struct {
		Indexes []bson.Raw // Pre-cursor servers return the list inline.
		Cursor  struct {
			FirstBatch []bson.Raw "firstBatch"
			NS         string
			Id         int64
		}
	}
	var iter *Iter
	err = c.Database.With(cloned).Run(bson.D{{"listIndexes", c.Name}, {"cursor", bson.D{{"batchSize", batchSize}}}}, &result)
	if err == nil {
		firstBatch := result.Indexes
		if firstBatch == nil {
			firstBatch = result.Cursor.FirstBatch
		}
		// The cursor namespace, when present, names the collection to
		// continue iterating on.
		ns := strings.SplitN(result.Cursor.NS, ".", 2)
		if len(ns) < 2 {
			iter = c.With(cloned).NewIter(nil, firstBatch, result.Cursor.Id, nil)
		} else {
			iter = cloned.DB(ns[0]).C(ns[1]).NewIter(nil, firstBatch, result.Cursor.Id, nil)
		}
	} else if isNoCmd(err) {
		// Command not yet supported. Query the database instead.
		iter = c.Database.C("system.indexes").Find(bson.M{"ns": c.FullName}).Iter()
	} else {
		return nil, err
	}
	var spec indexSpec
	for iter.Next(&spec) {
		indexes = append(indexes, indexFromSpec(spec))
	}
	if err = iter.Close(); err != nil {
		return nil, err
	}
	// Return the indexes sorted by name for deterministic output.
	sort.Sort(indexSlice(indexes))
	return indexes, nil
}

// indexFromSpec converts a raw server index spec into the public Index type.
func indexFromSpec(spec indexSpec) Index {
	index := Index{
		Name:             spec.Name,
		Key:              simpleIndexKey(spec.Key),
		Unique:           spec.Unique,
		DropDups:         spec.DropDups,
		Background:       spec.Background,
		Sparse:           spec.Sparse,
		Minf:             spec.Min,
		Maxf:             spec.Max,
		Bits:             spec.Bits,
		BucketSize:       spec.BucketSize,
		DefaultLanguage:  spec.DefaultLanguage,
		LanguageOverride: spec.LanguageOverride,
		ExpireAfter:      time.Duration(spec.ExpireAfter) * time.Second,
	}
	// Mirror whole-number bounds into the legacy int-typed Min/Max fields.
	if float64(int(spec.Min)) == spec.Min && float64(int(spec.Max)) == spec.Max {
		index.Min = int(spec.Min)
		index.Max = int(spec.Max)
	}
	if spec.TextIndexVersion > 0 {
		// Text indexes surface their indexed fields via the weights document.
		index.Key = make([]string, len(spec.Weights))
		index.Weights = make(map[string]int)
		for i, elem := range spec.Weights {
			index.Key[i] = "$text:" + elem.Name
			if w, ok := elem.Value.(int); ok {
				index.Weights[elem.Name] = w
			}
		}
	}
	return index
}

// indexSlice sorts a []Index by name.
type indexSlice []Index

func (idxs indexSlice) Len() int           { return len(idxs) }
func (idxs indexSlice) Less(i, j int) bool { return idxs[i].Name < idxs[j].Name }
func (idxs indexSlice) Swap(i, j int)      { idxs[i], idxs[j] = idxs[j], idxs[i] }

// simpleIndexKey converts a server-side key document back into the
// []string form accepted by EnsureIndex ("field", "-field", "$kind:field").
func simpleIndexKey(realKey bson.D) (key []string) {
	for i := range realKey {
		field := realKey[i].Name
		vi, ok := realKey[i].Value.(int)
		if !ok {
			// Numeric orders may unmarshal as float64 rather than int.
			vf, _ := realKey[i].Value.(float64)
			vi = int(vf)
		}
		if vi == 1 {
			key = append(key, field)
			continue
		}
		if vi == -1 {
			key = append(key, "-"+field)
			continue
		}
		if vs, ok := realKey[i].Value.(string); ok {
			key = append(key, "$"+vs+":"+field)
			continue
		}
		panic("Got unknown index key type for field " + field)
	}
	return
}

// ResetIndexCache() clears the cache of previously ensured indexes.
// Following requests to EnsureIndex will contact the server.
func (s *Session) ResetIndexCache() {
	s.cluster().ResetIndexCache()
}

// New creates a new session with the same parameters as the original
// session, including consistency, batch size, prefetching, safety mode,
// etc. The returned session will use sockets from the pool, so there's
// a chance that writes just performed in another session may not yet
// be visible.
//
// Login information from the original session will not be copied over
// into the new session unless it was provided through the initial URL
// for the Dial function.
//
// See the Copy and Clone methods.
//
func (s *Session) New() *Session {
	s.m.Lock()
	fresh := copySession(s, false)
	s.m.Unlock()
	fresh.Refresh()
	return fresh
}

// Copy works just like New, but preserves the exact authentication
// information from the original session.
func (s *Session) Copy() *Session {
	s.m.Lock()
	dup := copySession(s, true)
	s.m.Unlock()
	dup.Refresh()
	return dup
}

// Clone works just like Copy, but also reuses the same socket as the original
// session, in case it had already reserved one due to its consistency
// guarantees. This behavior ensures that writes performed in the old session
// are necessarily observed when using the new session, as long as it was a
// strong or monotonic session. That said, it also means that long operations
// may cause other goroutines using the original session to wait.
func (s *Session) Clone() *Session {
	s.m.Lock()
	linked := copySession(s, true)
	s.m.Unlock()
	// Note: no Refresh here, so a reserved socket carries over.
	return linked
}

// Close terminates the session. It's a runtime error to use a session
// after it has been closed.
func (s *Session) Close() {
	s.m.Lock()
	if s.cluster_ != nil {
		debugf("Closing session %p", s)
		// Return any reserved sockets and drop the cluster reference.
		s.unsetSocket()
		s.cluster_.Release()
		s.cluster_ = nil
	}
	s.m.Unlock()
}

// cluster returns the session's cluster, panicking if the session has
// already been closed.
func (s *Session) cluster() *mongoCluster {
	if s.cluster_ == nil {
		panic("Session already closed")
	}
	return s.cluster_
}

// Refresh puts back any reserved sockets in use and restarts the consistency
// guarantees according to the current consistency setting for the session.
func (s *Session) Refresh() {
	s.m.Lock()
	// Reading from secondaries is allowed again unless the mode is Strong.
	s.slaveOk = s.consistency != Strong
	s.unsetSocket()
	s.m.Unlock()
}

// SetMode changes the consistency mode for the session.
//
// In the Strong consistency mode reads and writes will always be made to
// the primary server using a unique connection so that reads and writes are
// fully consistent, ordered, and observing the most up-to-date data.
// This offers the least benefits in terms of distributing load, but the
// most guarantees. See also Monotonic and Eventual.
//
// In the Monotonic consistency mode reads may not be entirely up-to-date,
// but they will always see the history of changes moving forward, the data
// read will be consistent across sequential queries in the same session,
// and modifications made within the session will be observed in following
// queries (read-your-writes).
//
// In practice, the Monotonic mode is obtained by performing initial reads
// on a unique connection to an arbitrary secondary, if one is available,
// and once the first write happens, the session connection is switched over
// to the primary server. This manages to distribute some of the reading
// load with secondaries, while maintaining some useful guarantees.
//
// In the Eventual consistency mode reads will be made to any secondary in the
// cluster, if one is available, and sequential reads will not necessarily
// be made with the same connection. This means that data may be observed
// out of order. Writes will of course be issued to the primary, but
// independent writes in the same Eventual session may also be made with
// independent connections, so there are also no guarantees in terms of
// write ordering (no read-your-writes guarantees either).
//
// The Eventual mode is the fastest and most resource-friendly, but is
// also the one offering the least guarantees about ordering of the data
// read and written.
//
// If refresh is true, in addition to ensuring the session is in the given
// consistency mode, the consistency guarantees will also be reset (e.g.
// a Monotonic session will be allowed to read from secondaries again).
// This is equivalent to calling the Refresh function.
//
// Shifting between Monotonic and Strong modes will keep a previously
// reserved connection for the session unless refresh is true or the
// connection is unsuitable (to a secondary server in a Strong session).
func (s *Session) SetMode(consistency Mode, refresh bool) {
	s.m.Lock()
	debugf("Session %p: setting mode %d with refresh=%v (master=%p, slave=%p)", s, consistency, refresh, s.masterSocket, s.slaveSocket)
	s.consistency = consistency
	if refresh {
		s.slaveOk = s.consistency != Strong
		s.unsetSocket()
	} else if s.consistency == Strong {
		s.slaveOk = false
	} else if s.masterSocket == nil {
		// Not yet pinned to the primary; secondaries are still fine.
		s.slaveOk = true
	}
	s.m.Unlock()
}

// Mode returns the current consistency mode for the session.
func (s *Session) Mode() Mode {
	s.m.RLock()
	mode := s.consistency
	s.m.RUnlock()
	return mode
}

// SetSyncTimeout sets the amount of time an operation with this session
// will wait before returning an error in case a connection to a usable
// server can't be established. Set it to zero to wait forever. The
// default value is 7 seconds.
func (s *Session) SetSyncTimeout(d time.Duration) {
	s.m.Lock()
	s.syncTimeout = d
	s.m.Unlock()
}

// SetSocketTimeout sets the amount of time to wait for a non-responding
// socket to the database before it is forcefully closed.
func (s *Session) SetSocketTimeout(d time.Duration) { s.m.Lock() s.sockTimeout = d if s.masterSocket != nil { s.masterSocket.SetTimeout(d) } if s.slaveSocket != nil { s.slaveSocket.SetTimeout(d) } s.m.Unlock() } // SetCursorTimeout changes the standard timeout period that the server // enforces on created cursors. The only supported value right now is // 0, which disables the timeout. The standard server timeout is 10 minutes. func (s *Session) SetCursorTimeout(d time.Duration) { s.m.Lock() if d == 0 { s.queryConfig.op.flags |= flagNoCursorTimeout } else { panic("SetCursorTimeout: only 0 (disable timeout) supported for now") } s.m.Unlock() } // SetPoolLimit sets the maximum number of sockets in use in a single server // before this session will block waiting for a socket to be available. // The default limit is 4096. // // This limit must be set to cover more than any expected workload of the // application. It is a bad practice and an unsupported use case to use the // database driver to define the concurrency limit of an application. Prevent // such concurrency "at the door" instead, by properly restricting the amount // of used resources and number of goroutines before they are created. func (s *Session) SetPoolLimit(limit int) { s.m.Lock() s.poolLimit = limit s.m.Unlock() } // SetBatch sets the default batch size used when fetching documents from the // database. It's possible to change this setting on a per-query basis as // well, using the Query.Batch method. // // The default batch size is defined by the database itself. As of this // writing, MongoDB will use an initial size of min(100 docs, 4MB) on the // first batch, and 4MB on remaining ones. func (s *Session) SetBatch(n int) { if n == 1 { // Server interprets 1 as -1 and closes the cursor (!?) n = 2 } s.m.Lock() s.queryConfig.op.limit = int32(n) s.m.Unlock() } // SetPrefetch sets the default point at which the next batch of results will be // requested. 
When there are p*batch_size remaining documents cached in an // Iter, the next batch will be requested in background. For instance, when // using this: // // session.SetBatch(200) // session.SetPrefetch(0.25) // // and there are only 50 documents cached in the Iter to be processed, the // next batch of 200 will be requested. It's possible to change this setting on // a per-query basis as well, using the Prefetch method of Query. // // The default prefetch value is 0.25. func (s *Session) SetPrefetch(p float64) { s.m.Lock() s.queryConfig.prefetch = p s.m.Unlock() } // See SetSafe for details on the Safe type. type Safe struct { W int // Min # of servers to ack before success WMode string // Write mode for MongoDB 2.0+ (e.g. "majority") WTimeout int // Milliseconds to wait for W before timing out FSync bool // Should servers sync to disk before returning success J bool // Wait for next group commit if journaling; no effect otherwise } // Safe returns the current safety mode for the session. func (s *Session) Safe() (safe *Safe) { s.m.Lock() defer s.m.Unlock() if s.safeOp != nil { cmd := s.safeOp.query.(*getLastError) safe = &Safe{WTimeout: cmd.WTimeout, FSync: cmd.FSync, J: cmd.J} switch w := cmd.W.(type) { case string: safe.WMode = w case int: safe.W = w } } return } // SetSafe changes the session safety mode. // // If the safe parameter is nil, the session is put in unsafe mode, and writes // become fire-and-forget, without error checking. The unsafe mode is faster // since operations won't hold on waiting for a confirmation. // // If the safe parameter is not nil, any changing query (insert, update, ...) // will be followed by a getLastError command with the specified parameters, // to ensure the request was correctly processed. // // The safe.W parameter determines how many servers should confirm a write // before the operation is considered successful. If set to 0 or 1, the // command will return as soon as the primary is done with the request. 
// If safe.WTimeout is greater than zero, it determines how many milliseconds // to wait for the safe.W servers to respond before returning an error. // // Starting with MongoDB 2.0.0 the safe.WMode parameter can be used instead // of W to request for richer semantics. If set to "majority" the server will // wait for a majority of members from the replica set to respond before // returning. Custom modes may also be defined within the server to create // very detailed placement schemas. See the data awareness documentation in // the links below for more details (note that MongoDB internally reuses the // "w" field name for WMode). // // If safe.FSync is true and journaling is disabled, the servers will be // forced to sync all files to disk immediately before returning. If the // same option is true but journaling is enabled, the server will instead // await for the next group commit before returning. // // Since MongoDB 2.0.0, the safe.J option can also be used instead of FSync // to force the server to wait for a group commit in case journaling is // enabled. The option has no effect if the server has journaling disabled. // // For example, the following statement will make the session check for // errors, without imposing further constraints: // // session.SetSafe(&mgo.Safe{}) // // The following statement will force the server to wait for a majority of // members of a replica set to return (MongoDB 2.0+ only): // // session.SetSafe(&mgo.Safe{WMode: "majority"}) // // The following statement, on the other hand, ensures that at least two // servers have flushed the change to disk before confirming the success // of operations: // // session.EnsureSafe(&mgo.Safe{W: 2, FSync: true}) // // The following statement, on the other hand, disables the verification // of errors entirely: // // session.SetSafe(nil) // // See also the EnsureSafe method. 
// // Relevant documentation: // // http://www.mongodb.org/display/DOCS/getLastError+Command // http://www.mongodb.org/display/DOCS/Verifying+Propagation+of+Writes+with+getLastError // http://www.mongodb.org/display/DOCS/Data+Center+Awareness // func (s *Session) SetSafe(safe *Safe) { s.m.Lock() s.safeOp = nil s.ensureSafe(safe) s.m.Unlock() } // EnsureSafe compares the provided safety parameters with the ones // currently in use by the session and picks the most conservative // choice for each setting. // // That is: // // - safe.WMode is always used if set. // - safe.W is used if larger than the current W and WMode is empty. // - safe.FSync is always used if true. // - safe.J is used if FSync is false. // - safe.WTimeout is used if set and smaller than the current WTimeout. // // For example, the following statement will ensure the session is // at least checking for errors, without enforcing further constraints. // If a more conservative SetSafe or EnsureSafe call was previously done, // the following call will be ignored. // // session.EnsureSafe(&mgo.Safe{}) // // See also the SetSafe method for details on what each option means. // // Relevant documentation: // // http://www.mongodb.org/display/DOCS/getLastError+Command // http://www.mongodb.org/display/DOCS/Verifying+Propagation+of+Writes+with+getLastError // http://www.mongodb.org/display/DOCS/Data+Center+Awareness // func (s *Session) EnsureSafe(safe *Safe) { s.m.Lock() s.ensureSafe(safe) s.m.Unlock() } func (s *Session) ensureSafe(safe *Safe) { if safe == nil { return } var w interface{} if safe.WMode != "" { w = safe.WMode } else if safe.W > 0 { w = safe.W } var cmd getLastError if s.safeOp == nil { cmd = getLastError{1, w, safe.WTimeout, safe.FSync, safe.J} } else { // Copy. We don't want to mutate the existing query. 
		// Start from the currently configured getLastError and merge in
		// the more conservative of each setting (see EnsureSafe docs).
		cmd = *(s.safeOp.query.(*getLastError))
		if cmd.W == nil {
			cmd.W = w
		} else if safe.WMode != "" {
			// An explicit write mode always wins over a numeric W.
			cmd.W = safe.WMode
		} else if i, ok := cmd.W.(int); ok && safe.W > i {
			cmd.W = safe.W
		}
		// Smaller timeout is the stricter (more conservative) choice.
		if safe.WTimeout > 0 && safe.WTimeout < cmd.WTimeout {
			cmd.WTimeout = safe.WTimeout
		}
		if safe.FSync {
			// FSync supersedes J; the server rejects both together.
			cmd.FSync = true
			cmd.J = false
		} else if safe.J && !cmd.FSync {
			cmd.J = true
		}
	}
	s.safeOp = &queryOp{
		query:      &cmd,
		collection: "admin.$cmd",
		limit:      -1,
	}
}

// Run issues the provided command on the "admin" database and
// unmarshals its result in the respective argument. The cmd
// argument may be either a string with the command name itself, in
// which case an empty document of the form bson.M{cmd: 1} will be used,
// or it may be a full command document.
//
// Note that MongoDB considers the first marshalled key as the command
// name, so when providing a command with options, it's important to
// use an ordering-preserving document, such as a struct value or an
// instance of bson.D. For instance:
//
//     db.Run(bson.D{{"create", "mycollection"}, {"size", 1024}})
//
// For commands on arbitrary databases, see the Run method in
// the Database type.
//
// Relevant documentation:
//
//     http://www.mongodb.org/display/DOCS/Commands
//     http://www.mongodb.org/display/DOCS/List+of+Database+Commands
//
func (s *Session) Run(cmd interface{}, result interface{}) error {
	return s.DB("admin").Run(cmd, result)
}

// SelectServers restricts communication to servers configured with the
// given tags. For example, the following statement restricts servers
// used for reading operations to those with both tag "disk" set to
// "ssd" and tag "rack" set to 1:
//
//     session.SelectServers(bson.D{{"disk", "ssd"}, {"rack", 1}})
//
// Multiple sets of tags may be provided, in which case the used server
// must match all tags within any one set.
// // If a connection was previously assigned to the session due to the // current session mode (see Session.SetMode), the tag selection will // only be enforced after the session is refreshed. // // Relevant documentation: // // http://docs.mongodb.org/manual/tutorial/configure-replica-set-tag-sets // func (s *Session) SelectServers(tags ...bson.D) { s.m.Lock() s.queryConfig.op.serverTags = tags s.m.Unlock() } // Ping runs a trivial ping command just to get in touch with the server. func (s *Session) Ping() error { return s.Run("ping", nil) } // Fsync flushes in-memory writes to disk on the server the session // is established with. If async is true, the call returns immediately, // otherwise it returns after the flush has been made. func (s *Session) Fsync(async bool) error { return s.Run(bson.D{{"fsync", 1}, {"async", async}}, nil) } // FsyncLock locks all writes in the specific server the session is // established with and returns. Any writes attempted to the server // after it is successfully locked will block until FsyncUnlock is // called for the same server. // // This method works on secondaries as well, preventing the oplog from // being flushed while the server is locked, but since only the server // connected to is locked, for locking specific secondaries it may be // necessary to establish a connection directly to the secondary (see // Dial's connect=direct option). // // As an important caveat, note that once a write is attempted and // blocks, follow up reads will block as well due to the way the // lock is internally implemented in the server. More details at: // // https://jira.mongodb.org/browse/SERVER-4243 // // FsyncLock is often used for performing consistent backups of // the database files on disk. 
//
// Relevant documentation:
//
//     http://www.mongodb.org/display/DOCS/fsync+Command
//     http://www.mongodb.org/display/DOCS/Backups
//
func (s *Session) FsyncLock() error {
	return s.Run(bson.D{{"fsync", 1}, {"lock", true}}, nil)
}

// FsyncUnlock releases the server for writes. See FsyncLock for details.
func (s *Session) FsyncUnlock() error {
	// Historical server interface: unlocking is performed by reading a
	// document from the pseudo-collection "$cmd.sys.unlock" rather than
	// by running a regular command. The result document is discarded.
	return s.DB("admin").C("$cmd.sys.unlock").Find(nil).One(nil)
}

// Find prepares a query using the provided document. The document may be a
// map or a struct value capable of being marshalled with bson. The map
// may be a generic one using interface{} for its key and/or values, such as
// bson.M, or it may be a properly typed map. Providing nil as the document
// is equivalent to providing an empty document such as bson.M{}.
//
// Further details of the query may be tweaked using the resulting Query value,
// and then executed to retrieve results using methods such as One, For,
// Iter, or Tail.
//
// In case the resulting document includes a field named $err or errmsg, which
// are standard ways for MongoDB to return query errors, the returned err will
// be set to a *QueryError value including the Err message and the Code. In
// those cases, the result argument is still unmarshalled into with the
// received document so that any other custom values may be obtained if
// desired.
// // Relevant documentation: // // http://www.mongodb.org/display/DOCS/Querying // http://www.mongodb.org/display/DOCS/Advanced+Queries // func (c *Collection) Find(query interface{}) *Query { session := c.Database.Session session.m.RLock() q := &Query{session: session, query: session.queryConfig} session.m.RUnlock() q.op.query = query q.op.collection = c.FullName return q } type repairCmd struct { RepairCursor string `bson:"repairCursor"` Cursor *repairCmdCursor ",omitempty" } type repairCmdCursor struct { BatchSize int `bson:"batchSize,omitempty"` } // Repair returns an iterator that goes over all recovered documents in the // collection, in a best-effort manner. This is most useful when there are // damaged data files. Multiple copies of the same document may be returned // by the iterator. // // Repair is supported in MongoDB 2.7.8 and later. func (c *Collection) Repair() *Iter { // Clone session and set it to Monotonic mode so that the server // used for the query may be safely obtained afterwards, if // necessary for iteration when a cursor is received. session := c.Database.Session cloned := session.Clone() cloned.SetMode(Monotonic, false) defer cloned.Close() batchSize := int(cloned.queryConfig.op.limit) var result struct { Cursor struct { FirstBatch []bson.Raw "firstBatch" Id int64 } } cmd := repairCmd{ RepairCursor: c.Name, Cursor: &repairCmdCursor{batchSize}, } clonedc := c.With(cloned) err := clonedc.Database.Run(cmd, &result) return clonedc.NewIter(session, result.Cursor.FirstBatch, result.Cursor.Id, err) } // FindId is a convenience helper equivalent to: // // query := collection.Find(bson.M{"_id": id}) // // See the Find method for more details. 
func (c *Collection) FindId(id interface{}) *Query { return c.Find(bson.D{{"_id", id}}) } type Pipe struct { session *Session collection *Collection pipeline interface{} allowDisk bool batchSize int } type pipeCmd struct { Aggregate string Pipeline interface{} Cursor *pipeCmdCursor ",omitempty" Explain bool ",omitempty" AllowDisk bool "allowDiskUse,omitempty" } type pipeCmdCursor struct { BatchSize int `bson:"batchSize,omitempty"` } // Pipe prepares a pipeline to aggregate. The pipeline document // must be a slice built in terms of the aggregation framework language. // // For example: // // pipe := collection.Pipe([]bson.M{{"$match": bson.M{"name": "Otavio"}}}) // iter := pipe.Iter() // // Relevant documentation: // // http://docs.mongodb.org/manual/reference/aggregation // http://docs.mongodb.org/manual/applications/aggregation // http://docs.mongodb.org/manual/tutorial/aggregation-examples // func (c *Collection) Pipe(pipeline interface{}) *Pipe { session := c.Database.Session session.m.RLock() batchSize := int(session.queryConfig.op.limit) session.m.RUnlock() return &Pipe{ session: session, collection: c, pipeline: pipeline, batchSize: batchSize, } } // Iter executes the pipeline and returns an iterator capable of going // over all the generated results. func (p *Pipe) Iter() *Iter { // Clone session and set it to Monotonic mode so that the server // used for the query may be safely obtained afterwards, if // necessary for iteration when a cursor is received. cloned := p.session.Clone() cloned.SetMode(Monotonic, false) defer cloned.Close() c := p.collection.With(cloned) var result struct { // 2.4, no cursors. Result []bson.Raw // 2.6+, with cursors. 
Cursor struct { FirstBatch []bson.Raw "firstBatch" Id int64 } } cmd := pipeCmd{ Aggregate: c.Name, Pipeline: p.pipeline, AllowDisk: p.allowDisk, Cursor: &pipeCmdCursor{p.batchSize}, } err := c.Database.Run(cmd, &result) if e, ok := err.(*QueryError); ok && e.Message == `unrecognized field "cursor` { cmd.Cursor = nil cmd.AllowDisk = false err = c.Database.Run(cmd, &result) } firstBatch := result.Result if firstBatch == nil { firstBatch = result.Cursor.FirstBatch } return c.NewIter(p.session, firstBatch, result.Cursor.Id, err) } // NewIter returns a newly created iterator with the provided parameters. // Using this method is not recommended unless the desired functionality // is not yet exposed via a more convenient interface (Find, Pipe, etc). // // The optional session parameter associates the lifetime of the returned // iterator to an arbitrary session. If nil, the iterator will be bound to // c's session. // // Documents in firstBatch will be individually provided by the returned // iterator before documents from cursorId are made available. If cursorId // is zero, only the documents in firstBatch are provided. // // If err is not nil, the iterator's Err method will report it after // exhausting documents in firstBatch. // // NewIter must be called right after the cursor id is obtained, and must not // be called on a collection in Eventual mode, because the cursor id is // associated with the specific server that returned it. The provided session // parameter may be in any mode or state, though. 
// func (c *Collection) NewIter(session *Session, firstBatch []bson.Raw, cursorId int64, err error) *Iter { var server *mongoServer csession := c.Database.Session csession.m.RLock() socket := csession.masterSocket if socket == nil { socket = csession.slaveSocket } if socket != nil { server = socket.Server() } csession.m.RUnlock() if server == nil { if csession.Mode() == Eventual { panic("Collection.NewIter called in Eventual mode") } if err == nil { err = errors.New("server not available") } } if session == nil { session = csession } iter := &Iter{ session: session, server: server, timeout: -1, err: err, } iter.gotReply.L = &iter.m for _, doc := range firstBatch { iter.docData.Push(doc.Data) } if cursorId != 0 { iter.op.cursorId = cursorId iter.op.collection = c.FullName iter.op.replyFunc = iter.replyFunc() } return iter } // All works like Iter.All. func (p *Pipe) All(result interface{}) error { return p.Iter().All(result) } // One executes the pipeline and unmarshals the first item from the // result set into the result parameter. // It returns ErrNotFound if no items are generated by the pipeline. func (p *Pipe) One(result interface{}) error { iter := p.Iter() if iter.Next(result) { return nil } if err := iter.Err(); err != nil { return err } return ErrNotFound } // Explain returns a number of details about how the MongoDB server would // execute the requested pipeline, such as the number of objects examined, // the number of times the read lock was yielded to allow writes to go in, // and so on. 
// // For example: // // var m bson.M // err := collection.Pipe(pipeline).Explain(&m) // if err == nil { // fmt.Printf("Explain: %#v\n", m) // } // func (p *Pipe) Explain(result interface{}) error { c := p.collection cmd := pipeCmd{ Aggregate: c.Name, Pipeline: p.pipeline, AllowDisk: p.allowDisk, Explain: true, } return c.Database.Run(cmd, result) } // AllowDiskUse enables writing to the "/_tmp" server directory so // that aggregation pipelines do not have to be held entirely in memory. func (p *Pipe) AllowDiskUse() *Pipe { p.allowDisk = true return p } // Batch sets the batch size used when fetching documents from the database. // It's possible to change this setting on a per-session basis as well, using // the Batch method of Session. // // The default batch size is defined by the database server. func (p *Pipe) Batch(n int) *Pipe { p.batchSize = n return p } // mgo.v3: Use a single user-visible error type. type LastError struct { Err string Code, N, Waited int FSyncFiles int `bson:"fsyncFiles"` WTimeout bool UpdatedExisting bool `bson:"updatedExisting"` UpsertedId interface{} `bson:"upserted"` modified int errors []error } func (err *LastError) Error() string { return err.Err } type queryError struct { Err string "$err" ErrMsg string Assertion string Code int AssertionCode int "assertionCode" LastError *LastError "lastErrorObject" } type QueryError struct { Code int Message string Assertion bool } func (err *QueryError) Error() string { return err.Message } // IsDup returns whether err informs of a duplicate key error because // a primary key index or a secondary unique index already has an entry // with the given value. func IsDup(err error) bool { // Besides being handy, helps with MongoDB bugs SERVER-7164 and SERVER-11493. // What follows makes me sad. Hopefully conventions will be more clear over time. 
switch e := err.(type) { case *LastError: return e.Code == 11000 || e.Code == 11001 || e.Code == 12582 || e.Code == 16460 && strings.Contains(e.Err, " E11000 ") case *QueryError: return e.Code == 11000 || e.Code == 11001 || e.Code == 12582 case *bulkError: for _, ee := range e.errs { if !IsDup(ee) { return false } } return true } return false } // Insert inserts one or more documents in the respective collection. In // case the session is in safe mode (see the SetSafe method) and an error // happens while inserting the provided documents, the returned error will // be of type *LastError. func (c *Collection) Insert(docs ...interface{}) error { _, err := c.writeOp(&insertOp{c.FullName, docs, 0}, true) return err } // Update finds a single document matching the provided selector document // and modifies it according to the update document. // If the session is in safe mode (see SetSafe) a ErrNotFound error is // returned if a document isn't found, or a value of type *LastError // when some other error is detected. // // Relevant documentation: // // http://www.mongodb.org/display/DOCS/Updating // http://www.mongodb.org/display/DOCS/Atomic+Operations // func (c *Collection) Update(selector interface{}, update interface{}) error { if selector == nil { selector = bson.D{} } op := updateOp{ Collection: c.FullName, Selector: selector, Update: update, } lerr, err := c.writeOp(&op, true) if err == nil && lerr != nil && !lerr.UpdatedExisting { return ErrNotFound } return err } // UpdateId is a convenience helper equivalent to: // // err := collection.Update(bson.M{"_id": id}, update) // // See the Update method for more details. func (c *Collection) UpdateId(id interface{}, update interface{}) error { return c.Update(bson.D{{"_id", id}}, update) } // ChangeInfo holds details about the outcome of an update operation. 
type ChangeInfo struct {
	Updated    int         // Number of existing documents updated
	Removed    int         // Number of documents removed
	UpsertedId interface{} // Upserted _id field, when not explicitly provided
}

// UpdateAll finds all documents matching the provided selector document
// and modifies them according to the update document.
// If the session is in safe mode (see SetSafe) details of the executed
// operation are returned in info or an error of type *LastError when
// some problem is detected. It is not an error for the update to not be
// applied on any documents because the selector doesn't match.
//
// Relevant documentation:
//
//     http://www.mongodb.org/display/DOCS/Updating
//     http://www.mongodb.org/display/DOCS/Atomic+Operations
//
func (c *Collection) UpdateAll(selector interface{}, update interface{}) (info *ChangeInfo, err error) {
	// A nil selector matches every document, consistent with Find(nil).
	if selector == nil {
		selector = bson.D{}
	}
	op := updateOp{
		Collection: c.FullName,
		Selector:   selector,
		Update:     update,
		Flags:      2, // wire-protocol MultiUpdate flag — presumably mirrors Multi below; verify against updateOp serialization
		Multi:      true,
	}
	lerr, err := c.writeOp(&op, true)
	if err == nil && lerr != nil {
		info = &ChangeInfo{Updated: lerr.N}
	}
	return info, err
}

// Upsert finds a single document matching the provided selector document
// and modifies it according to the update document. If no document matching
// the selector is found, the update document is applied to the selector
// document and the result is inserted in the collection.
// If the session is in safe mode (see SetSafe) details of the executed
// operation are returned in info, or an error of type *LastError when
// some problem is detected.
// // Relevant documentation: // // http://www.mongodb.org/display/DOCS/Updating // http://www.mongodb.org/display/DOCS/Atomic+Operations // func (c *Collection) Upsert(selector interface{}, update interface{}) (info *ChangeInfo, err error) { if selector == nil { selector = bson.D{} } op := updateOp{ Collection: c.FullName, Selector: selector, Update: update, Flags: 1, Upsert: true, } lerr, err := c.writeOp(&op, true) if err == nil && lerr != nil { info = &ChangeInfo{} if lerr.UpdatedExisting { info.Updated = lerr.N } else { info.UpsertedId = lerr.UpsertedId } } return info, err } // UpsertId is a convenience helper equivalent to: // // info, err := collection.Upsert(bson.M{"_id": id}, update) // // See the Upsert method for more details. func (c *Collection) UpsertId(id interface{}, update interface{}) (info *ChangeInfo, err error) { return c.Upsert(bson.D{{"_id", id}}, update) } // Remove finds a single document matching the provided selector document // and removes it from the database. // If the session is in safe mode (see SetSafe) a ErrNotFound error is // returned if a document isn't found, or a value of type *LastError // when some other error is detected. // // Relevant documentation: // // http://www.mongodb.org/display/DOCS/Removing // func (c *Collection) Remove(selector interface{}) error { lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 1}, true) if err == nil && lerr != nil && lerr.N == 0 { return ErrNotFound } return err } // RemoveId is a convenience helper equivalent to: // // err := collection.Remove(bson.M{"_id": id}) // // See the Remove method for more details. func (c *Collection) RemoveId(id interface{}) error { return c.Remove(bson.D{{"_id", id}}) } // RemoveAll finds all documents matching the provided selector document // and removes them from the database. In case the session is in safe mode // (see the SetSafe method) and an error happens when attempting the change, // the returned error will be of type *LastError. 
// // Relevant documentation: // // http://www.mongodb.org/display/DOCS/Removing // func (c *Collection) RemoveAll(selector interface{}) (info *ChangeInfo, err error) { lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 0}, true) if err == nil && lerr != nil { info = &ChangeInfo{Removed: lerr.N} } return info, err } // DropDatabase removes the entire database including all of its collections. func (db *Database) DropDatabase() error { return db.Run(bson.D{{"dropDatabase", 1}}, nil) } // DropCollection removes the entire collection including all of its documents. func (c *Collection) DropCollection() error { return c.Database.Run(bson.D{{"drop", c.Name}}, nil) } // The CollectionInfo type holds metadata about a collection. // // Relevant documentation: // // http://www.mongodb.org/display/DOCS/createCollection+Command // http://www.mongodb.org/display/DOCS/Capped+Collections // type CollectionInfo struct { // DisableIdIndex prevents the automatic creation of the index // on the _id field for the collection. DisableIdIndex bool // ForceIdIndex enforces the automatic creation of the index // on the _id field for the collection. Capped collections, // for example, do not have such an index by default. ForceIdIndex bool // If Capped is true new documents will replace old ones when // the collection is full. MaxBytes must necessarily be set // to define the size when the collection wraps around. // MaxDocs optionally defines the number of documents when it // wraps, but MaxBytes still needs to be set. Capped bool MaxBytes int MaxDocs int } // Create explicitly creates the c collection with details of info. // MongoDB creates collections automatically on use, so this method // is only necessary when creating collection with non-default // characteristics, such as capped collections. 
// // Relevant documentation: // // http://www.mongodb.org/display/DOCS/createCollection+Command // http://www.mongodb.org/display/DOCS/Capped+Collections // func (c *Collection) Create(info *CollectionInfo) error { cmd := make(bson.D, 0, 4) cmd = append(cmd, bson.DocElem{"create", c.Name}) if info.Capped { if info.MaxBytes < 1 { return fmt.Errorf("Collection.Create: with Capped, MaxBytes must also be set") } cmd = append(cmd, bson.DocElem{"capped", true}) cmd = append(cmd, bson.DocElem{"size", info.MaxBytes}) if info.MaxDocs > 0 { cmd = append(cmd, bson.DocElem{"max", info.MaxDocs}) } } if info.DisableIdIndex { cmd = append(cmd, bson.DocElem{"autoIndexId", false}) } if info.ForceIdIndex { cmd = append(cmd, bson.DocElem{"autoIndexId", true}) } return c.Database.Run(cmd, nil) } // Batch sets the batch size used when fetching documents from the database. // It's possible to change this setting on a per-session basis as well, using // the Batch method of Session. // // The default batch size is defined by the database itself. As of this // writing, MongoDB will use an initial size of min(100 docs, 4MB) on the // first batch, and 4MB on remaining ones. func (q *Query) Batch(n int) *Query { if n == 1 { // Server interprets 1 as -1 and closes the cursor (!?) n = 2 } q.m.Lock() q.op.limit = int32(n) q.m.Unlock() return q } // Prefetch sets the point at which the next batch of results will be requested. // When there are p*batch_size remaining documents cached in an Iter, the next // batch will be requested in background. For instance, when using this: // // query.Batch(200).Prefetch(0.25) // // and there are only 50 documents cached in the Iter to be processed, the // next batch of 200 will be requested. It's possible to change this setting on // a per-session basis as well, using the SetPrefetch method of Session. // // The default prefetch value is 0.25. 
func (q *Query) Prefetch(p float64) *Query {
	q.m.Lock()
	q.prefetch = p
	q.m.Unlock()
	return q
}

// Skip skips over the n initial documents from the query results. Note that
// this only makes sense with capped collections where documents are naturally
// ordered by insertion time, or with sorted results.
func (q *Query) Skip(n int) *Query {
	q.m.Lock()
	q.op.skip = int32(n)
	q.m.Unlock()
	return q
}

// Limit restricts the maximum number of documents retrieved to n, and also
// changes the batch size to the same value. Once n documents have been
// returned by Next, the following call will return ErrNotFound.
func (q *Query) Limit(n int) *Query {
	q.m.Lock()
	switch {
	case n == 1:
		// A wire limit of 1 would make the server close the cursor after
		// the first batch; -1 requests exactly one document instead.
		q.limit = 1
		q.op.limit = -1
	case n == math.MinInt32:
		// Negating MinInt32 overflows (-MinInt32 == MinInt32), so clamp
		// to the largest representable limit and avoid the overflow in
		// the negative branch below.
		q.limit = math.MaxInt32
		q.op.limit = math.MinInt32 + 1
	case n < 0:
		// Negative n: limit to -n documents and close the cursor after
		// the first batch (negative wire limit semantics).
		q.limit = int32(-n)
		q.op.limit = int32(n)
	default:
		q.limit = int32(n)
		q.op.limit = int32(n)
	}
	q.m.Unlock()
	return q
}

// Select enables selecting which fields should be retrieved for the results
// found. For example, the following query would only retrieve the name field:
//
//     err := collection.Find(nil).Select(bson.M{"name": 1}).One(&result)
//
// Relevant documentation:
//
//     http://www.mongodb.org/display/DOCS/Retrieving+a+Subset+of+Fields
//
func (q *Query) Select(selector interface{}) *Query {
	q.m.Lock()
	q.op.selector = selector
	q.m.Unlock()
	return q
}

// Sort asks the database to order returned documents according to the
// provided field names. A field name may be prefixed by - (minus) for
// it to be sorted in reverse order.
// // For example: // // query1 := collection.Find(nil).Sort("firstname", "lastname") // query2 := collection.Find(nil).Sort("-age") // query3 := collection.Find(nil).Sort("$natural") // query4 := collection.Find(nil).Select(bson.M{"score": bson.M{"$meta": "textScore"}}).Sort("$textScore:score") // // Relevant documentation: // // http://www.mongodb.org/display/DOCS/Sorting+and+Natural+Order // func (q *Query) Sort(fields ...string) *Query { q.m.Lock() var order bson.D for _, field := range fields { n := 1 var kind string if field != "" { if field[0] == '$' { if c := strings.Index(field, ":"); c > 1 && c < len(field)-1 { kind = field[1:c] field = field[c+1:] } } switch field[0] { case '+': field = field[1:] case '-': n = -1 field = field[1:] } } if field == "" { panic("Sort: empty field name") } if kind == "textScore" { order = append(order, bson.DocElem{field, bson.M{"$meta": kind}}) } else { order = append(order, bson.DocElem{field, n}) } } q.op.options.OrderBy = order q.op.hasOptions = true q.m.Unlock() return q } // Explain returns a number of details about how the MongoDB server would // execute the requested query, such as the number of objects examined, // the number of times the read lock was yielded to allow writes to go in, // and so on. 
// // For example: // // m := bson.M{} // err := collection.Find(bson.M{"filename": name}).Explain(m) // if err == nil { // fmt.Printf("Explain: %#v\n", m) // } // // Relevant documentation: // // http://www.mongodb.org/display/DOCS/Optimization // http://www.mongodb.org/display/DOCS/Query+Optimizer // func (q *Query) Explain(result interface{}) error { q.m.Lock() clone := &Query{session: q.session, query: q.query} q.m.Unlock() clone.op.options.Explain = true clone.op.hasOptions = true if clone.op.limit > 0 { clone.op.limit = -q.op.limit } iter := clone.Iter() if iter.Next(result) { return nil } return iter.Close() } // Hint will include an explicit "hint" in the query to force the server // to use a specified index, potentially improving performance in some // situations. The provided parameters are the fields that compose the // key of the index to be used. For details on how the indexKey may be // built, see the EnsureIndex method. // // For example: // // query := collection.Find(bson.M{"firstname": "Joe", "lastname": "Winter"}) // query.Hint("lastname", "firstname") // // Relevant documentation: // // http://www.mongodb.org/display/DOCS/Optimization // http://www.mongodb.org/display/DOCS/Query+Optimizer // func (q *Query) Hint(indexKey ...string) *Query { q.m.Lock() keyInfo, err := parseIndexKey(indexKey) q.op.options.Hint = keyInfo.key q.op.hasOptions = true q.m.Unlock() if err != nil { panic(err) } return q } // SetMaxScan constrains the query to stop after scanning the specified // number of documents. // // This modifier is generally used to prevent potentially long running // queries from disrupting performance by scanning through too much data. func (q *Query) SetMaxScan(n int) *Query { q.m.Lock() q.op.options.MaxScan = n q.op.hasOptions = true q.m.Unlock() return q } // SetMaxTime constrains the query to stop after running for the specified time. // // When the time limit is reached MongoDB automatically cancels the query. 
// This can be used to efficiently prevent and identify unexpectedly slow queries. // // A few important notes about the mechanism enforcing this limit: // // - Requests can block behind locking operations on the server, and that blocking // time is not accounted for. In other words, the timer starts ticking only after // the actual start of the query when it initially acquires the appropriate lock; // // - Operations are interrupted only at interrupt points where an operation can be // safely aborted – the total execution time may exceed the specified value; // // - The limit can be applied to both CRUD operations and commands, but not all // commands are interruptible; // // - While iterating over results, computing follow up batches is included in the // total time and the iteration continues until the alloted time is over, but // network roundtrips are not taken into account for the limit. // // - This limit does not override the inactive cursor timeout for idle cursors // (default is 10 min). // // This mechanism was introduced in MongoDB 2.6. // // Relevant documentation: // // http://blog.mongodb.org/post/83621787773/maxtimems-and-query-optimizer-introspection-in // func (q *Query) SetMaxTime(d time.Duration) *Query { q.m.Lock() q.op.options.MaxTimeMS = int(d / time.Millisecond) q.op.hasOptions = true q.m.Unlock() return q } // Snapshot will force the performed query to make use of an available // index on the _id field to prevent the same document from being returned // more than once in a single iteration. This might happen without this // setting in situations when the document changes in size and thus has to // be moved while the iteration is running. // // Because snapshot mode traverses the _id index, it may not be used with // sorting or explicit hints. It also cannot use any other index for the // query. 
// // Even with snapshot mode, items inserted or deleted during the query may // or may not be returned; that is, this mode is not a true point-in-time // snapshot. // // The same effect of Snapshot may be obtained by using any unique index on // field(s) that will not be modified (best to use Hint explicitly too). // A non-unique index (such as creation time) may be made unique by // appending _id to the index when creating it. // // Relevant documentation: // // http://www.mongodb.org/display/DOCS/How+to+do+Snapshotted+Queries+in+the+Mongo+Database // func (q *Query) Snapshot() *Query { q.m.Lock() q.op.options.Snapshot = true q.op.hasOptions = true q.m.Unlock() return q } // Comment adds a comment to the query to identify it in the database profiler output. // // Relevant documentation: // // http://docs.mongodb.org/manual/reference/operator/meta/comment // http://docs.mongodb.org/manual/reference/command/profile // http://docs.mongodb.org/manual/administration/analyzing-mongodb-performance/#database-profiling // func (q *Query) Comment(comment string) *Query { q.m.Lock() q.op.options.Comment = comment q.op.hasOptions = true q.m.Unlock() return q } // LogReplay enables an option that optimizes queries that are typically // made on the MongoDB oplog for replaying it. This is an internal // implementation aspect and most likely uninteresting for other uses. // It has seen at least one use case, though, so it's exposed via the API. 
func (q *Query) LogReplay() *Query {
	q.m.Lock()
	q.op.flags |= flagLogReplay
	q.m.Unlock()
	return q
}

// checkQueryError inspects raw BSON reply data for the standard error shapes
// MongoDB uses ($err on plain queries, errmsg on $cmd replies) and converts
// them into a *QueryError or *LastError when one is found. It returns nil
// when the data carries no recognizable error.
func checkQueryError(fullname string, d []byte) error {
	l := len(d)
	if l < 16 {
		// Too short to hold an error element; nothing to report.
		return nil
	}
	// Fast path: a string element ('\x02') named "$err" as the first element
	// of the document marks a query error.
	if d[5] == '$' && d[6] == 'e' && d[7] == 'r' && d[8] == 'r' && d[9] == '\x00' && d[4] == '\x02' {
		goto Error
	}
	if len(fullname) < 5 || fullname[len(fullname)-5:] != ".$cmd" {
		// Not a command reply; only the "$err" shape above applies.
		return nil
	}
	// Command replies may carry a string element named "errmsg" anywhere.
	for i := 0; i+8 < l; i++ {
		if d[i] == '\x02' && d[i+1] == 'e' && d[i+2] == 'r' && d[i+3] == 'r' && d[i+4] == 'm' && d[i+5] == 's' && d[i+6] == 'g' && d[i+7] == '\x00' {
			goto Error
		}
	}
	return nil

Error:
	result := &queryError{}
	// Unmarshal errors are deliberately ignored; fields stay at their zero
	// values and fall through the checks below.
	bson.Unmarshal(d, result)
	if result.LastError != nil {
		return result.LastError
	}
	if result.Err == "" && result.ErrMsg == "" {
		return nil
	}
	if result.AssertionCode != 0 && result.Assertion != "" {
		return &QueryError{Code: result.AssertionCode, Message: result.Assertion, Assertion: true}
	}
	if result.Err != "" {
		return &QueryError{Code: result.Code, Message: result.Err}
	}
	return &QueryError{Code: result.Code, Message: result.ErrMsg}
}

// One executes the query and unmarshals the first obtained document into the
// result argument. The result must be a struct or map value capable of being
// unmarshalled into by gobson. This function blocks until either a result
// is available or an error happens. For example:
//
//     err := collection.Find(bson.M{"a": 1}).One(&result)
//
// In case the resulting document includes a field named $err or errmsg, which
// are standard ways for MongoDB to return query errors, the returned err will
// be set to a *QueryError value including the Err message and the Code. In
// those cases, the result argument is still unmarshalled into with the
// received document so that any other custom values may be obtained if
// desired.
//
func (q *Query) One(result interface{}) (err error) {
	q.m.Lock()
	session := q.session
	op := q.op // Copy.
	q.m.Unlock()

	socket, err := session.acquireSocket(true)
	if err != nil {
		return err
	}
	defer socket.Release()

	session.prepareQuery(&op)
	// limit of -1 tells the server to return a single document and close
	// the cursor immediately.
	op.limit = -1

	data, err := socket.SimpleQuery(&op)
	if err != nil {
		return err
	}
	if data == nil {
		return ErrNotFound
	}
	if result != nil {
		err = bson.Unmarshal(data, result)
		if err == nil {
			debugf("Query %p document unmarshaled: %#v", q, result)
		} else {
			debugf("Query %p document unmarshaling failed: %#v", q, err)
			return err
		}
	}
	// Even on success, the reply may embed a $err/errmsg error document.
	return checkQueryError(op.collection, data)
}

// run duplicates the behavior of collection.Find(query).One(&result)
// as performed by Database.Run, specializing the logic for running
// database commands on a given socket.
func (db *Database) run(socket *mongoSocket, cmd, result interface{}) (err error) {
	// Database.Run:
	// A plain string command name is shorthand for {name: 1}.
	if name, ok := cmd.(string); ok {
		cmd = bson.D{{name, 1}}
	}

	// Collection.Find:
	session := db.Session
	session.m.RLock()
	op := session.queryConfig.op // Copy.
	session.m.RUnlock()
	op.query = cmd
	op.collection = db.Name + ".$cmd"

	// Query.One:
	session.prepareQuery(&op)
	op.limit = -1

	data, err := socket.SimpleQuery(&op)
	if err != nil {
		return err
	}
	if data == nil {
		return ErrNotFound
	}
	if result != nil {
		err = bson.Unmarshal(data, result)
		if err == nil {
			var res bson.M
			bson.Unmarshal(data, &res)
			debugf("Run command unmarshaled: %#v, result: %#v", op, res)
		} else {
			debugf("Run command unmarshaling failed: %#v", op, err)
			return err
		}
	}
	return checkQueryError(op.collection, data)
}

// The DBRef type implements support for the database reference MongoDB
// convention as supported by multiple drivers. This convention enables
// cross-referencing documents between collections and databases using
// a structure which includes a collection name, a document id, and
// optionally a database name.
//
// See the FindRef methods on Session and on Database.
// // Relevant documentation: // // http://www.mongodb.org/display/DOCS/Database+References // type DBRef struct { Collection string `bson:"$ref"` Id interface{} `bson:"$id"` Database string `bson:"$db,omitempty"` } // NOTE: Order of fields for DBRef above does matter, per documentation. // FindRef returns a query that looks for the document in the provided // reference. If the reference includes the DB field, the document will // be retrieved from the respective database. // // See also the DBRef type and the FindRef method on Session. // // Relevant documentation: // // http://www.mongodb.org/display/DOCS/Database+References // func (db *Database) FindRef(ref *DBRef) *Query { var c *Collection if ref.Database == "" { c = db.C(ref.Collection) } else { c = db.Session.DB(ref.Database).C(ref.Collection) } return c.FindId(ref.Id) } // FindRef returns a query that looks for the document in the provided // reference. For a DBRef to be resolved correctly at the session level // it must necessarily have the optional DB field defined. // // See also the DBRef type and the FindRef method on Database. // // Relevant documentation: // // http://www.mongodb.org/display/DOCS/Database+References // func (s *Session) FindRef(ref *DBRef) *Query { if ref.Database == "" { panic(errors.New(fmt.Sprintf("Can't resolve database for %#v", ref))) } c := s.DB(ref.Database).C(ref.Collection) return c.FindId(ref.Id) } // CollectionNames returns the collection names present in the db database. func (db *Database) CollectionNames() (names []string, err error) { // Clone session and set it to Monotonic mode so that the server // used for the query may be safely obtained afterwards, if // necessary for iteration when a cursor is received. session := db.Session cloned := session.Clone() cloned.SetMode(Monotonic, false) defer cloned.Close() batchSize := int(cloned.queryConfig.op.limit) // Try with a command. 
var result struct { Collections []bson.Raw Cursor struct { FirstBatch []bson.Raw "firstBatch" NS string Id int64 } } err = db.With(cloned).Run(bson.D{{"listCollections", 1}, {"cursor", bson.D{{"batchSize", batchSize}}}}, &result) if err == nil { firstBatch := result.Collections if firstBatch == nil { firstBatch = result.Cursor.FirstBatch } var iter *Iter ns := strings.SplitN(result.Cursor.NS, ".", 2) if len(ns) < 2 { iter = db.With(cloned).C("").NewIter(nil, firstBatch, result.Cursor.Id, nil) } else { iter = cloned.DB(ns[0]).C(ns[1]).NewIter(nil, firstBatch, result.Cursor.Id, nil) } var coll struct{ Name string } for iter.Next(&coll) { names = append(names, coll.Name) } if err := iter.Close(); err != nil { return nil, err } sort.Strings(names) return names, err } if err != nil && !isNoCmd(err) { return nil, err } // Command not yet supported. Query the database instead. nameIndex := len(db.Name) + 1 iter := db.C("system.namespaces").Find(nil).Iter() var coll struct{ Name string } for iter.Next(&coll) { if strings.Index(coll.Name, "$") < 0 || strings.Index(coll.Name, ".oplog.$") >= 0 { names = append(names, coll.Name[nameIndex:]) } } if err := iter.Close(); err != nil { return nil, err } sort.Strings(names) return names, nil } type dbNames struct { Databases []struct { Name string Empty bool } } // DatabaseNames returns the names of non-empty databases present in the cluster. func (s *Session) DatabaseNames() (names []string, err error) { var result dbNames err = s.Run("listDatabases", &result) if err != nil { return nil, err } for _, db := range result.Databases { if !db.Empty { names = append(names, db.Name) } } sort.Strings(names) return names, nil } // Iter executes the query and returns an iterator capable of going over all // the results. Results will be returned in batches of configurable // size (see the Batch method) and more documents will be requested when a // configurable number of documents is iterated over (see the Prefetch method). 
func (q *Query) Iter() *Iter {
	// Snapshot the query state under the lock; iteration proceeds on copies.
	q.m.Lock()
	session := q.session
	op := q.op
	prefetch := q.prefetch
	limit := q.limit
	q.m.Unlock()

	iter := &Iter{
		session:  session,
		prefetch: prefetch,
		limit:    limit,
		timeout:  -1, // -1 means Next never times out (non-tailable cursor).
	}
	iter.gotReply.L = &iter.m
	iter.op.collection = op.collection
	iter.op.limit = op.limit
	iter.op.replyFunc = iter.replyFunc()
	// One reply is expected for the initial query below.
	iter.docsToReceive++
	session.prepareQuery(&op)
	op.replyFunc = iter.op.replyFunc

	socket, err := session.acquireSocket(true)
	if err != nil {
		iter.err = err
	} else {
		iter.server = socket.Server()
		err = socket.Query(&op)
		if err != nil {
			// Must lock as the query above may call replyFunc.
			iter.m.Lock()
			iter.err = err
			iter.m.Unlock()
		}
		socket.Release()
	}
	return iter
}

// Tail returns a tailable iterator. Unlike a normal iterator, a
// tailable iterator may wait for new values to be inserted in the
// collection once the end of the current result set is reached,
// A tailable iterator may only be used with capped collections.
//
// The timeout parameter indicates how long Next will block waiting
// for a result before timing out.  If set to -1, Next will not
// timeout, and will continue waiting for a result for as long as
// the cursor is valid and the session is not closed. If set to 0,
// Next times out as soon as it reaches the end of the result set.
// Otherwise, Next will wait for at least the given number of
// seconds for a new document to be available before timing out.
//
// On timeouts, Next will unblock and return false, and the Timeout
// method will return true if called. In these cases, Next may still
// be called again on the same iterator to check if a new value is
// available at the current cursor position, and again it will block
// according to the specified timeoutSecs. If the cursor becomes
// invalid, though, both Next and Timeout will return false and
// the query must be restarted.
//
// The following example demonstrates timeout handling and query
// restarting:
//
//    iter := collection.Find(nil).Sort("$natural").Tail(5 * time.Second)
//    for {
//         for iter.Next(&result) {
//             fmt.Println(result.Id)
//             lastId = result.Id
//         }
//         if iter.Err() != nil {
//             return iter.Close()
//         }
//         if iter.Timeout() {
//             continue
//         }
//         query := collection.Find(bson.M{"_id": bson.M{"$gt": lastId}})
//         iter = query.Sort("$natural").Tail(5 * time.Second)
//    }
//    iter.Close()
//
// Relevant documentation:
//
//     http://www.mongodb.org/display/DOCS/Tailable+Cursors
//     http://www.mongodb.org/display/DOCS/Capped+Collections
//     http://www.mongodb.org/display/DOCS/Sorting+and+Natural+Order
//
func (q *Query) Tail(timeout time.Duration) *Iter {
	q.m.Lock()
	session := q.session
	op := q.op
	prefetch := q.prefetch
	q.m.Unlock()

	iter := &Iter{session: session, prefetch: prefetch}
	iter.gotReply.L = &iter.m
	iter.timeout = timeout
	iter.op.collection = op.collection
	iter.op.limit = op.limit
	iter.op.replyFunc = iter.replyFunc()
	iter.docsToReceive++
	session.prepareQuery(&op)
	op.replyFunc = iter.op.replyFunc
	// Tailable + await-data are what make the server-side cursor block
	// waiting for new documents instead of closing at end of results.
	op.flags |= flagTailable | flagAwaitData

	socket, err := session.acquireSocket(true)
	if err != nil {
		iter.err = err
	} else {
		iter.server = socket.Server()
		err = socket.Query(&op)
		if err != nil {
			// Must lock as the query above may call replyFunc.
			iter.m.Lock()
			iter.err = err
			iter.m.Unlock()
		}
		socket.Release()
	}
	return iter
}

// prepareQuery stamps the session's consistency mode and slave-ok flag
// onto the operation about to be issued.
func (s *Session) prepareQuery(op *queryOp) {
	s.m.RLock()
	op.mode = s.consistency
	if s.slaveOk {
		op.flags |= flagSlaveOk
	}
	s.m.RUnlock()
	return
}

// Err returns nil if no errors happened during iteration, or the actual
// error otherwise.
//
// In case a resulting document included a field named $err or errmsg, which are
// standard ways for MongoDB to report an improper query, the returned value has
// a *QueryError type, and includes the Err message and the Code.
func (iter *Iter) Err() error {
	iter.m.Lock()
	err := iter.err
	iter.m.Unlock()
	// ErrNotFound marks normal exhaustion of the result set here,
	// not a failure, so it is masked from the caller.
	if err == ErrNotFound {
		return nil
	}
	return err
}

// Close kills the server cursor used by the iterator, if any, and returns
// nil if no errors happened during iteration, or the actual error otherwise.
//
// Server cursors are automatically closed at the end of an iteration, which
// means close will do nothing unless the iteration was interrupted before
// the server finished sending results to the driver. If Close is not called
// in such a situation, the cursor will remain available at the server until
// the default cursor timeout period is reached. No further problems arise.
//
// Close is idempotent. That means it can be called repeatedly and will
// return the same result every time.
//
// In case a resulting document included a field named $err or errmsg, which are
// standard ways for MongoDB to report an improper query, the returned value has
// a *QueryError type.
func (iter *Iter) Close() error {
	iter.m.Lock()
	// Zeroing cursorId here is what makes Close idempotent: a second call
	// takes the early-return path below.
	cursorId := iter.op.cursorId
	iter.op.cursorId = 0
	err := iter.err
	iter.m.Unlock()
	if cursorId == 0 {
		if err == ErrNotFound {
			return nil
		}
		return err
	}
	socket, err := iter.acquireSocket()
	if err == nil {
		// TODO Batch kills.
		err = socket.Query(&killCursorsOp{[]int64{cursorId}})
		socket.Release()
	}

	iter.m.Lock()
	// Merge the kill-cursor error with any prior iteration error, giving
	// precedence to a pre-existing real error over ErrNotFound.
	if err != nil && (iter.err == nil || iter.err == ErrNotFound) {
		iter.err = err
	} else if iter.err != ErrNotFound {
		err = iter.err
	}
	iter.m.Unlock()
	return err
}

// Timeout returns true if Next returned false due to a timeout of
// a tailable cursor. In those cases, Next may be called again to continue
// the iteration at the previous cursor position.
func (iter *Iter) Timeout() bool {
	iter.m.Lock()
	result := iter.timedout
	iter.m.Unlock()
	return result
}

// Next retrieves the next document from the result set, blocking if necessary.
// This method will also automatically retrieve another batch of documents from
// the server when the current one is exhausted, or before that in background
// if pre-fetching is enabled (see the Query.Prefetch and Session.SetPrefetch
// methods).
//
// Next returns true if a document was successfully unmarshalled onto result,
// and false at the end of the result set or if an error happened.
// When Next returns false, the Err method should be called to verify if
// there was an error during iteration.
//
// For example:
//
//    iter := collection.Find(nil).Iter()
//    for iter.Next(&result) {
//        fmt.Printf("Result: %v\n", result.Id)
//    }
//    if err := iter.Close(); err != nil {
//        return err
//    }
//
func (iter *Iter) Next(result interface{}) bool {
	iter.m.Lock()
	iter.timedout = false
	timeout := time.Time{}
	// Wait until a document is buffered, an error occurs, or the cursor is
	// known to be exhausted. gotReply is signalled by replyFunc as batches
	// arrive.
	for iter.err == nil && iter.docData.Len() == 0 && (iter.docsToReceive > 0 || iter.op.cursorId != 0) {
		if iter.docsToReceive == 0 {
			if iter.timeout >= 0 {
				if timeout.IsZero() {
					timeout = time.Now().Add(iter.timeout)
				}
				if time.Now().After(timeout) {
					// Tailable cursor deadline reached; report a timeout
					// rather than an error.
					iter.timedout = true
					iter.m.Unlock()
					return false
				}
			}
			iter.getMore()
			if iter.err != nil {
				break
			}
		}
		iter.gotReply.Wait()
	}

	// Exhaust available data before reporting any errors.
	if docData, ok := iter.docData.Pop().([]byte); ok {
		close := false
		if iter.limit > 0 {
			iter.limit--
			if iter.limit == 0 {
				if iter.docData.Len() > 0 {
					iter.m.Unlock()
					panic(fmt.Errorf("data remains after limit exhausted: %d", iter.docData.Len()))
				}
				// Mark exhaustion and schedule the server cursor for closing.
				iter.err = ErrNotFound
				close = true
			}
		}
		if iter.op.cursorId != 0 && iter.err == nil {
			// Prefetch: request the next batch before the buffer runs dry.
			iter.docsBeforeMore--
			if iter.docsBeforeMore == -1 {
				iter.getMore()
			}
		}
		iter.m.Unlock()

		if close {
			iter.Close()
		}
		err := bson.Unmarshal(docData, result)
		if err != nil {
			debugf("Iter %p document unmarshaling failed: %#v", iter, err)
			iter.m.Lock()
			if iter.err == nil {
				iter.err = err
			}
			iter.m.Unlock()
			return false
		}
		debugf("Iter %p document unmarshaled: %#v", iter, result)
		// XXX Only have to check first document for a query error?
		err = checkQueryError(iter.op.collection, docData)
		if err != nil {
			iter.m.Lock()
			if iter.err == nil {
				iter.err = err
			}
			iter.m.Unlock()
			return false
		}
		return true
	} else if iter.err != nil {
		debugf("Iter %p returning false: %s", iter, iter.err)
		iter.m.Unlock()
		return false
	} else if iter.op.cursorId == 0 {
		iter.err = ErrNotFound
		debugf("Iter %p exhausted with cursor=0", iter)
		iter.m.Unlock()
		return false
	}

	panic("unreachable")
}

// All retrieves all documents from the result set into the provided slice
// and closes the iterator.
//
// The result argument must necessarily be the address for a slice. The slice
// may be nil or previously allocated.
//
// WARNING: Obviously, All must not be used with result sets that may be
// potentially large, since it may consume all memory until the system
// crashes. Consider building the query with a Limit clause to ensure the
// result size is bounded.
//
// For instance:
//
//    var result []struct{ Value int }
//    iter := collection.Find(nil).Limit(100).Iter()
//    err := iter.All(&result)
//    if err != nil {
//        return err
//    }
//
func (iter *Iter) All(result interface{}) error {
	resultv := reflect.ValueOf(result)
	if resultv.Kind() != reflect.Ptr || resultv.Elem().Kind() != reflect.Slice {
		panic("result argument must be a slice address")
	}
	slicev := resultv.Elem()
	// Work over the full capacity so previously allocated elements are reused.
	slicev = slicev.Slice(0, slicev.Cap())
	elemt := slicev.Type().Elem()
	i := 0
	for {
		if slicev.Len() == i {
			// Capacity exhausted: append a fresh element, then re-expand.
			elemp := reflect.New(elemt)
			if !iter.Next(elemp.Interface()) {
				break
			}
			slicev = reflect.Append(slicev, elemp.Elem())
			slicev = slicev.Slice(0, slicev.Cap())
		} else {
			// Unmarshal straight into the existing element.
			if !iter.Next(slicev.Index(i).Addr().Interface()) {
				break
			}
		}
		i++
	}
	resultv.Elem().Set(slicev.Slice(0, i))
	return iter.Close()
}

// All works like Iter.All.
func (q *Query) All(result interface{}) error {
	return q.Iter().All(result)
}

// The For method is obsolete and will be removed in a future release.
// See Iter as an elegant replacement.
func (q *Query) For(result interface{}, f func() error) error {
	return q.Iter().For(result, f)
}

// The For method is obsolete and will be removed in a future release.
// See Iter as an elegant replacement.
func (iter *Iter) For(result interface{}, f func() error) (err error) {
	valid := false
	v := reflect.ValueOf(result)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
		switch v.Kind() {
		case reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice:
			valid = v.IsNil()
		}
	}
	if !valid {
		panic("For needs a pointer to nil reference value. See the documentation.")
	}
	zero := reflect.Zero(v.Type())
	for {
		// Reset the value before each unmarshal so stale fields don't leak
		// from one document into the next.
		v.Set(zero)
		if !iter.Next(result) {
			break
		}
		err = f()
		if err != nil {
			return err
		}
	}
	return iter.Err()
}

// acquireSocket acquires a socket from the same server that the iterator
// cursor was obtained from.
//
// WARNING: This method must not be called with iter.m locked. Acquiring the
// socket depends on the cluster sync loop, and the cluster sync loop might
// attempt actions which cause replyFunc to be called, inducing a deadlock.
func (iter *Iter) acquireSocket() (*mongoSocket, error) {
	socket, err := iter.session.acquireSocket(true)
	if err != nil {
		return nil, err
	}
	if socket.Server() != iter.server {
		// Socket server changed during iteration. This may happen
		// with Eventual sessions, if a Refresh is done, or if a
		// monotonic session gets a write and shifts from secondary
		// to primary. Our cursor is in a specific server, though.
		iter.session.m.Lock()
		sockTimeout := iter.session.sockTimeout
		iter.session.m.Unlock()
		socket.Release()
		socket, _, err = iter.server.AcquireSocket(0, sockTimeout)
		if err != nil {
			return nil, err
		}
		err := iter.session.socketLogin(socket)
		if err != nil {
			socket.Release()
			return nil, err
		}
	}
	return socket, nil
}

// getMore issues a get-more on the iterator's cursor. Called with iter.m
// locked; the lock is dropped around the socket acquisition (see the
// acquireSocket warning above).
func (iter *Iter) getMore() {
	// Increment now so that unlocking the iterator won't cause a
	// different goroutine to get here as well.
	iter.docsToReceive++
	iter.m.Unlock()
	socket, err := iter.acquireSocket()
	iter.m.Lock()
	if err != nil {
		iter.err = err
		return
	}
	defer socket.Release()
	debugf("Iter %p requesting more documents", iter)
	if iter.limit > 0 {
		// The -1 below accounts for the fact docsToReceive was incremented above.
		limit := iter.limit - int32(iter.docsToReceive-1) - int32(iter.docData.Len())
		if limit < iter.op.limit {
			iter.op.limit = limit
		}
	}
	if err := socket.Query(&iter.op); err != nil {
		iter.docsToReceive--
		iter.err = err
	}
}

// countCmd is the wire representation of the MongoDB "count" command.
type countCmd struct {
	Count string
	Query interface{}
	Limit int32 ",omitempty"
	Skip  int32 ",omitempty"
}

// Count returns the total number of documents in the result set.
func (q *Query) Count() (n int, err error) {
	q.m.Lock()
	session := q.session
	op := q.op
	limit := q.limit
	q.m.Unlock()

	// op.collection is "db.collection"; split it for the command below.
	c := strings.Index(op.collection, ".")
	if c < 0 {
		return 0, errors.New("Bad collection name: " + op.collection)
	}

	dbname := op.collection[:c]
	cname := op.collection[c+1:]
	query := op.query
	if query == nil {
		query = bson.D{}
	}
	result := struct{ N int }{}
	err = session.DB(dbname).Run(countCmd{cname, query, limit, op.skip}, &result)
	return result.N, err
}

// Count returns the total number of documents in the collection.
func (c *Collection) Count() (n int, err error) {
	return c.Find(nil).Count()
}

// distinctCmd is the wire representation of the MongoDB "distinct" command.
type distinctCmd struct {
	Collection string "distinct"
	Key        string
	Query      interface{} ",omitempty"
}

// Distinct unmarshals into result the list of distinct values for the given key.
//
// For example:
//
//     var result []int
//     err := collection.Find(bson.M{"gender": "F"}).Distinct("age", &result)
//
// Relevant documentation:
//
//     http://www.mongodb.org/display/DOCS/Aggregation
//
func (q *Query) Distinct(key string, result interface{}) error {
	q.m.Lock()
	session := q.session
	op := q.op // Copy.
	q.m.Unlock()

	c := strings.Index(op.collection, ".")
	if c < 0 {
		return errors.New("Bad collection name: " + op.collection)
	}

	dbname := op.collection[:c]
	cname := op.collection[c+1:]

	var doc struct{ Values bson.Raw }
	err := session.DB(dbname).Run(distinctCmd{cname, key, op.query}, &doc)
	if err != nil {
		return err
	}

	return doc.Values.Unmarshal(result)
}

// mapReduceCmd is the wire representation of the MongoDB "mapreduce" command.
type mapReduceCmd struct {
	Collection string "mapreduce"
	Map        string ",omitempty"
	Reduce     string ",omitempty"
	Finalize   string ",omitempty"
	Limit      int32  ",omitempty"
	Out        interface{}
	Query      interface{} ",omitempty"
	Sort       interface{} ",omitempty"
	Scope      interface{} ",omitempty"
	Verbose    bool        ",omitempty"
}

// mapReduceResult holds the raw server reply for a mapreduce command.
type mapReduceResult struct {
	Results    bson.Raw
	Result     bson.Raw
	TimeMillis int64 "timeMillis"
	Counts     struct{ Input, Emit, Output int }
	Ok         bool
	Err        string
	Timing     *MapReduceTime
}

type MapReduce struct {
	Map      string      // Map Javascript function code (required)
	Reduce   string      // Reduce Javascript function code (required)
	Finalize string      // Finalize Javascript function code (optional)
	Out      interface{} // Output collection name or document. If nil, results are inlined into the result parameter.
	Scope    interface{} // Optional global scope for Javascript functions
	Verbose  bool
}

type MapReduceInfo struct {
	InputCount  int            // Number of documents mapped
	EmitCount   int            // Number of times reduce called emit
	OutputCount int            // Number of documents in resulting collection
	Database    string         // Output database, if results are not inlined
	Collection  string         // Output collection, if results are not inlined
	Time        int64          // Time to run the job, in nanoseconds
	VerboseTime *MapReduceTime // Only defined if Verbose was true
}

type MapReduceTime struct {
	Total    int64            // Total time, in nanoseconds
	Map      int64 "mapTime"  // Time within map function, in nanoseconds
	EmitLoop int64 "emitLoop" // Time within the emit/map loop, in nanoseconds
}

// MapReduce executes a map/reduce job for documents covered by the query.
// That kind of job is suitable for very flexible bulk aggregation of data
// performed at the server side via Javascript functions.
//
// Results from the job may be returned as a result of the query itself
// through the result parameter in case they'll certainly fit in memory
// and in a single document.  If there's the possibility that the amount
// of data might be too large, results must be stored back in an alternative
// collection or even a separate database, by setting the Out field of the
// provided MapReduce job.  In that case, provide nil as the result parameter.
//
// These are some of the ways to set Out:
//
//     nil
//         Inline results into the result parameter.
//
//     bson.M{"replace": "mycollection"}
//         The output will be inserted into a collection which replaces any
//         existing collection with the same name.
//
//     bson.M{"merge": "mycollection"}
//         This option will merge new data into the old output collection. In
//         other words, if the same key exists in both the result set and the
//         old collection, the new key will overwrite the old one.
//
//     bson.M{"reduce": "mycollection"}
//         If documents exist for a given key in the result set and in the old
//         collection, then a reduce operation (using the specified reduce
//         function) will be performed on the two values and the result will be
//         written to the output collection. If a finalize function was
//         provided, this will be run after the reduce as well.
//
//     bson.M{...., "db": "mydb"}
//         Any of the above options can have the "db" key included for doing
//         the respective action in a separate database.
//
// The following is a trivial example which will count the number of
// occurrences of a field named n on each document in a collection, and
// will return results inline:
//
//     job := &mgo.MapReduce{
//             Map:      "function() { emit(this.n, 1) }",
//             Reduce:   "function(key, values) { return Array.sum(values) }",
//     }
//     var result []struct { Id int "_id"; Value int }
//     _, err := collection.Find(nil).MapReduce(job, &result)
//     if err != nil {
//             return err
//     }
//     for _, item := range result {
//             fmt.Println(item.Value)
//     }
//
// This function is compatible with MongoDB 1.7.4+.
//
// Relevant documentation:
//
//     http://www.mongodb.org/display/DOCS/MapReduce
//
func (q *Query) MapReduce(job *MapReduce, result interface{}) (info *MapReduceInfo, err error) {
	q.m.Lock()
	session := q.session
	op := q.op // Copy.
	limit := q.limit
	q.m.Unlock()

	c := strings.Index(op.collection, ".")
	if c < 0 {
		return nil, errors.New("Bad collection name: " + op.collection)
	}

	dbname := op.collection[:c]
	cname := op.collection[c+1:]

	cmd := mapReduceCmd{
		Collection: cname,
		Map:        job.Map,
		Reduce:     job.Reduce,
		Finalize:   job.Finalize,
		Out:        fixMROut(job.Out),
		Scope:      job.Scope,
		Verbose:    job.Verbose,
		Query:      op.query,
		Sort:       op.options.OrderBy,
		Limit:      limit,
	}

	if cmd.Out == nil {
		// No explicit output target: ask the server to inline results.
		cmd.Out = bson.D{{"inline", 1}}
	}

	var doc mapReduceResult
	err = session.DB(dbname).Run(&cmd, &doc)
	if err != nil {
		return nil, err
	}
	if doc.Err != "" {
		return nil, errors.New(doc.Err)
	}

	info = &MapReduceInfo{
		InputCount:  doc.Counts.Input,
		EmitCount:   doc.Counts.Emit,
		OutputCount: doc.Counts.Output,
		Time:        doc.TimeMillis * 1e6, // Server reports ms; info holds ns.
	}

	// Kind 0x02 is a BSON string (a plain collection name); kind 0x03 is an
	// embedded document holding collection and db names.
	if doc.Result.Kind == 0x02 {
		err = doc.Result.Unmarshal(&info.Collection)
		info.Database = dbname
	} else if doc.Result.Kind == 0x03 {
		var v struct{ Collection, Db string }
		err = doc.Result.Unmarshal(&v)
		info.Collection = v.Collection
		info.Database = v.Db
	}

	if doc.Timing != nil {
		info.VerboseTime = doc.Timing
		// Convert the server's millisecond timings to nanoseconds.
		info.VerboseTime.Total *= 1e6
		info.VerboseTime.Map *= 1e6
		info.VerboseTime.EmitLoop *= 1e6
	}

	if err != nil {
		return nil, err
	}
	if result != nil {
		return info, doc.Results.Unmarshal(result)
	}
	return info, nil
}

// The "out" option in the MapReduce command must be ordered. This was
// found after the implementation was accepting maps for a long time,
// so rather than breaking the API, we'll fix the order if necessary.
// Details about the order requirement may be seen in MongoDB's code:
//
//     http://goo.gl/L8jwJX
//
func fixMROut(out interface{}) interface{} {
	outv := reflect.ValueOf(out)
	if outv.Kind() != reflect.Map || outv.Type().Key() != reflect.TypeOf("") {
		// Not a map with string keys; pass through unchanged.
		return out
	}
	outs := make(bson.D, outv.Len())

	outTypeIndex := -1
	for i, k := range outv.MapKeys() {
		ks := k.String()
		outs[i].Name = ks
		outs[i].Value = outv.MapIndex(k).Interface()
		switch ks {
		case "normal", "replace", "merge", "reduce", "inline":
			outTypeIndex = i
		}
	}
	// The output-type key must come first; swap it into position if needed.
	if outTypeIndex > 0 {
		outs[0], outs[outTypeIndex] = outs[outTypeIndex], outs[0]
	}
	return outs
}

// Change holds fields for running a findAndModify MongoDB command via
// the Query.Apply method.
type Change struct {
	Update    interface{} // The update document
	Upsert    bool        // Whether to insert in case the document isn't found
	Remove    bool        // Whether to remove the document found rather than updating
	ReturnNew bool        // Should the modified document be returned rather than the old one
}

// findModifyCmd is the wire representation of the "findAndModify" command.
type findModifyCmd struct {
	Collection                  string      "findAndModify"
	Query, Update, Sort, Fields interface{} ",omitempty"
	Upsert, Remove, New         bool        ",omitempty"
}

// valueResult holds the raw server reply for a findAndModify command.
type valueResult struct {
	Value     bson.Raw
	LastError LastError "lastErrorObject"
}

// Apply runs the findAndModify MongoDB command, which allows updating, upserting
// or removing a document matching a query and atomically returning either the old
// version (the default) or the new version of the document (when ReturnNew is true).
// If no objects are found Apply returns ErrNotFound.
//
// The Sort and Select query methods affect the result of Apply.  In case
// multiple documents match the query, Sort enables selecting which document to
// act upon by ordering it first. Select enables retrieving only a selection
// of fields of the new or old document.
//
// This simple example increments a counter and prints its new value:
//
//     change := mgo.Change{
//             Update: bson.M{"$inc": bson.M{"n": 1}},
//             ReturnNew: true,
//     }
//     info, err = col.Find(M{"_id": id}).Apply(change, &doc)
//     fmt.Println(doc.N)
//
// This method depends on MongoDB >= 2.0 to work properly.
//
// Relevant documentation:
//
//     http://www.mongodb.org/display/DOCS/findAndModify+Command
//     http://www.mongodb.org/display/DOCS/Updating
//     http://www.mongodb.org/display/DOCS/Atomic+Operations
//
func (q *Query) Apply(change Change, result interface{}) (info *ChangeInfo, err error) {
	q.m.Lock()
	session := q.session
	op := q.op // Copy.
	q.m.Unlock()

	c := strings.Index(op.collection, ".")
	if c < 0 {
		return nil, errors.New("bad collection name: " + op.collection)
	}

	dbname := op.collection[:c]
	cname := op.collection[c+1:]

	cmd := findModifyCmd{
		Collection: cname,
		Update:     change.Update,
		Upsert:     change.Upsert,
		Remove:     change.Remove,
		New:        change.ReturnNew,
		Query:      op.query,
		Sort:       op.options.OrderBy,
		Fields:     op.selector,
	}

	// Run on a Strong-mode clone so the command hits the primary.
	session = session.Clone()
	defer session.Close()
	session.SetMode(Strong, false)

	var doc valueResult
	err = session.DB(dbname).Run(&cmd, &doc)
	if err != nil {
		if qerr, ok := err.(*QueryError); ok && qerr.Message == "No matching object found" {
			return nil, ErrNotFound
		}
		return nil, err
	}
	if doc.LastError.N == 0 {
		return nil, ErrNotFound
	}
	// Kind 0x0A is BSON null: no document value to unmarshal.
	if doc.Value.Kind != 0x0A && result != nil {
		err = doc.Value.Unmarshal(result)
		if err != nil {
			return nil, err
		}
	}
	info = &ChangeInfo{}
	lerr := &doc.LastError
	if lerr.UpdatedExisting {
		info.Updated = lerr.N
	} else if change.Remove {
		info.Removed = lerr.N
	} else if change.Upsert {
		info.UpsertedId = lerr.UpsertedId
	}
	return info, nil
}

// The BuildInfo type encapsulates details about the running MongoDB server.
// // Note that the VersionArray field was introduced in MongoDB 2.0+, but it is // internally assembled from the Version information for previous versions. // In both cases, VersionArray is guaranteed to have at least 4 entries. type BuildInfo struct { Version string VersionArray []int `bson:"versionArray"` // On MongoDB 2.0+; assembled from Version otherwise GitVersion string `bson:"gitVersion"` OpenSSLVersion string `bson:"OpenSSLVersion"` SysInfo string `bson:"sysInfo"` Bits int Debug bool MaxObjectSize int `bson:"maxBsonObjectSize"` } // VersionAtLeast returns whether the BuildInfo version is greater than or // equal to the provided version number. If more than one number is // provided, numbers will be considered as major, minor, and so on. func (bi *BuildInfo) VersionAtLeast(version ...int) bool { for i := range version { if i == len(bi.VersionArray) { return false } if bi.VersionArray[i] < version[i] { return false } } return true } // BuildInfo retrieves the version and other details about the // running MongoDB server. func (s *Session) BuildInfo() (info BuildInfo, err error) { err = s.Run(bson.D{{"buildInfo", "1"}}, &info) if len(info.VersionArray) == 0 { for _, a := range strings.Split(info.Version, ".") { i, err := strconv.Atoi(a) if err != nil { break } info.VersionArray = append(info.VersionArray, i) } } for len(info.VersionArray) < 4 { info.VersionArray = append(info.VersionArray, 0) } if i := strings.IndexByte(info.GitVersion, ' '); i >= 0 { // Strip off the " modules: enterprise" suffix. This is a _git version_. // That information may be moved to another field if people need it. info.GitVersion = info.GitVersion[:i] } return } // --------------------------------------------------------------------------- // Internal session handling helpers. func (s *Session) acquireSocket(slaveOk bool) (*mongoSocket, error) { // Read-only lock to check for previously reserved socket. 
s.m.RLock() // If there is a slave socket reserved and its use is acceptable, take it as long // as there isn't a master socket which would be preferred by the read preference mode. if s.slaveSocket != nil && s.slaveOk && slaveOk && (s.masterSocket == nil || s.consistency != PrimaryPreferred && s.consistency != Monotonic) { socket := s.slaveSocket socket.Acquire() s.m.RUnlock() return socket, nil } if s.masterSocket != nil { socket := s.masterSocket socket.Acquire() s.m.RUnlock() return socket, nil } s.m.RUnlock() // No go. We may have to request a new socket and change the session, // so try again but with an exclusive lock now. s.m.Lock() defer s.m.Unlock() if s.slaveSocket != nil && s.slaveOk && slaveOk && (s.masterSocket == nil || s.consistency != PrimaryPreferred && s.consistency != Monotonic) { s.slaveSocket.Acquire() return s.slaveSocket, nil } if s.masterSocket != nil { s.masterSocket.Acquire() return s.masterSocket, nil } // Still not good. We need a new socket. sock, err := s.cluster().AcquireSocket(s.consistency, slaveOk && s.slaveOk, s.syncTimeout, s.sockTimeout, s.queryConfig.op.serverTags, s.poolLimit) if err != nil { return nil, err } // Authenticate the new socket. if err = s.socketLogin(sock); err != nil { sock.Release() return nil, err } // Keep track of the new socket, if necessary. // Note that, as a special case, if the Eventual session was // not refreshed (s.slaveSocket != nil), it means the developer // asked to preserve an existing reserved socket, so we'll // keep a master one around too before a Refresh happens. if s.consistency != Eventual || s.slaveSocket != nil { s.setSocket(sock) } // Switch over a Monotonic session to the master. if !slaveOk && s.consistency == Monotonic { s.slaveOk = false } return sock, nil } // setSocket binds socket to this section. 
func (s *Session) setSocket(socket *mongoSocket) {
	// Acquire reserves the socket and reports server info; whether the
	// server is a master decides which slot the socket is kept in.
	info := socket.Acquire()
	if info.Master {
		if s.masterSocket != nil {
			panic("setSocket(master) with existing master socket reserved")
		}
		s.masterSocket = socket
	} else {
		if s.slaveSocket != nil {
			panic("setSocket(slave) with existing slave socket reserved")
		}
		s.slaveSocket = socket
	}
}

// unsetSocket releases any slave and/or master sockets reserved.
func (s *Session) unsetSocket() {
	if s.masterSocket != nil {
		s.masterSocket.Release()
	}
	if s.slaveSocket != nil {
		s.slaveSocket.Release()
	}
	s.masterSocket = nil
	s.slaveSocket = nil
}

// replyFunc returns the callback used to handle server replies for this
// iterator. The callback runs under iter.m and updates the iterator's
// bookkeeping: the pending document count, the cursor id, and any error
// observed while receiving the reply.
func (iter *Iter) replyFunc() replyFunc {
	return func(err error, op *replyOp, docNum int, docData []byte) {
		iter.m.Lock()
		// One reply accounted for, regardless of its outcome.
		iter.docsToReceive--
		if err != nil {
			iter.err = err
			debugf("Iter %p received an error: %s", iter, err.Error())
		} else if docNum == -1 {
			// The reply carried no documents at all.
			debugf("Iter %p received no documents (cursor=%d).", iter, op.cursorId)
			if op != nil && op.cursorId != 0 {
				// It's a tailable cursor.
				iter.op.cursorId = op.cursorId
			} else if op != nil && op.cursorId == 0 && op.flags&1 == 1 {
				// Cursor likely timed out.
				iter.err = ErrCursor
			} else {
				iter.err = ErrNotFound
			}
		} else {
			rdocs := int(op.replyDocs)
			if docNum == 0 {
				// First document of a batch: account for the remainder
				// of the batch in the pending count.
				iter.docsToReceive += rdocs - 1
				docsToProcess := iter.docData.Len() + rdocs
				if iter.limit == 0 || int32(docsToProcess) < iter.limit {
					// NOTE(review): docsBeforeMore presumably marks the point at
					// which a getMore should be issued for prefetching; -1 below
					// appears to disable that once the limit is reached — confirm
					// against the iterator's Next implementation.
					iter.docsBeforeMore = docsToProcess - int(iter.prefetch*float64(rdocs))
				} else {
					iter.docsBeforeMore = -1
				}
				iter.op.cursorId = op.cursorId
			}
			// XXX Handle errors and flags.
debugf("Iter %p received reply document %d/%d (cursor=%d)", iter, docNum+1, rdocs, op.cursorId) iter.docData.Push(docData) } iter.gotReply.Broadcast() iter.m.Unlock() } } type writeCmdResult struct { Ok bool N int NModified int `bson:"nModified"` Upserted []struct { Index int Id interface{} `_id` } ConcernError writeConcernError `bson:"writeConcernError"` Errors []writeCmdError `bson:"writeErrors"` } type writeConcernError struct { Code int ErrMsg string } type writeCmdError struct { Index int Code int ErrMsg string } func (r *writeCmdResult) QueryErrors() []error { var errs []error for _, err := range r.Errors { errs = append(errs, &QueryError{Code: err.Code, Message: err.ErrMsg}) } return errs } // writeOp runs the given modifying operation, potentially followed up // by a getLastError command in case the session is in safe mode. The // LastError result is made available in lerr, and if lerr.Err is set it // will also be returned as err. func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err error) { s := c.Database.Session socket, err := s.acquireSocket(c.Database.Name == "local") if err != nil { return nil, err } defer socket.Release() s.m.RLock() safeOp := s.safeOp s.m.RUnlock() if socket.ServerInfo().MaxWireVersion >= 2 { // Servers with a more recent write protocol benefit from write commands. if op, ok := op.(*insertOp); ok && len(op.documents) > 1000 { var errors []error // Maximum batch size is 1000. Must split out in separate operations for compatibility. all := op.documents for i := 0; i < len(all); i += 1000 { l := i + 1000 if l > len(all) { l = len(all) } op.documents = all[i:l] lerr, err := c.writeOpCommand(socket, safeOp, op, ordered) if err != nil { errors = append(errors, lerr.errors...) 
if op.flags&1 == 0 { return &LastError{errors: errors}, err } } } if len(errors) == 0 { return nil, nil } return &LastError{errors: errors}, errors[0] } return c.writeOpCommand(socket, safeOp, op, ordered) } else if updateOps, ok := op.(bulkUpdateOp); ok { var errors []error for _, updateOp := range updateOps { lerr, err := c.writeOpQuery(socket, safeOp, updateOp, ordered) if err != nil { errors = append(errors, lerr.errors...) if ordered { return &LastError{errors: errors}, err } } } if len(errors) == 0 { return nil, nil } return &LastError{errors: errors}, errors[0] } return c.writeOpQuery(socket, safeOp, op, ordered) } func (c *Collection) writeOpQuery(socket *mongoSocket, safeOp *queryOp, op interface{}, ordered bool) (lerr *LastError, err error) { if safeOp == nil { return nil, socket.Query(op) } var mutex sync.Mutex var replyData []byte var replyErr error mutex.Lock() query := *safeOp // Copy the data. query.collection = c.Database.Name + ".$cmd" query.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { replyData = docData replyErr = err mutex.Unlock() } err = socket.Query(op, &query) if err != nil { return nil, err } mutex.Lock() // Wait. if replyErr != nil { return nil, replyErr // XXX TESTME } if hasErrMsg(replyData) { // Looks like getLastError itself failed. 
err = checkQueryError(query.collection, replyData) if err != nil { return nil, err } } result := &LastError{} bson.Unmarshal(replyData, &result) debugf("Result from writing query: %#v", result) if result.Err != "" { return result, result } return result, nil } func (c *Collection) writeOpCommand(socket *mongoSocket, safeOp *queryOp, op interface{}, ordered bool) (lerr *LastError, err error) { var writeConcern interface{} if safeOp == nil { writeConcern = bson.D{{"w", 0}} } else { writeConcern = safeOp.query.(*getLastError) } var cmd bson.D switch op := op.(type) { case *insertOp: // http://docs.mongodb.org/manual/reference/command/insert cmd = bson.D{ {"insert", c.Name}, {"documents", op.documents}, {"writeConcern", writeConcern}, {"ordered", op.flags&1 == 0}, } case *updateOp: // http://docs.mongodb.org/manual/reference/command/update cmd = bson.D{ {"update", c.Name}, {"updates", []interface{}{op}}, {"writeConcern", writeConcern}, {"ordered", ordered}, } case bulkUpdateOp: // http://docs.mongodb.org/manual/reference/command/update cmd = bson.D{ {"update", c.Name}, {"updates", op}, {"writeConcern", writeConcern}, {"ordered", ordered}, } case *deleteOp: // http://docs.mongodb.org/manual/reference/command/delete selector := op.selector if selector == nil { selector = bson.D{} } cmd = bson.D{ {"delete", c.Name}, {"deletes", []bson.D{{{"q", selector}, {"limit", op.flags & 1}}}}, {"writeConcern", writeConcern}, //{"ordered", }, } } var result writeCmdResult err = c.Database.run(socket, cmd, &result) debugf("Write command result: %#v (err=%v)", result, err) lerr = &LastError{ UpdatedExisting: result.N > 0 && len(result.Upserted) == 0, N: result.N, modified: result.NModified, errors: result.QueryErrors(), } if len(result.Upserted) > 0 { lerr.UpsertedId = result.Upserted[0].Id } if len(result.Errors) > 0 { e := result.Errors[0] lerr.Code = e.Code lerr.Err = e.ErrMsg err = lerr } else if result.ConcernError.Code != 0 { e := result.ConcernError lerr.Code = e.Code lerr.Err = 
e.ErrMsg err = lerr } if err == nil && safeOp == nil { return nil, nil } return lerr, err } func hasErrMsg(d []byte) bool { l := len(d) for i := 0; i+8 < l; i++ { if d[i] == '\x02' && d[i+1] == 'e' && d[i+2] == 'r' && d[i+3] == 'r' && d[i+4] == 'm' && d[i+5] == 's' && d[i+6] == 'g' && d[i+7] == '\x00' { return true } } return false } charm-2.1.1/src/gopkg.in/mgo.v2/README.md0000664000175000017500000000021012672604565016473 0ustar marcomarcoThe MongoDB driver for Go ------------------------- Please go to [http://labix.org/mgo](http://labix.org/mgo) for all project details. charm-2.1.1/src/gopkg.in/mgo.v2/cluster_test.go0000664000175000017500000014142012672604565020274 0ustar marcomarco// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo_test import ( "fmt" "io" "net" "strings" "sync" "time" . "gopkg.in/check.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" ) func (s *S) TestNewSession(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() // Do a dummy operation to wait for connection. coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"_id": 1}) c.Assert(err, IsNil) // Tweak safety and query settings to ensure other has copied those. session.SetSafe(nil) session.SetBatch(-1) other := session.New() defer other.Close() session.SetSafe(&mgo.Safe{}) // Clone was copied while session was unsafe, so no errors. otherColl := other.DB("mydb").C("mycoll") err = otherColl.Insert(M{"_id": 1}) c.Assert(err, IsNil) // Original session was made safe again. err = coll.Insert(M{"_id": 1}) c.Assert(err, NotNil) // With New(), each session has its own socket now. stats := mgo.GetStats() c.Assert(stats.MasterConns, Equals, 2) c.Assert(stats.SocketsInUse, Equals, 2) // Ensure query parameters were cloned. err = otherColl.Insert(M{"_id": 2}) c.Assert(err, IsNil) // Ping the database to ensure the nonce has been received already. c.Assert(other.Ping(), IsNil) mgo.ResetStats() iter := otherColl.Find(M{}).Iter() c.Assert(err, IsNil) m := M{} ok := iter.Next(m) c.Assert(ok, Equals, true) err = iter.Close() c.Assert(err, IsNil) // If Batch(-1) is in effect, a single document must have been received. 
stats = mgo.GetStats() c.Assert(stats.ReceivedDocs, Equals, 1) } func (s *S) TestCloneSession(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() // Do a dummy operation to wait for connection. coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"_id": 1}) c.Assert(err, IsNil) // Tweak safety and query settings to ensure clone is copying those. session.SetSafe(nil) session.SetBatch(-1) clone := session.Clone() defer clone.Close() session.SetSafe(&mgo.Safe{}) // Clone was copied while session was unsafe, so no errors. cloneColl := clone.DB("mydb").C("mycoll") err = cloneColl.Insert(M{"_id": 1}) c.Assert(err, IsNil) // Original session was made safe again. err = coll.Insert(M{"_id": 1}) c.Assert(err, NotNil) // With Clone(), same socket is shared between sessions now. stats := mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 1) c.Assert(stats.SocketRefs, Equals, 2) // Refreshing one of them should let the original socket go, // while preserving the safety settings. clone.Refresh() err = cloneColl.Insert(M{"_id": 1}) c.Assert(err, IsNil) // Must have used another connection now. stats = mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 2) c.Assert(stats.SocketRefs, Equals, 2) // Ensure query parameters were cloned. err = cloneColl.Insert(M{"_id": 2}) c.Assert(err, IsNil) // Ping the database to ensure the nonce has been received already. c.Assert(clone.Ping(), IsNil) mgo.ResetStats() iter := cloneColl.Find(M{}).Iter() c.Assert(err, IsNil) m := M{} ok := iter.Next(m) c.Assert(ok, Equals, true) err = iter.Close() c.Assert(err, IsNil) // If Batch(-1) is in effect, a single document must have been received. 
	stats = mgo.GetStats()
	c.Assert(stats.ReceivedDocs, Equals, 1)
}

func (s *S) TestModeStrong(c *C) {
	session, err := mgo.Dial("localhost:40012")
	c.Assert(err, IsNil)
	defer session.Close()

	// The later Strong setting must win over the earlier Monotonic one.
	session.SetMode(mgo.Monotonic, false)
	session.SetMode(mgo.Strong, false)

	c.Assert(session.Mode(), Equals, mgo.Strong)

	// In Strong mode even an ismaster query must be answered by the master.
	result := M{}
	cmd := session.DB("admin").C("$cmd")
	err = cmd.Find(M{"ismaster": 1}).One(&result)
	c.Assert(err, IsNil)
	c.Assert(result["ismaster"], Equals, true)

	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"a": 1})
	c.Assert(err, IsNil)

	// Wait since the sync also uses sockets.
	for len(session.LiveServers()) != 3 {
		c.Log("Waiting for cluster sync to finish...")
		time.Sleep(5e8)
	}

	// Only the master connection may be reserved by this session.
	stats := mgo.GetStats()
	c.Assert(stats.MasterConns, Equals, 1)
	c.Assert(stats.SlaveConns, Equals, 2)
	c.Assert(stats.SocketsInUse, Equals, 1)

	// SetMode with refresh=true must release the reserved socket.
	session.SetMode(mgo.Strong, true)

	stats = mgo.GetStats()
	c.Assert(stats.SocketsInUse, Equals, 0)
}

func (s *S) TestModeMonotonic(c *C) {
	// Must necessarily connect to a slave, otherwise the
	// master connection will be available first.
	session, err := mgo.Dial("localhost:40012")
	c.Assert(err, IsNil)
	defer session.Close()

	session.SetMode(mgo.Monotonic, false)

	c.Assert(session.Mode(), Equals, mgo.Monotonic)

	// Monotonic reads start out on a slave...
	var result struct{ IsMaster bool }
	cmd := session.DB("admin").C("$cmd")
	err = cmd.Find(M{"ismaster": 1}).One(&result)
	c.Assert(err, IsNil)
	c.Assert(result.IsMaster, Equals, false)

	// ...until a write forces a switch to the master...
	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"a": 1})
	c.Assert(err, IsNil)

	// ...after which reads stick to the master.
	err = cmd.Find(M{"ismaster": 1}).One(&result)
	c.Assert(err, IsNil)
	c.Assert(result.IsMaster, Equals, true)

	// Wait since the sync also uses sockets.
for len(session.LiveServers()) != 3 { c.Log("Waiting for cluster sync to finish...") time.Sleep(5e8) } stats := mgo.GetStats() c.Assert(stats.MasterConns, Equals, 1) c.Assert(stats.SlaveConns, Equals, 2) c.Assert(stats.SocketsInUse, Equals, 2) session.SetMode(mgo.Monotonic, true) stats = mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 0) } func (s *S) TestModeMonotonicAfterStrong(c *C) { // Test that a strong session shifting to a monotonic // one preserves the socket untouched. session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) defer session.Close() // Insert something to force a connection to the master. coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) session.SetMode(mgo.Monotonic, false) // Wait since the sync also uses sockets. for len(session.LiveServers()) != 3 { c.Log("Waiting for cluster sync to finish...") time.Sleep(5e8) } // Master socket should still be reserved. stats := mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 1) // Confirm it's the master even though it's Monotonic by now. result := M{} cmd := session.DB("admin").C("$cmd") err = cmd.Find(M{"ismaster": 1}).One(&result) c.Assert(err, IsNil) c.Assert(result["ismaster"], Equals, true) } func (s *S) TestModeStrongAfterMonotonic(c *C) { // Test that shifting from Monotonic to Strong while // using a slave socket will keep the socket reserved // until the master socket is necessary, so that no // switch over occurs unless it's actually necessary. // Must necessarily connect to a slave, otherwise the // master connection will be available first. session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, false) // Ensure we're talking to a slave, and reserve the socket. result := M{} err = session.Run("ismaster", &result) c.Assert(err, IsNil) c.Assert(result["ismaster"], Equals, false) // Switch to a Strong session. 
session.SetMode(mgo.Strong, false) // Wait since the sync also uses sockets. for len(session.LiveServers()) != 3 { c.Log("Waiting for cluster sync to finish...") time.Sleep(5e8) } // Slave socket should still be reserved. stats := mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 1) // But any operation will switch it to the master. result = M{} err = session.Run("ismaster", &result) c.Assert(err, IsNil) c.Assert(result["ismaster"], Equals, true) } func (s *S) TestModeMonotonicWriteOnIteration(c *C) { // Must necessarily connect to a slave, otherwise the // master connection will be available first. session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, false) c.Assert(session.Mode(), Equals, mgo.Monotonic) coll1 := session.DB("mydb").C("mycoll1") coll2 := session.DB("mydb").C("mycoll2") ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { err := coll1.Insert(M{"n": n}) c.Assert(err, IsNil) } // Release master so we can grab a slave again. session.Refresh() // Wait until synchronization is done. for { n, err := coll1.Count() c.Assert(err, IsNil) if n == len(ns) { break } } iter := coll1.Find(nil).Batch(2).Iter() i := 0 m := M{} for iter.Next(&m) { i++ if i > 3 { err := coll2.Insert(M{"n": 47 + i}) c.Assert(err, IsNil) } } c.Assert(i, Equals, len(ns)) } func (s *S) TestModeEventual(c *C) { // Must necessarily connect to a slave, otherwise the // master connection will be available first. 
session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Eventual, false) c.Assert(session.Mode(), Equals, mgo.Eventual) result := M{} err = session.Run("ismaster", &result) c.Assert(err, IsNil) c.Assert(result["ismaster"], Equals, false) coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) result = M{} err = session.Run("ismaster", &result) c.Assert(err, IsNil) c.Assert(result["ismaster"], Equals, false) // Wait since the sync also uses sockets. for len(session.LiveServers()) != 3 { c.Log("Waiting for cluster sync to finish...") time.Sleep(5e8) } stats := mgo.GetStats() c.Assert(stats.MasterConns, Equals, 1) c.Assert(stats.SlaveConns, Equals, 2) c.Assert(stats.SocketsInUse, Equals, 0) } func (s *S) TestModeEventualAfterStrong(c *C) { // Test that a strong session shifting to an eventual // one preserves the socket untouched. session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) defer session.Close() // Insert something to force a connection to the master. coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) session.SetMode(mgo.Eventual, false) // Wait since the sync also uses sockets. for len(session.LiveServers()) != 3 { c.Log("Waiting for cluster sync to finish...") time.Sleep(5e8) } // Master socket should still be reserved. stats := mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 1) // Confirm it's the master even though it's Eventual by now. result := M{} cmd := session.DB("admin").C("$cmd") err = cmd.Find(M{"ismaster": 1}).One(&result) c.Assert(err, IsNil) c.Assert(result["ismaster"], Equals, true) session.SetMode(mgo.Eventual, true) stats = mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 0) } func (s *S) TestModeStrongFallover(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) defer session.Close() // With strong consistency, this will open a socket to the master. 
result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) // Kill the master. host := result.Host s.Stop(host) // This must fail, since the connection was broken. err = session.Run("serverStatus", result) c.Assert(err, Equals, io.EOF) // With strong consistency, it fails again until reset. err = session.Run("serverStatus", result) c.Assert(err, Equals, io.EOF) session.Refresh() // Now we should be able to talk to the new master. // Increase the timeout since this may take quite a while. session.SetSyncTimeout(3 * time.Minute) err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(result.Host, Not(Equals), host) // Insert some data to confirm it's indeed a master. err = session.DB("mydb").C("mycoll").Insert(M{"n": 42}) c.Assert(err, IsNil) } func (s *S) TestModePrimaryHiccup(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) defer session.Close() // With strong consistency, this will open a socket to the master. result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) // Establish a few extra sessions to create spare sockets to // the master. This increases a bit the chances of getting an // incorrect cached socket. var sessions []*mgo.Session for i := 0; i < 20; i++ { sessions = append(sessions, session.Copy()) err = sessions[len(sessions)-1].Run("serverStatus", result) c.Assert(err, IsNil) } for i := range sessions { sessions[i].Close() } // Kill the master, but bring it back immediatelly. host := result.Host s.Stop(host) s.StartAll() // This must fail, since the connection was broken. err = session.Run("serverStatus", result) c.Assert(err, Equals, io.EOF) // With strong consistency, it fails again until reset. err = session.Run("serverStatus", result) c.Assert(err, Equals, io.EOF) session.Refresh() // Now we should be able to talk to the new master. // Increase the timeout since this may take quite a while. 
session.SetSyncTimeout(3 * time.Minute) // Insert some data to confirm it's indeed a master. err = session.DB("mydb").C("mycoll").Insert(M{"n": 42}) c.Assert(err, IsNil) } func (s *S) TestModeMonotonicFallover(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, true) // Insert something to force a switch to the master. coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) // Wait a bit for this to be synchronized to slaves. time.Sleep(3 * time.Second) result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) // Kill the master. host := result.Host s.Stop(host) // This must fail, since the connection was broken. err = session.Run("serverStatus", result) c.Assert(err, Equals, io.EOF) // With monotonic consistency, it fails again until reset. err = session.Run("serverStatus", result) c.Assert(err, Equals, io.EOF) session.Refresh() // Now we should be able to talk to the new master. err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(result.Host, Not(Equals), host) } func (s *S) TestModeMonotonicWithSlaveFallover(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) defer session.Close() ssresult := &struct{ Host string }{} imresult := &struct{ IsMaster bool }{} // Figure the master while still using the strong session. err = session.Run("serverStatus", ssresult) c.Assert(err, IsNil) err = session.Run("isMaster", imresult) c.Assert(err, IsNil) master := ssresult.Host c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) // Create new monotonic session with an explicit address to ensure // a slave is synchronized before the master, otherwise a connection // with the master may be used below for lack of other options. 
var addr string switch { case strings.HasSuffix(ssresult.Host, ":40021"): addr = "localhost:40022" case strings.HasSuffix(ssresult.Host, ":40022"): addr = "localhost:40021" case strings.HasSuffix(ssresult.Host, ":40023"): addr = "localhost:40021" default: c.Fatal("Unknown host: ", ssresult.Host) } session, err = mgo.Dial(addr) c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, true) // Check the address of the socket associated with the monotonic session. c.Log("Running serverStatus and isMaster with monotonic session") err = session.Run("serverStatus", ssresult) c.Assert(err, IsNil) err = session.Run("isMaster", imresult) c.Assert(err, IsNil) slave := ssresult.Host c.Assert(imresult.IsMaster, Equals, false, Commentf("%s is not a slave", slave)) c.Assert(master, Not(Equals), slave) // Kill the master. s.Stop(master) // Session must still be good, since we were talking to a slave. err = session.Run("serverStatus", ssresult) c.Assert(err, IsNil) c.Assert(ssresult.Host, Equals, slave, Commentf("Monotonic session moved from %s to %s", slave, ssresult.Host)) // If we try to insert something, it'll have to hold until the new // master is available to move the connection, and work correctly. coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) // Must now be talking to the new master. err = session.Run("serverStatus", ssresult) c.Assert(err, IsNil) err = session.Run("isMaster", imresult) c.Assert(err, IsNil) c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) // ... which is not the old one, since it's still dead. 
c.Assert(ssresult.Host, Not(Equals), master) } func (s *S) TestModeEventualFallover(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) defer session.Close() result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) master := result.Host session.SetMode(mgo.Eventual, true) // Should connect to the master when needed. coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) // Wait a bit for this to be synchronized to slaves. time.Sleep(3 * time.Second) // Kill the master. s.Stop(master) // Should still work, with the new master now. coll = session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(result.Host, Not(Equals), master) } func (s *S) TestModeSecondaryJustPrimary(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Secondary, true) err = session.Ping() c.Assert(err, ErrorMatches, "no reachable servers") } func (s *S) TestModeSecondaryPreferredJustPrimary(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.SecondaryPreferred, true) result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) } func (s *S) TestModeSecondaryPreferredFallover(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() // Ensure secondaries are available for being picked up. 
for len(session.LiveServers()) != 3 { c.Log("Waiting for cluster sync to finish...") time.Sleep(5e8) } session.SetMode(mgo.SecondaryPreferred, true) result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(supvName(result.Host), Not(Equals), "rs1a") secondary := result.Host // Should connect to the primary when needed. coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) // Wait a bit for this to be synchronized to slaves. time.Sleep(3 * time.Second) // Kill the primary. s.Stop("localhost:40011") // It can still talk to the selected secondary. err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(result.Host, Equals, secondary) // But cannot speak to the primary until reset. coll = session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, Equals, io.EOF) session.Refresh() // Can still talk to a secondary. err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(supvName(result.Host), Not(Equals), "rs1a") s.StartAll() // Should now be able to talk to the primary again. coll = session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) } func (s *S) TestModePrimaryPreferredFallover(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.PrimaryPreferred, true) result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(supvName(result.Host), Equals, "rs1a") // Kill the primary. s.Stop("localhost:40011") // Should now fail as there was a primary socket in use already. err = session.Run("serverStatus", result) c.Assert(err, Equals, io.EOF) // Refresh so the reserved primary socket goes away. session.Refresh() // Should be able to talk to the secondary. err = session.Run("serverStatus", result) c.Assert(err, IsNil) s.StartAll() // Should wait for the new primary to become available. 
coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) // And should use the new primary in general, as it is preferred. err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(supvName(result.Host), Equals, "rs1a") } func (s *S) TestModePrimaryFallover(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() session.SetSyncTimeout(3 * time.Second) session.SetMode(mgo.Primary, true) result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(supvName(result.Host), Equals, "rs1a") // Kill the primary. s.Stop("localhost:40011") session.Refresh() err = session.Ping() c.Assert(err, ErrorMatches, "no reachable servers") } func (s *S) TestModeSecondary(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Secondary, true) result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(supvName(result.Host), Not(Equals), "rs1a") secondary := result.Host coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(result.Host, Equals, secondary) } func (s *S) TestPreserveSocketCountOnSync(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() stats := mgo.GetStats() for stats.MasterConns+stats.SlaveConns != 3 { stats = mgo.GetStats() c.Log("Waiting for all connections to be established...") time.Sleep(5e8) } c.Assert(stats.SocketsAlive, Equals, 3) // Kill the master (with rs1, 'a' is always the master). s.Stop("localhost:40011") // Wait for the logic to run for a bit and bring it back. 
startedAll := make(chan bool) go func() { time.Sleep(5e9) s.StartAll() startedAll <- true }() // Do not allow the test to return before the goroutine above is done. defer func() { <-startedAll }() // Do an action to kick the resync logic in, and also to // wait until the cluster recognizes the server is back. result := struct{ Ok bool }{} err = session.Run("getLastError", &result) c.Assert(err, IsNil) c.Assert(result.Ok, Equals, true) for i := 0; i != 20; i++ { stats = mgo.GetStats() if stats.SocketsAlive == 3 { break } c.Logf("Waiting for 3 sockets alive, have %d", stats.SocketsAlive) time.Sleep(5e8) } // Ensure the number of sockets is preserved after syncing. stats = mgo.GetStats() c.Assert(stats.SocketsAlive, Equals, 3) c.Assert(stats.SocketsInUse, Equals, 1) c.Assert(stats.SocketRefs, Equals, 1) } // Connect to the master of a deployment with a single server, // run an insert, and then ensure the insert worked and that a // single connection was established. func (s *S) TestTopologySyncWithSingleMaster(c *C) { // Use hostname here rather than IP, to make things trickier. session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1, "b": 2}) c.Assert(err, IsNil) // One connection used for discovery. Master socket recycled for // insert. Socket is reserved after insert. stats := mgo.GetStats() c.Assert(stats.MasterConns, Equals, 1) c.Assert(stats.SlaveConns, Equals, 0) c.Assert(stats.SocketsInUse, Equals, 1) // Refresh session and socket must be released. session.Refresh() stats = mgo.GetStats() c.Assert(stats.SocketsInUse, Equals, 0) } func (s *S) TestTopologySyncWithSlaveSeed(c *C) { // That's supposed to be a slave. Must run discovery // and find out master to insert successfully. 
	session, err := mgo.Dial("localhost:40012")
	c.Assert(err, IsNil)
	defer session.Close()

	// Insert error is deliberately ignored here; success is verified
	// through the explicit getLastError round-trip below.
	coll := session.DB("mydb").C("mycoll")
	coll.Insert(M{"a": 1, "b": 2})

	result := struct{ Ok bool }{}
	err = session.Run("getLastError", &result)
	c.Assert(err, IsNil)
	c.Assert(result.Ok, Equals, true)

	// One connection to each during discovery. Master
	// socket recycled for insert.
	stats := mgo.GetStats()
	c.Assert(stats.MasterConns, Equals, 1)
	c.Assert(stats.SlaveConns, Equals, 2)

	// Only one socket reference alive, in the master socket owned
	// by the above session.
	c.Assert(stats.SocketsInUse, Equals, 1)

	// Refresh it, and it must be gone.
	session.Refresh()
	stats = mgo.GetStats()
	c.Assert(stats.SocketsInUse, Equals, 0)
}

// TestSyncTimeout stops the only server and checks that an operation
// fails with "no reachable servers" after roughly SetSyncTimeout's
// duration (bounded between timeout and 2*timeout).
func (s *S) TestSyncTimeout(c *C) {
	if *fast {
		c.Skip("-fast")
	}
	session, err := mgo.Dial("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	s.Stop("localhost:40001")

	timeout := 3 * time.Second
	session.SetSyncTimeout(timeout)
	started := time.Now()

	// Do something.
	result := struct{ Ok bool }{}
	err = session.Run("getLastError", &result)
	c.Assert(err, ErrorMatches, "no reachable servers")
	// Elapsed time must be at least timeout but less than 2*timeout.
	c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true)
	c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true)
}

// TestDialWithTimeout checks that dialing an unused port gives up
// within the requested timeout window.
func (s *S) TestDialWithTimeout(c *C) {
	if *fast {
		c.Skip("-fast")
	}
	timeout := 2 * time.Second
	started := time.Now()

	// 40009 isn't used by the test servers.
session, err := mgo.DialWithTimeout("localhost:40009", timeout) if session != nil { session.Close() } c.Assert(err, ErrorMatches, "no reachable servers") c.Assert(session, IsNil) c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true) } func (s *S) TestSocketTimeout(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() s.Freeze("localhost:40001") timeout := 3 * time.Second session.SetSocketTimeout(timeout) started := time.Now() // Do something. result := struct{ Ok bool }{} err = session.Run("getLastError", &result) c.Assert(err, ErrorMatches, ".*: i/o timeout") c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true) } func (s *S) TestSocketTimeoutOnDial(c *C) { if *fast { c.Skip("-fast") } timeout := 1 * time.Second defer mgo.HackSyncSocketTimeout(timeout)() s.Freeze("localhost:40001") started := time.Now() session, err := mgo.DialWithTimeout("localhost:40001", timeout) c.Assert(err, ErrorMatches, "no reachable servers") c.Assert(session, IsNil) c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) c.Assert(started.After(time.Now().Add(-20*time.Second)), Equals, true) } func (s *S) TestSocketTimeoutOnInactiveSocket(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() timeout := 2 * time.Second session.SetSocketTimeout(timeout) // Do something that relies on the timeout and works. c.Assert(session.Ping(), IsNil) // Freeze and wait for the timeout to go by. s.Freeze("localhost:40001") time.Sleep(timeout + 500*time.Millisecond) s.Thaw("localhost:40001") // Do something again. The timeout above should not have killed // the socket as there was nothing to be done. 
c.Assert(session.Ping(), IsNil) } func (s *S) TestDialWithReplicaSetName(c *C) { seedLists := [][]string{ // rs1 primary and rs2 primary []string{"localhost:40011", "localhost:40021"}, // rs1 primary and rs2 secondary []string{"localhost:40011", "localhost:40022"}, // rs1 secondary and rs2 primary []string{"localhost:40012", "localhost:40021"}, // rs1 secondary and rs2 secondary []string{"localhost:40012", "localhost:40022"}, } rs2Members := []string{":40021", ":40022", ":40023"} verifySyncedServers := func(session *mgo.Session, numServers int) { // wait for the server(s) to be synced for len(session.LiveServers()) != numServers { c.Log("Waiting for cluster sync to finish...") time.Sleep(5e8) } // ensure none of the rs2 set members are communicated with for _, addr := range session.LiveServers() { for _, rs2Member := range rs2Members { c.Assert(strings.HasSuffix(addr, rs2Member), Equals, false) } } } // only communication with rs1 members is expected for _, seedList := range seedLists { info := mgo.DialInfo{ Addrs: seedList, Timeout: 5 * time.Second, ReplicaSetName: "rs1", } session, err := mgo.DialWithInfo(&info) c.Assert(err, IsNil) verifySyncedServers(session, 3) session.Close() info.Direct = true session, err = mgo.DialWithInfo(&info) c.Assert(err, IsNil) verifySyncedServers(session, 1) session.Close() connectionUrl := fmt.Sprintf("mongodb://%v/?replicaSet=rs1", strings.Join(seedList, ",")) session, err = mgo.Dial(connectionUrl) c.Assert(err, IsNil) verifySyncedServers(session, 3) session.Close() connectionUrl += "&connect=direct" session, err = mgo.Dial(connectionUrl) c.Assert(err, IsNil) verifySyncedServers(session, 1) session.Close() } } func (s *S) TestDirect(c *C) { session, err := mgo.Dial("localhost:40012?connect=direct") c.Assert(err, IsNil) defer session.Close() // We know that server is a slave. 
session.SetMode(mgo.Monotonic, true) result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true) stats := mgo.GetStats() c.Assert(stats.SocketsAlive, Equals, 1) c.Assert(stats.SocketsInUse, Equals, 1) c.Assert(stats.SocketRefs, Equals, 1) // We've got no master, so it'll timeout. session.SetSyncTimeout(5e8 * time.Nanosecond) coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"test": 1}) c.Assert(err, ErrorMatches, "no reachable servers") // Writing to the local database is okay. coll = session.DB("local").C("mycoll") defer coll.RemoveAll(nil) id := bson.NewObjectId() err = coll.Insert(M{"_id": id}) c.Assert(err, IsNil) // Data was stored in the right server. n, err := coll.Find(M{"_id": id}).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 1) // Server hasn't changed. result.Host = "" err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true) } func (s *S) TestDirectToUnknownStateMember(c *C) { session, err := mgo.Dial("localhost:40041?connect=direct") c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, true) result := &struct{ Host string }{} err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(strings.HasSuffix(result.Host, ":40041"), Equals, true) // We've got no master, so it'll timeout. session.SetSyncTimeout(5e8 * time.Nanosecond) coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"test": 1}) c.Assert(err, ErrorMatches, "no reachable servers") // Slave is still reachable. 
result.Host = "" err = session.Run("serverStatus", result) c.Assert(err, IsNil) c.Assert(strings.HasSuffix(result.Host, ":40041"), Equals, true) } func (s *S) TestFailFast(c *C) { info := mgo.DialInfo{ Addrs: []string{"localhost:99999"}, Timeout: 5 * time.Second, FailFast: true, } started := time.Now() _, err := mgo.DialWithInfo(&info) c.Assert(err, ErrorMatches, "no reachable servers") c.Assert(started.After(time.Now().Add(-time.Second)), Equals, true) } type OpCounters struct { Insert int Query int Update int Delete int GetMore int Command int } func getOpCounters(server string) (c *OpCounters, err error) { session, err := mgo.Dial(server + "?connect=direct") if err != nil { return nil, err } defer session.Close() session.SetMode(mgo.Monotonic, true) result := struct{ OpCounters }{} err = session.Run("serverStatus", &result) return &result.OpCounters, err } func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) { session, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) defer session.Close() ssresult := &struct{ Host string }{} imresult := &struct{ IsMaster bool }{} // Figure the master while still using the strong session. err = session.Run("serverStatus", ssresult) c.Assert(err, IsNil) err = session.Run("isMaster", imresult) c.Assert(err, IsNil) master := ssresult.Host c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) // Collect op counters for everyone. opc21a, err := getOpCounters("localhost:40021") c.Assert(err, IsNil) opc22a, err := getOpCounters("localhost:40022") c.Assert(err, IsNil) opc23a, err := getOpCounters("localhost:40023") c.Assert(err, IsNil) // Do a SlaveOk query through MongoS mongos, err := mgo.Dial("localhost:40202") c.Assert(err, IsNil) defer mongos.Close() mongos.SetMode(mgo.Monotonic, true) coll := mongos.DB("mydb").C("mycoll") result := &struct{}{} for i := 0; i != 5; i++ { err := coll.Find(nil).One(result) c.Assert(err, Equals, mgo.ErrNotFound) } // Collect op counters for everyone again. 
opc21b, err := getOpCounters("localhost:40021") c.Assert(err, IsNil) opc22b, err := getOpCounters("localhost:40022") c.Assert(err, IsNil) opc23b, err := getOpCounters("localhost:40023") c.Assert(err, IsNil) var masterDelta, slaveDelta int switch hostPort(master) { case "40021": masterDelta = opc21b.Query - opc21a.Query slaveDelta = (opc22b.Query - opc22a.Query) + (opc23b.Query - opc23a.Query) case "40022": masterDelta = opc22b.Query - opc22a.Query slaveDelta = (opc21b.Query - opc21a.Query) + (opc23b.Query - opc23a.Query) case "40023": masterDelta = opc23b.Query - opc23a.Query slaveDelta = (opc21b.Query - opc21a.Query) + (opc22b.Query - opc22a.Query) default: c.Fatal("Uh?") } c.Check(masterDelta, Equals, 0) // Just the counting itself. c.Check(slaveDelta, Equals, 5) // The counting for both, plus 5 queries above. } func (s *S) TestRemovalOfClusterMember(c *C) { if *fast { c.Skip("-fast") } master, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) defer master.Close() // Wait for cluster to fully sync up. for i := 0; i < 10; i++ { if len(master.LiveServers()) == 3 { break } time.Sleep(5e8) } if len(master.LiveServers()) != 3 { c.Fatalf("Test started with bad cluster state: %v", master.LiveServers()) } result := &struct { IsMaster bool Me string }{} slave := master.Copy() slave.SetMode(mgo.Monotonic, true) // Monotonic can hold a non-master socket persistently. err = slave.Run("isMaster", result) c.Assert(err, IsNil) c.Assert(result.IsMaster, Equals, false) slaveAddr := result.Me defer func() { config := map[string]string{ "40021": `{_id: 1, host: "127.0.0.1:40021", priority: 1, tags: {rs2: "a"}}`, "40022": `{_id: 2, host: "127.0.0.1:40022", priority: 0, tags: {rs2: "b"}}`, "40023": `{_id: 3, host: "127.0.0.1:40023", priority: 0, tags: {rs2: "c"}}`, } master.Refresh() master.Run(bson.D{{"$eval", `rs.add(` + config[hostPort(slaveAddr)] + `)`}}, nil) master.Close() slave.Close() // Ensure suite syncs up with the changes before next test. 
s.Stop(":40201") s.StartAll() time.Sleep(8 * time.Second) // TODO Find a better way to find out when mongos is fully aware that all // servers are up. Without that follow up tests that depend on mongos will // break due to their expectation of things being in a working state. }() c.Logf("========== Removing slave: %s ==========", slaveAddr) master.Run(bson.D{{"$eval", `rs.remove("` + slaveAddr + `")`}}, nil) master.Refresh() // Give the cluster a moment to catch up by doing a roundtrip to the master. err = master.Ping() c.Assert(err, IsNil) time.Sleep(3e9) // This must fail since the slave has been taken off the cluster. err = slave.Ping() c.Assert(err, NotNil) for i := 0; i < 15; i++ { if len(master.LiveServers()) == 2 { break } time.Sleep(time.Second) } live := master.LiveServers() if len(live) != 2 { c.Errorf("Removed server still considered live: %#s", live) } c.Log("========== Test succeeded. ==========") } func (s *S) TestPoolLimitSimple(c *C) { for test := 0; test < 2; test++ { var session *mgo.Session var err error if test == 0 { session, err = mgo.Dial("localhost:40001") c.Assert(err, IsNil) session.SetPoolLimit(1) } else { session, err = mgo.Dial("localhost:40001?maxPoolSize=1") c.Assert(err, IsNil) } defer session.Close() // Put one socket in use. c.Assert(session.Ping(), IsNil) done := make(chan time.Duration) // Now block trying to get another one due to the pool limit. go func() { copy := session.Copy() defer copy.Close() started := time.Now() c.Check(copy.Ping(), IsNil) done <- time.Now().Sub(started) }() time.Sleep(300 * time.Millisecond) // Put the one socket back in the pool, freeing it for the copy. 
session.Refresh() delay := <-done c.Assert(delay > 300*time.Millisecond, Equals, true, Commentf("Delay: %s", delay)) } } func (s *S) TestPoolLimitMany(c *C) { if *fast { c.Skip("-fast") } session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session.Close() stats := mgo.GetStats() for stats.MasterConns+stats.SlaveConns != 3 { stats = mgo.GetStats() c.Log("Waiting for all connections to be established...") time.Sleep(500 * time.Millisecond) } c.Assert(stats.SocketsAlive, Equals, 3) const poolLimit = 64 session.SetPoolLimit(poolLimit) // Consume the whole limit for the master. var master []*mgo.Session for i := 0; i < poolLimit; i++ { s := session.Copy() defer s.Close() c.Assert(s.Ping(), IsNil) master = append(master, s) } before := time.Now() go func() { time.Sleep(3e9) master[0].Refresh() }() // Then, a single ping must block, since it would need another // connection to the master, over the limit. Once the goroutine // above releases its socket, it should move on. session.Ping() delay := time.Now().Sub(before) c.Assert(delay > 3e9, Equals, true) c.Assert(delay < 6e9, Equals, true) } func (s *S) TestSetModeEventualIterBug(c *C) { session1, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session1.Close() session1.SetMode(mgo.Eventual, false) coll1 := session1.DB("mydb").C("mycoll") const N = 100 for i := 0; i < N; i++ { err = coll1.Insert(M{"_id": i}) c.Assert(err, IsNil) } c.Logf("Waiting until secondary syncs") for { n, err := coll1.Count() c.Assert(err, IsNil) if n == N { c.Logf("Found all") break } } session2, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) defer session2.Close() session2.SetMode(mgo.Eventual, false) coll2 := session2.DB("mydb").C("mycoll") i := 0 iter := coll2.Find(nil).Batch(10).Iter() var result struct{} for iter.Next(&result) { i++ } c.Assert(iter.Close(), Equals, nil) c.Assert(i, Equals, N) } func (s *S) TestCustomDialOld(c *C) { dials := make(chan bool, 16) dial := func(addr net.Addr) (net.Conn, 
error) { tcpaddr, ok := addr.(*net.TCPAddr) if !ok { return nil, fmt.Errorf("unexpected address type: %T", addr) } dials <- true return net.DialTCP("tcp", nil, tcpaddr) } info := mgo.DialInfo{ Addrs: []string{"localhost:40012"}, Dial: dial, } // Use hostname here rather than IP, to make things trickier. session, err := mgo.DialWithInfo(&info) c.Assert(err, IsNil) defer session.Close() const N = 3 for i := 0; i < N; i++ { select { case <-dials: case <-time.After(5 * time.Second): c.Fatalf("expected %d dials, got %d", N, i) } } select { case <-dials: c.Fatalf("got more dials than expected") case <-time.After(100 * time.Millisecond): } } func (s *S) TestCustomDialNew(c *C) { dials := make(chan bool, 16) dial := func(addr *mgo.ServerAddr) (net.Conn, error) { dials <- true if addr.TCPAddr().Port == 40012 { c.Check(addr.String(), Equals, "localhost:40012") } return net.DialTCP("tcp", nil, addr.TCPAddr()) } info := mgo.DialInfo{ Addrs: []string{"localhost:40012"}, DialServer: dial, } // Use hostname here rather than IP, to make things trickier. session, err := mgo.DialWithInfo(&info) c.Assert(err, IsNil) defer session.Close() const N = 3 for i := 0; i < N; i++ { select { case <-dials: case <-time.After(5 * time.Second): c.Fatalf("expected %d dials, got %d", N, i) } } select { case <-dials: c.Fatalf("got more dials than expected") case <-time.After(100 * time.Millisecond): } } func (s *S) TestPrimaryShutdownOnAuthShard(c *C) { if *fast { c.Skip("-fast") } // Dial the shard. session, err := mgo.Dial("localhost:40203") c.Assert(err, IsNil) defer session.Close() // Login and insert something to make it more realistic. session.DB("admin").Login("root", "rapadura") coll := session.DB("mydb").C("mycoll") err = coll.Insert(bson.M{"n": 1}) c.Assert(err, IsNil) // Dial the replica set to figure the master out. rs, err := mgo.Dial("root:rapadura@localhost:40031") c.Assert(err, IsNil) defer rs.Close() // With strong consistency, this will open a socket to the master. 
result := &struct{ Host string }{} err = rs.Run("serverStatus", result) c.Assert(err, IsNil) // Kill the master. host := result.Host s.Stop(host) // This must fail, since the connection was broken. err = rs.Run("serverStatus", result) c.Assert(err, Equals, io.EOF) // This won't work because the master just died. err = coll.Insert(bson.M{"n": 2}) c.Assert(err, NotNil) // Refresh session and wait for re-election. session.Refresh() for i := 0; i < 60; i++ { err = coll.Insert(bson.M{"n": 3}) if err == nil { break } c.Logf("Waiting for replica set to elect a new master. Last error: %v", err) time.Sleep(500 * time.Millisecond) } c.Assert(err, IsNil) count, err := coll.Count() c.Assert(count > 1, Equals, true) } func (s *S) TestNearestSecondary(c *C) { defer mgo.HackPingDelay(300 * time.Millisecond)() rs1a := "127.0.0.1:40011" rs1b := "127.0.0.1:40012" rs1c := "127.0.0.1:40013" s.Freeze(rs1b) session, err := mgo.Dial(rs1a) c.Assert(err, IsNil) defer session.Close() // Wait for the sync up to run through the first couple of servers. for len(session.LiveServers()) != 2 { c.Log("Waiting for two servers to be alive...") time.Sleep(100 * time.Millisecond) } // Extra delay to ensure the third server gets penalized. time.Sleep(500 * time.Millisecond) // Release third server. s.Thaw(rs1b) // Wait for it to come up. for len(session.LiveServers()) != 3 { c.Log("Waiting for all servers to be alive...") time.Sleep(100 * time.Millisecond) } session.SetMode(mgo.Monotonic, true) var result struct{ Host string } // See which slave picks the line, several times to avoid chance. for i := 0; i < 10; i++ { session.Refresh() err = session.Run("serverStatus", &result) c.Assert(err, IsNil) c.Assert(hostPort(result.Host), Equals, hostPort(rs1c)) } if *fast { // Don't hold back for several seconds. return } // Now hold the other server for long enough to penalize it. s.Freeze(rs1c) time.Sleep(5 * time.Second) s.Thaw(rs1c) // Wait for the ping to be processed. 
time.Sleep(500 * time.Millisecond) // Repeating the test should now pick the former server consistently. for i := 0; i < 10; i++ { session.Refresh() err = session.Run("serverStatus", &result) c.Assert(err, IsNil) c.Assert(hostPort(result.Host), Equals, hostPort(rs1b)) } } func (s *S) TestNearestServer(c *C) { defer mgo.HackPingDelay(300 * time.Millisecond)() rs1a := "127.0.0.1:40011" rs1b := "127.0.0.1:40012" rs1c := "127.0.0.1:40013" session, err := mgo.Dial(rs1a) c.Assert(err, IsNil) defer session.Close() s.Freeze(rs1a) s.Freeze(rs1b) // Extra delay to ensure the first two servers get penalized. time.Sleep(500 * time.Millisecond) // Release them. s.Thaw(rs1a) s.Thaw(rs1b) // Wait for everyone to come up. for len(session.LiveServers()) != 3 { c.Log("Waiting for all servers to be alive...") time.Sleep(100 * time.Millisecond) } session.SetMode(mgo.Nearest, true) var result struct{ Host string } // See which server picks the line, several times to avoid chance. for i := 0; i < 10; i++ { session.Refresh() err = session.Run("serverStatus", &result) c.Assert(err, IsNil) c.Assert(hostPort(result.Host), Equals, hostPort(rs1c)) } if *fast { // Don't hold back for several seconds. return } // Now hold the two secondaries for long enough to penalize them. s.Freeze(rs1b) s.Freeze(rs1c) time.Sleep(5 * time.Second) s.Thaw(rs1b) s.Thaw(rs1c) // Wait for the ping to be processed. time.Sleep(500 * time.Millisecond) // Repeating the test should now pick the primary server consistently. 
	// The primary should now be the nearest server again.
	for i := 0; i < 10; i++ {
		session.Refresh()
		err = session.Run("serverStatus", &result)
		c.Assert(err, IsNil)
		c.Assert(hostPort(result.Host), Equals, hostPort(rs1a))
	}
}

// TestConnectCloseConcurrency dials and immediately closes many
// sessions in parallel, exercising races between connection setup,
// background pinging, and teardown.
func (s *S) TestConnectCloseConcurrency(c *C) {
	restore := mgo.HackPingDelay(500 * time.Millisecond)
	defer restore()
	var wg sync.WaitGroup
	const n = 500
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func() {
			defer wg.Done()
			session, err := mgo.Dial("localhost:40001")
			if err != nil {
				// NOTE(review): Fatal from a goroutine other than the
				// test's own is unreliable for stopping the test; an
				// Error + return would be safer — confirm against
				// gocheck's goroutine semantics.
				c.Fatal(err)
			}
			time.Sleep(1) // 1ns: effectively just a scheduling yield before Close.
			session.Close()
		}()
	}
	wg.Wait()
}

// TestSelectServers verifies that SelectServers restricts Eventual-mode
// operations to servers matching the given tag set.
func (s *S) TestSelectServers(c *C) {
	if !s.versionAtLeast(2, 2) {
		c.Skip("read preferences introduced in 2.2")
	}
	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	session.SetMode(mgo.Eventual, true)

	var result struct{ Host string }

	session.Refresh()
	session.SelectServers(bson.D{{"rs1", "b"}})
	err = session.Run("serverStatus", &result)
	c.Assert(err, IsNil)
	c.Assert(hostPort(result.Host), Equals, "40012")

	session.Refresh()
	session.SelectServers(bson.D{{"rs1", "c"}})
	err = session.Run("serverStatus", &result)
	c.Assert(err, IsNil)
	c.Assert(hostPort(result.Host), Equals, "40013")
}

// TestSelectServersWithMongos checks tag-based server selection when
// queries are routed through a mongos, by comparing per-server opcounter
// deltas.
func (s *S) TestSelectServersWithMongos(c *C) {
	if !s.versionAtLeast(2, 2) {
		c.Skip("read preferences introduced in 2.2")
	}
	session, err := mgo.Dial("localhost:40021")
	c.Assert(err, IsNil)
	defer session.Close()

	ssresult := &struct{ Host string }{}
	imresult := &struct{ IsMaster bool }{}

	// Figure the master while still using the strong session.
	err = session.Run("serverStatus", ssresult)
	c.Assert(err, IsNil)
	err = session.Run("isMaster", imresult)
	c.Assert(err, IsNil)
	master := ssresult.Host
	c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master))

	// Map the two non-master members to their rs2 tag values.
	var slave1, slave2 string
	switch hostPort(master) {
	case "40021":
		slave1, slave2 = "b", "c"
	case "40022":
		slave1, slave2 = "a", "c"
	case "40023":
		slave1, slave2 = "a", "b"
	}

	// Collect op counters for everyone.
opc21a, err := getOpCounters("localhost:40021") c.Assert(err, IsNil) opc22a, err := getOpCounters("localhost:40022") c.Assert(err, IsNil) opc23a, err := getOpCounters("localhost:40023") c.Assert(err, IsNil) // Do a SlaveOk query through MongoS mongos, err := mgo.Dial("localhost:40202") c.Assert(err, IsNil) defer mongos.Close() mongos.SetMode(mgo.Monotonic, true) mongos.Refresh() mongos.SelectServers(bson.D{{"rs2", slave1}}) coll := mongos.DB("mydb").C("mycoll") result := &struct{}{} for i := 0; i != 5; i++ { err := coll.Find(nil).One(result) c.Assert(err, Equals, mgo.ErrNotFound) } mongos.Refresh() mongos.SelectServers(bson.D{{"rs2", slave2}}) coll = mongos.DB("mydb").C("mycoll") for i := 0; i != 7; i++ { err := coll.Find(nil).One(result) c.Assert(err, Equals, mgo.ErrNotFound) } // Collect op counters for everyone again. opc21b, err := getOpCounters("localhost:40021") c.Assert(err, IsNil) opc22b, err := getOpCounters("localhost:40022") c.Assert(err, IsNil) opc23b, err := getOpCounters("localhost:40023") c.Assert(err, IsNil) switch hostPort(master) { case "40021": c.Check(opc21b.Query-opc21a.Query, Equals, 0) c.Check(opc22b.Query-opc22a.Query, Equals, 5) c.Check(opc23b.Query-opc23a.Query, Equals, 7) case "40022": c.Check(opc21b.Query-opc21a.Query, Equals, 5) c.Check(opc22b.Query-opc22a.Query, Equals, 0) c.Check(opc23b.Query-opc23a.Query, Equals, 7) case "40023": c.Check(opc21b.Query-opc21a.Query, Equals, 5) c.Check(opc22b.Query-opc22a.Query, Equals, 7) c.Check(opc23b.Query-opc23a.Query, Equals, 0) default: c.Fatal("Uh?") } } charm-2.1.1/src/gopkg.in/mgo.v2/txn/0000775000175000017500000000000012672604565016034 5ustar marcomarcocharm-2.1.1/src/gopkg.in/mgo.v2/txn/debug.go0000664000175000017500000000412012672604565017446 0ustar marcomarcopackage txn import ( "bytes" "fmt" "sort" "sync/atomic" "gopkg.in/mgo.v2/bson" ) var ( debugEnabled bool logger log_Logger ) type log_Logger interface { Output(calldepth int, s string) error } // Specify the *log.Logger where logged 
// messages should be sent to.
func SetLogger(l log_Logger) {
	logger = l
}

// SetDebug enables or disables debugging.
func SetDebug(debug bool) {
	debugEnabled = debug
}

// ErrChaos is returned by operations interrupted by the package's
// chaos-injection test hooks.
var ErrChaos = fmt.Errorf("interrupted by chaos")

// debugId is an atomically incremented counter used to hand out a
// distinct debug prefix per logical operation.
var debugId uint32

// debugPrefix returns a short unique prefix ("a) ", "ba) ", ...) used
// to correlate related debug log lines. Each call consumes one id.
func debugPrefix() string {
	d := atomic.AddUint32(&debugId, 1) - 1
	s := make([]byte, 0, 10)
	// Encode the 32-bit id four bits at a time with letters a-p,
	// least-significant nibble first, stopping after the last
	// non-zero nibble.
	for i := uint(0); i < 8; i++ {
		s = append(s, "abcdefghijklmnop"[(d>>(4*i))&0xf])
		if d>>(4*(i+1)) == 0 {
			break
		}
	}
	s = append(s, ')', ' ')
	return string(s)
}

// logf logs the formatted message unconditionally, if a logger is set.
func logf(format string, args ...interface{}) {
	if logger != nil {
		logger.Output(2, fmt.Sprintf(format, argsForLog(args)...))
	}
}

// debugf logs the formatted message only when debugging is enabled.
func debugf(format string, args ...interface{}) {
	if debugEnabled && logger != nil {
		logger.Output(2, fmt.Sprintf(format, argsForLog(args)...))
	}
}

// argsForLog rewrites log arguments into more readable forms:
// ObjectIds become hex strings, and doc-key maps are rendered with
// sorted keys so output is deterministic. The slice is modified in
// place and returned.
func argsForLog(args []interface{}) []interface{} {
	for i, arg := range args {
		switch v := arg.(type) {
		case bson.ObjectId:
			args[i] = v.Hex()
		case []bson.ObjectId:
			lst := make([]string, len(v))
			for j, id := range v {
				lst[j] = id.Hex()
			}
			args[i] = lst
		case map[docKey][]bson.ObjectId:
			buf := &bytes.Buffer{}
			var dkeys docKeys
			for dkey := range v {
				dkeys = append(dkeys, dkey)
			}
			sort.Sort(dkeys)
			// The i here shadows the outer loop index only within this
			// range body; args[i] after the loop uses the outer index.
			for i, dkey := range dkeys {
				if i > 0 {
					buf.WriteByte(' ')
				}
				buf.WriteString(fmt.Sprintf("%v: {", dkey))
				for j, id := range v[dkey] {
					if j > 0 {
						buf.WriteByte(' ')
					}
					buf.WriteString(id.Hex())
				}
				buf.WriteByte('}')
			}
			args[i] = buf.String()
		case map[docKey][]int64:
			buf := &bytes.Buffer{}
			var dkeys docKeys
			for dkey := range v {
				dkeys = append(dkeys, dkey)
			}
			sort.Sort(dkeys)
			for i, dkey := range dkeys {
				if i > 0 {
					buf.WriteByte(' ')
				}
				buf.WriteString(fmt.Sprintf("%v: %v", dkey, v[dkey]))
			}
			args[i] = buf.String()
		}
	}
	return args
}
charm-2.1.1/src/gopkg.in/mgo.v2/txn/tarjan.go0000664000175000017500000000425612672604565017651 0ustar marcomarcopackage txn

import (
	"gopkg.in/mgo.v2/bson"
	"sort"
)

func tarjanSort(successors map[bson.ObjectId][]bson.ObjectId) [][]bson.ObjectId { //
http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm data := &tarjanData{ successors: successors, nodes: make([]tarjanNode, 0, len(successors)), index: make(map[bson.ObjectId]int, len(successors)), } for id := range successors { id := bson.ObjectId(string(id)) if _, seen := data.index[id]; !seen { data.strongConnect(id) } } // Sort connected components to stabilize the algorithm. for _, ids := range data.output { if len(ids) > 1 { sort.Sort(idList(ids)) } } return data.output } type tarjanData struct { successors map[bson.ObjectId][]bson.ObjectId output [][]bson.ObjectId nodes []tarjanNode stack []bson.ObjectId index map[bson.ObjectId]int } type tarjanNode struct { lowlink int stacked bool } type idList []bson.ObjectId func (l idList) Len() int { return len(l) } func (l idList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } func (l idList) Less(i, j int) bool { return l[i] < l[j] } func (data *tarjanData) strongConnect(id bson.ObjectId) *tarjanNode { index := len(data.nodes) data.index[id] = index data.stack = append(data.stack, id) data.nodes = append(data.nodes, tarjanNode{index, true}) node := &data.nodes[index] for _, succid := range data.successors[id] { succindex, seen := data.index[succid] if !seen { succnode := data.strongConnect(succid) if succnode.lowlink < node.lowlink { node.lowlink = succnode.lowlink } } else if data.nodes[succindex].stacked { // Part of the current strongly-connected component. if succindex < node.lowlink { node.lowlink = succindex } } } if node.lowlink == index { // Root node; pop stack and output new // strongly-connected component. 
var scc []bson.ObjectId i := len(data.stack) - 1 for { stackid := data.stack[i] stackindex := data.index[stackid] data.nodes[stackindex].stacked = false scc = append(scc, stackid) if stackindex == index { break } i-- } data.stack = data.stack[:i] data.output = append(data.output, scc) } return node } charm-2.1.1/src/gopkg.in/mgo.v2/txn/tarjan_test.go0000664000175000017500000000134612672604565020705 0ustar marcomarcopackage txn import ( "fmt" "gopkg.in/mgo.v2/bson" . "gopkg.in/check.v1" ) type TarjanSuite struct{} var _ = Suite(TarjanSuite{}) func bid(n int) bson.ObjectId { return bson.ObjectId(fmt.Sprintf("%024d", n)) } func bids(ns ...int) (ids []bson.ObjectId) { for _, n := range ns { ids = append(ids, bid(n)) } return } func (TarjanSuite) TestExample(c *C) { successors := map[bson.ObjectId][]bson.ObjectId{ bid(1): bids(2, 3), bid(2): bids(1, 5), bid(3): bids(4), bid(4): bids(3, 5), bid(5): bids(6), bid(6): bids(7), bid(7): bids(8), bid(8): bids(6, 9), bid(9): bids(), } c.Assert(tarjanSort(successors), DeepEquals, [][]bson.ObjectId{ bids(9), bids(6, 7, 8), bids(5), bids(3, 4), bids(1, 2), }) } charm-2.1.1/src/gopkg.in/mgo.v2/txn/flusher.go0000664000175000017500000006451612672604565020047 0ustar marcomarcopackage txn import ( "fmt" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" ) func flush(r *Runner, t *transaction) error { f := &flusher{ Runner: r, goal: t, goalKeys: make(map[docKey]bool), queue: make(map[docKey][]token), debugId: debugPrefix(), } for _, dkey := range f.goal.docKeys() { f.goalKeys[dkey] = true } return f.run() } type flusher struct { *Runner goal *transaction goalKeys map[docKey]bool queue map[docKey][]token debugId string } func (f *flusher) run() (err error) { if chaosEnabled { defer f.handleChaos(&err) } f.debugf("Processing %s", f.goal) seen := make(map[bson.ObjectId]*transaction) if err := f.recurse(f.goal, seen); err != nil { return err } if f.goal.done() { return nil } // Sparse workloads will generally be managed entirely by recurse. 
// Getting here means one or more transactions have dependencies // and perhaps cycles. // Build successors data for Tarjan's sort. Must consider // that entries in txn-queue are not necessarily valid. successors := make(map[bson.ObjectId][]bson.ObjectId) ready := true for _, dqueue := range f.queue { NextPair: for i := 0; i < len(dqueue); i++ { pred := dqueue[i] predid := pred.id() predt := seen[predid] if predt == nil || predt.Nonce != pred.nonce() { continue } predsuccids, ok := successors[predid] if !ok { successors[predid] = nil } for j := i + 1; j < len(dqueue); j++ { succ := dqueue[j] succid := succ.id() succt := seen[succid] if succt == nil || succt.Nonce != succ.nonce() { continue } if _, ok := successors[succid]; !ok { successors[succid] = nil } // Found a valid pred/succ pair. i = j - 1 for _, predsuccid := range predsuccids { if predsuccid == succid { continue NextPair } } successors[predid] = append(predsuccids, succid) if succid == f.goal.Id { // There are still pre-requisites to handle. ready = false } continue NextPair } } } f.debugf("Queues: %v", f.queue) f.debugf("Successors: %v", successors) if ready { f.debugf("Goal %s has no real pre-requisites", f.goal) return f.advance(f.goal, nil, true) } // Robert Tarjan's algorithm for detecting strongly-connected // components is used for topological sorting and detecting // cycles at once. The order in which transactions are applied // in commonly affected documents must be a global agreement. 
sorted := tarjanSort(successors)
	if debugEnabled {
		f.debugf("Tarjan output: %v", sorted)
	}
	// Flush components in reverse topological order so every
	// transaction's pre-requisites are advanced before it is.
	pull := make(map[bson.ObjectId]*transaction)
	for i := len(sorted) - 1; i >= 0; i-- {
		scc := sorted[i]
		f.debugf("Flushing %v", scc)
		if len(scc) == 1 {
			pull[scc[0]] = seen[scc[0]]
		}
		for _, id := range scc {
			if err := f.advance(seen[id], pull, true); err != nil {
				return err
			}
		}
		if len(scc) > 1 {
			// For cycles, only add the members to pull after the whole
			// component has been advanced.
			for _, id := range scc {
				pull[id] = seen[id]
			}
		}
	}
	return nil
}

// recurse advances t without forcing and then recursively loads and
// advances every not-yet-seen transaction queued on t's documents.
// seen records the transactions already visited by this flush.
func (f *flusher) recurse(t *transaction, seen map[bson.ObjectId]*transaction) error {
	seen[t.Id] = t
	err := f.advance(t, nil, false)
	if err != errPreReqs {
		// Either fully advanced or a hard failure; only errPreReqs
		// means we must keep digging through the queues.
		return err
	}
	for _, dkey := range t.docKeys() {
		for _, dtt := range f.queue[dkey] {
			id := dtt.id()
			if seen[id] != nil {
				continue
			}
			qt, err := f.load(id)
			if err != nil {
				return err
			}
			err = f.recurse(qt, seen)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// advance drives t through its state machine until it reaches a final
// state (tapplied/taborted) or, when force is false, until errPreReqs
// reports that pre-requisites block progress. pull carries transactions
// whose leftover tokens may be pruned from document queues downstream.
func (f *flusher) advance(t *transaction, pull map[bson.ObjectId]*transaction, force bool) error {
	for {
		switch t.State {
		case tpreparing, tprepared:
			revnos, err := f.prepare(t, force)
			if err != nil {
				return err
			}
			if t.State != tprepared {
				// State moved underneath us; loop and reconsider.
				continue
			}
			if err = f.assert(t, revnos, pull); err != nil {
				return err
			}
			if t.State != tprepared {
				continue
			}
			if err = f.checkpoint(t, revnos); err != nil {
				return err
			}
		case tapplying:
			return f.apply(t, pull)
		case taborting:
			return f.abortOrReload(t, nil, pull)
		case tapplied, taborted:
			return nil
		default:
			panic(fmt.Errorf("transaction in unknown state: %q", t.State))
		}
	}
	panic("unreachable")
}

// stash describes the insert/remove handling state of a stashed document.
type stash string

const (
	stashStable stash = ""
	stashInsert stash = "insert"
	stashRemove stash = "remove"
)

// txnInfo mirrors the txn bookkeeping fields kept on documents and on
// stash entries.
type txnInfo struct {
	Queue  []token       `bson:"txn-queue"`
	Revno  int64         `bson:"txn-revno,omitempty"`
	Insert bson.ObjectId `bson:"txn-insert,omitempty"`
	Remove bson.ObjectId `bson:"txn-remove,omitempty"`
}

type stashState string

const (
	stashNew       stashState = ""
	stashInserting stashState = "inserting"
)

// txnFields selects only the txn bookkeeping fields of a document.
var txnFields = bson.D{{"txn-queue", 1}, {"txn-revno", 1}, {"txn-remove", 1}, {"txn-insert",
1}} var errPreReqs = fmt.Errorf("transaction has pre-requisites and force is false") // prepare injects t's id onto txn-queue for all affected documents // and collects the current txn-queue and txn-revno values during // the process. If the prepared txn-queue indicates that there are // pre-requisite transactions to be applied and the force parameter // is false, errPreReqs will be returned. Otherwise, the current // tip revision numbers for all the documents are returned. func (f *flusher) prepare(t *transaction, force bool) (revnos []int64, err error) { if t.State != tpreparing { return f.rescan(t, force) } f.debugf("Preparing %s", t) // dkeys being sorted means stable iteration across all runners. This // isn't strictly required, but reduces the chances of cycles. dkeys := t.docKeys() revno := make(map[docKey]int64) info := txnInfo{} tt := tokenFor(t) NextDoc: for _, dkey := range dkeys { change := mgo.Change{ Update: bson.D{{"$addToSet", bson.D{{"txn-queue", tt}}}}, ReturnNew: true, } c := f.tc.Database.C(dkey.C) cquery := c.FindId(dkey.Id).Select(txnFields) RetryDoc: change.Upsert = false chaos("") if _, err := cquery.Apply(change, &info); err == nil { if info.Remove == "" { // Fast path, unless workload is insert/remove heavy. revno[dkey] = info.Revno f.queue[dkey] = info.Queue f.debugf("[A] Prepared document %v with revno %d and queue: %v", dkey, info.Revno, info.Queue) continue NextDoc } else { // Handle remove in progress before preparing it. if err := f.loadAndApply(info.Remove); err != nil { return nil, err } goto RetryDoc } } else if err != mgo.ErrNotFound { return nil, err } // Document missing. Use stash collection. change.Upsert = true chaos("") _, err := f.sc.FindId(dkey).Apply(change, &info) if err != nil { return nil, err } if info.Insert != "" { // Handle insert in progress before preparing it. 
if err := f.loadAndApply(info.Insert); err != nil { return nil, err } goto RetryDoc } // Must confirm stash is still in use and is the same one // prepared, since applying a remove overwrites the stash. docFound := false stashFound := false if err = c.FindId(dkey.Id).Select(txnFields).One(&info); err == nil { docFound = true } else if err != mgo.ErrNotFound { return nil, err } else if err = f.sc.FindId(dkey).One(&info); err == nil { stashFound = true if info.Revno == 0 { // Missing revno in the stash only happens when it // has been upserted, in which case it defaults to -1. // Txn-inserted documents get revno -1 while in the stash // for the first time, and -revno-1 == 2 when they go live. info.Revno = -1 } } else if err != mgo.ErrNotFound { return nil, err } if docFound && info.Remove == "" || stashFound && info.Insert == "" { for _, dtt := range info.Queue { if dtt != tt { continue } // Found tt properly prepared. if stashFound { f.debugf("[B] Prepared document %v on stash with revno %d and queue: %v", dkey, info.Revno, info.Queue) } else { f.debugf("[B] Prepared document %v with revno %d and queue: %v", dkey, info.Revno, info.Queue) } revno[dkey] = info.Revno f.queue[dkey] = info.Queue continue NextDoc } } // The stash wasn't valid and tt got overwriten. Try again. f.unstashToken(tt, dkey) goto RetryDoc } // Save the prepared nonce onto t. 
nonce := tt.nonce() qdoc := bson.D{{"_id", t.Id}, {"s", tpreparing}} udoc := bson.D{{"$set", bson.D{{"s", tprepared}, {"n", nonce}}}} chaos("set-prepared") err = f.tc.Update(qdoc, udoc) if err == nil { t.State = tprepared t.Nonce = nonce } else if err == mgo.ErrNotFound { f.debugf("Can't save nonce of %s: LOST RACE", tt) if err := f.reload(t); err != nil { return nil, err } else if t.State == tpreparing { panic("can't save nonce yet transaction is still preparing") } else if t.State != tprepared { return t.Revnos, nil } tt = t.token() } else if err != nil { return nil, err } prereqs, found := f.hasPreReqs(tt, dkeys) if !found { // Must only happen when reloading above. return f.rescan(t, force) } else if prereqs && !force { f.debugf("Prepared queue with %s [has prereqs & not forced].", tt) return nil, errPreReqs } revnos = assembledRevnos(t.Ops, revno) if !prereqs { f.debugf("Prepared queue with %s [no prereqs]. Revnos: %v", tt, revnos) } else { f.debugf("Prepared queue with %s [forced] Revnos: %v", tt, revnos) } return revnos, nil } func (f *flusher) unstashToken(tt token, dkey docKey) error { qdoc := bson.D{{"_id", dkey}, {"txn-queue", tt}} udoc := bson.D{{"$pull", bson.D{{"txn-queue", tt}}}} chaos("") if err := f.sc.Update(qdoc, udoc); err == nil { chaos("") err = f.sc.Remove(bson.D{{"_id", dkey}, {"txn-queue", bson.D{}}}) } else if err != mgo.ErrNotFound { return err } return nil } func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) { f.debugf("Rescanning %s", t) if t.State != tprepared { panic(fmt.Errorf("rescanning transaction in invalid state: %q", t.State)) } // dkeys being sorted means stable iteration across all // runners. This isn't strictly required, but reduces the chances // of cycles. dkeys := t.docKeys() tt := t.token() if !force { prereqs, found := f.hasPreReqs(tt, dkeys) if found && prereqs { // Its state is already known. 
return nil, errPreReqs } } revno := make(map[docKey]int64) info := txnInfo{} for _, dkey := range dkeys { const retries = 3 retry := -1 RetryDoc: retry++ c := f.tc.Database.C(dkey.C) if err := c.FindId(dkey.Id).Select(txnFields).One(&info); err == mgo.ErrNotFound { // Document is missing. Look in stash. chaos("") if err := f.sc.FindId(dkey).One(&info); err == mgo.ErrNotFound { // Stash also doesn't exist. Maybe someone applied it. if err := f.reload(t); err != nil { return nil, err } else if t.State != tprepared { return t.Revnos, err } // Not applying either. if retry < retries { // Retry since there might be an insert/remove race. goto RetryDoc } // Neither the doc nor the stash seem to exist. return nil, fmt.Errorf("cannot find document %v for applying transaction %s", dkey, t) } else if err != nil { return nil, err } // Stash found. if info.Insert != "" { // Handle insert in progress before assuming ordering is good. if err := f.loadAndApply(info.Insert); err != nil { return nil, err } goto RetryDoc } if info.Revno == 0 { // Missing revno in the stash means -1. info.Revno = -1 } } else if err != nil { return nil, err } else if info.Remove != "" { // Handle remove in progress before assuming ordering is good. if err := f.loadAndApply(info.Remove); err != nil { return nil, err } goto RetryDoc } revno[dkey] = info.Revno found := false for _, id := range info.Queue { if id == tt { found = true break } } f.queue[dkey] = info.Queue if !found { // Rescanned transaction id was not in the queue. This could mean one // of three things: // 1) The transaction was applied and popped by someone else. This is // the common case. // 2) We've read an out-of-date queue from the stash. This can happen // when someone else was paused for a long while preparing another // transaction for this document, and improperly upserted to the // stash when unpaused (after someone else inserted the document). // This is rare but possible. 
// 3) There's an actual bug somewhere, or outside interference. Worst
			// possible case.
			f.debugf("Rescanned document %v misses %s in queue: %v", dkey, tt, info.Queue)
			err := f.reload(t)
			if t.State == tpreparing || t.State == tprepared {
				if retry < retries {
					// Case 2.
					goto RetryDoc
				}
				// Case 3.
				return nil, fmt.Errorf("cannot find transaction %s in queue for document %v", t, dkey)
			}
			// Case 1.
			return t.Revnos, err
		}
	}

	prereqs, found := f.hasPreReqs(tt, dkeys)
	if !found {
		panic("rescanning loop guarantees that this can't happen")
	} else if prereqs && !force {
		f.debugf("Rescanned queue with %s: has prereqs, not forced", tt)
		return nil, errPreReqs
	}
	revnos = assembledRevnos(t.Ops, revno)
	if !prereqs {
		f.debugf("Rescanned queue with %s: no prereqs, revnos: %v", tt, revnos)
	} else {
		f.debugf("Rescanned queue with %s: has prereqs, forced, revnos: %v", tt, revnos)
	}
	return revnos, nil
}

// assembledRevnos returns the revno each op must run against, simulating
// in the revno map the effect each op will have so that later ops on the
// same document see the revision left behind by earlier ones: an insert
// revives a stashed document (negative revno), an update bumps the revno,
// and a remove negates it.
func assembledRevnos(ops []Op, revno map[docKey]int64) []int64 {
	revnos := make([]int64, len(ops))
	for i, op := range ops {
		dkey := op.docKey()
		revnos[i] = revno[dkey]
		drevno := revno[dkey]
		switch {
		case op.Insert != nil && drevno < 0:
			revno[dkey] = -drevno + 1
		case op.Update != nil && drevno >= 0:
			revno[dkey] = drevno + 1
		case op.Remove && drevno >= 0:
			revno[dkey] = -drevno - 1
		}
	}
	return revnos
}

// hasPreReqs reports whether some other transaction's token precedes tt
// in one of the queues for dkeys (prereqs), and whether tt itself was
// found in every one of those queues (found).
func (f *flusher) hasPreReqs(tt token, dkeys docKeys) (prereqs, found bool) {
	found = true
NextDoc:
	for _, dkey := range dkeys {
		for _, dtt := range f.queue[dkey] {
			if dtt == tt {
				continue NextDoc
			} else if dtt.id() != tt.id() {
				prereqs = true
			}
		}
		// Finished this queue without encountering tt.
		found = false
	}
	return
}

// reload refreshes t's state, nonce, and revnos from the transaction
// collection (fields s, n, and r respectively).
func (f *flusher) reload(t *transaction) error {
	var newt transaction
	query := f.tc.FindId(t.Id)
	query.Select(bson.D{{"s", 1}, {"n", 1}, {"r", 1}})
	if err := query.One(&newt); err != nil {
		return fmt.Errorf("failed to reload transaction: %v", err)
	}
	t.State = newt.State
	t.Nonce = newt.Nonce
	t.Revnos = newt.Revnos
	f.debugf("Reloaded %s: %q", t, t.State)
	return nil
}

// loadAndApply loads the transaction with the given id and forces it
// through to completion.
func (f *flusher) loadAndApply(id bson.ObjectId) error {
	t, err
:= f.load(id) if err != nil { return err } return f.advance(t, nil, true) } // assert verifies that all assertions in t match the content that t // will be applied upon. If an assertion fails, the transaction state // is changed to aborted. func (f *flusher) assert(t *transaction, revnos []int64, pull map[bson.ObjectId]*transaction) error { f.debugf("Asserting %s with revnos %v", t, revnos) if t.State != tprepared { panic(fmt.Errorf("asserting transaction in invalid state: %q", t.State)) } qdoc := make(bson.D, 3) revno := make(map[docKey]int64) for i, op := range t.Ops { dkey := op.docKey() if _, ok := revno[dkey]; !ok { revno[dkey] = revnos[i] } if op.Assert == nil { continue } if op.Assert == DocMissing { if revnos[i] >= 0 { return f.abortOrReload(t, revnos, pull) } continue } if op.Insert != nil { return fmt.Errorf("Insert can only Assert txn.DocMissing", op.Assert) } // if revnos[i] < 0 { abort }? qdoc = append(qdoc[:0], bson.DocElem{"_id", op.Id}) if op.Assert != DocMissing { var revnoq interface{} if n := revno[dkey]; n == 0 { revnoq = bson.D{{"$exists", false}} } else { revnoq = n } // XXX Add tt to the query here, once we're sure it's all working. // Not having it increases the chances of breaking on bad logic. qdoc = append(qdoc, bson.DocElem{"txn-revno", revnoq}) if op.Assert != DocExists { qdoc = append(qdoc, bson.DocElem{"$or", []interface{}{op.Assert}}) } } c := f.tc.Database.C(op.C) if err := c.Find(qdoc).Select(bson.D{{"_id", 1}}).One(nil); err == mgo.ErrNotFound { // Assertion failed or someone else started applying. 
return f.abortOrReload(t, revnos, pull) } else if err != nil { return err } } f.debugf("Asserting %s succeeded", t) return nil } func (f *flusher) abortOrReload(t *transaction, revnos []int64, pull map[bson.ObjectId]*transaction) (err error) { f.debugf("Aborting or reloading %s (was %q)", t, t.State) if t.State == tprepared { qdoc := bson.D{{"_id", t.Id}, {"s", tprepared}} udoc := bson.D{{"$set", bson.D{{"s", taborting}}}} chaos("set-aborting") if err = f.tc.Update(qdoc, udoc); err == nil { t.State = taborting } else if err == mgo.ErrNotFound { if err = f.reload(t); err != nil || t.State != taborting { f.debugf("Won't abort %s. Reloaded state: %q", t, t.State) return err } } else { return err } } else if t.State != taborting { panic(fmt.Errorf("aborting transaction in invalid state: %q", t.State)) } if len(revnos) > 0 { if pull == nil { pull = map[bson.ObjectId]*transaction{t.Id: t} } seen := make(map[docKey]bool) for i, op := range t.Ops { dkey := op.docKey() if seen[op.docKey()] { continue } seen[dkey] = true pullAll := tokensToPull(f.queue[dkey], pull, "") if len(pullAll) == 0 { continue } udoc := bson.D{{"$pullAll", bson.D{{"txn-queue", pullAll}}}} chaos("") if revnos[i] < 0 { err = f.sc.UpdateId(dkey, udoc) } else { c := f.tc.Database.C(dkey.C) err = c.UpdateId(dkey.Id, udoc) } if err != nil && err != mgo.ErrNotFound { return err } } } udoc := bson.D{{"$set", bson.D{{"s", taborted}}}} chaos("set-aborted") if err := f.tc.UpdateId(t.Id, udoc); err != nil && err != mgo.ErrNotFound { return err } t.State = taborted f.debugf("Aborted %s", t) return nil } func (f *flusher) checkpoint(t *transaction, revnos []int64) error { var debugRevnos map[docKey][]int64 if debugEnabled { debugRevnos = make(map[docKey][]int64) for i, op := range t.Ops { dkey := op.docKey() debugRevnos[dkey] = append(debugRevnos[dkey], revnos[i]) } f.debugf("Ready to apply %s. Saving revnos %v", t, debugRevnos) } // Save in t the txn-revno values the transaction must run on. 
qdoc := bson.D{{"_id", t.Id}, {"s", tprepared}} udoc := bson.D{{"$set", bson.D{{"s", tapplying}, {"r", revnos}}}} chaos("set-applying") err := f.tc.Update(qdoc, udoc) if err == nil { t.State = tapplying t.Revnos = revnos f.debugf("Ready to apply %s. Saving revnos %v: DONE", t, debugRevnos) } else if err == mgo.ErrNotFound { f.debugf("Ready to apply %s. Saving revnos %v: LOST RACE", t, debugRevnos) return f.reload(t) } return nil } func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) error { f.debugf("Applying transaction %s", t) if t.State != tapplying { panic(fmt.Errorf("applying transaction in invalid state: %q", t.State)) } if pull == nil { pull = map[bson.ObjectId]*transaction{t.Id: t} } logRevnos := append([]int64(nil), t.Revnos...) logDoc := bson.D{{"_id", t.Id}} tt := tokenFor(t) for i := range t.Ops { op := &t.Ops[i] dkey := op.docKey() dqueue := f.queue[dkey] revno := t.Revnos[i] var opName string if debugEnabled { opName = op.name() f.debugf("Applying %s op %d (%s) on %v with txn-revno %d", t, i, opName, dkey, revno) } c := f.tc.Database.C(op.C) qdoc := bson.D{{"_id", dkey.Id}, {"txn-revno", revno}, {"txn-queue", tt}} if op.Insert != nil { qdoc[0].Value = dkey if revno == -1 { qdoc[1].Value = bson.D{{"$exists", false}} } } else if revno == 0 { // There's no document with revno 0. The only way to see it is // when an existent document participates in a transaction the // first time. Txn-inserted documents get revno -1 while in the // stash for the first time, and -revno-1 == 2 when they go live. 
qdoc[1].Value = bson.D{{"$exists", false}} } pullAll := tokensToPull(dqueue, pull, tt) var d bson.D var outcome string var err error switch { case op.Update != nil: if revno < 0 { err = mgo.ErrNotFound f.debugf("Won't try to apply update op; negative revision means the document is missing or stashed") } else { newRevno := revno + 1 logRevnos[i] = newRevno if d, err = objToDoc(op.Update); err != nil { return err } if d, err = addToDoc(d, "$pullAll", bson.D{{"txn-queue", pullAll}}); err != nil { return err } if d, err = addToDoc(d, "$set", bson.D{{"txn-revno", newRevno}}); err != nil { return err } chaos("") err = c.Update(qdoc, d) } case op.Remove: if revno < 0 { err = mgo.ErrNotFound } else { newRevno := -revno - 1 logRevnos[i] = newRevno nonce := newNonce() stash := txnInfo{} change := mgo.Change{ Update: bson.D{{"$push", bson.D{{"n", nonce}}}}, Upsert: true, ReturnNew: true, } if _, err = f.sc.FindId(dkey).Apply(change, &stash); err != nil { return err } change = mgo.Change{ Update: bson.D{{"$set", bson.D{{"txn-remove", t.Id}}}}, ReturnNew: true, } var info txnInfo if _, err = c.Find(qdoc).Apply(change, &info); err == nil { // The document still exists so the stash previously // observed was either out of date or necessarily // contained the token being applied. f.debugf("Marked document %v to be removed on revno %d with queue: %v", dkey, info.Revno, info.Queue) updated := false if !hasToken(stash.Queue, tt) { var set, unset bson.D if revno == 0 { // Missing revno in stash means -1. 
set = bson.D{{"txn-queue", info.Queue}} unset = bson.D{{"n", 1}, {"txn-revno", 1}} } else { set = bson.D{{"txn-queue", info.Queue}, {"txn-revno", newRevno}} unset = bson.D{{"n", 1}} } qdoc := bson.D{{"_id", dkey}, {"n", nonce}} udoc := bson.D{{"$set", set}, {"$unset", unset}} if err = f.sc.Update(qdoc, udoc); err == nil { updated = true } else if err != mgo.ErrNotFound { return err } } if updated { f.debugf("Updated stash for document %v with revno %d and queue: %v", dkey, newRevno, info.Queue) } else { f.debugf("Stash for document %v was up-to-date", dkey) } err = c.Remove(qdoc) } } case op.Insert != nil: if revno >= 0 { err = mgo.ErrNotFound } else { newRevno := -revno + 1 logRevnos[i] = newRevno if d, err = objToDoc(op.Insert); err != nil { return err } change := mgo.Change{ Update: bson.D{{"$set", bson.D{{"txn-insert", t.Id}}}}, ReturnNew: true, } chaos("") var info txnInfo if _, err = f.sc.Find(qdoc).Apply(change, &info); err == nil { f.debugf("Stash for document %v has revno %d and queue: %v", dkey, info.Revno, info.Queue) d = setInDoc(d, bson.D{{"_id", op.Id}, {"txn-revno", newRevno}, {"txn-queue", info.Queue}}) // Unlikely yet unfortunate race in here if this gets seriously // delayed. If someone inserts+removes meanwhile, this will // reinsert, and there's no way to avoid that while keeping the // collection clean or compromising sharding. applyOps can solve // the former, but it can't shard (SERVER-1439). chaos("insert") err = c.Insert(d) if err == nil || mgo.IsDup(err) { if err == nil { f.debugf("New document %v inserted with revno %d and queue: %v", dkey, info.Revno, info.Queue) } else { f.debugf("Document %v already existed", dkey) } chaos("") if err = f.sc.Remove(qdoc); err == nil { f.debugf("Stash for document %v removed", dkey) } } } } case op.Assert != nil: // Pure assertion. No changes to apply. 
} if err == nil { outcome = "DONE" } else if err == mgo.ErrNotFound || mgo.IsDup(err) { outcome = "MISS" err = nil } else { outcome = err.Error() } if debugEnabled { f.debugf("Applying %s op %d (%s) on %v with txn-revno %d: %s", t, i, opName, dkey, revno, outcome) } if err != nil { return err } if f.lc != nil && op.isChange() { // Add change to the log document. var dr bson.D for li := range logDoc { elem := &logDoc[li] if elem.Name == op.C { dr = elem.Value.(bson.D) break } } if dr == nil { logDoc = append(logDoc, bson.DocElem{op.C, bson.D{{"d", []interface{}{}}, {"r", []int64{}}}}) dr = logDoc[len(logDoc)-1].Value.(bson.D) } dr[0].Value = append(dr[0].Value.([]interface{}), op.Id) dr[1].Value = append(dr[1].Value.([]int64), logRevnos[i]) } } t.State = tapplied if f.lc != nil { // Insert log document into the changelog collection. f.debugf("Inserting %s into change log", t) err := f.lc.Insert(logDoc) if err != nil && !mgo.IsDup(err) { return err } } // It's been applied, so errors are ignored here. It's fine for someone // else to win the race and mark it as applied, and it's also fine for // it to remain pending until a later point when someone will perceive // it has been applied and mark it at such. f.debugf("Marking %s as applied", t) chaos("set-applied") f.tc.Update(bson.D{{"_id", t.Id}, {"s", tapplying}}, bson.D{{"$set", bson.D{{"s", tapplied}}}}) return nil } func tokensToPull(dqueue []token, pull map[bson.ObjectId]*transaction, dontPull token) []token { var result []token for j := len(dqueue) - 1; j >= 0; j-- { dtt := dqueue[j] if dtt == dontPull { continue } if _, ok := pull[dtt.id()]; ok { // It was handled before and this is a leftover invalid // nonce in the queue. Cherry-pick it out. 
result = append(result, dtt)
		}
	}
	return result
}

// objToDoc converts an arbitrary value into a bson.D document by
// round-tripping it through bson marshalling.
func objToDoc(obj interface{}) (d bson.D, err error) {
	data, err := bson.Marshal(obj)
	if err != nil {
		return nil, err
	}
	err = bson.Unmarshal(data, &d)
	if err != nil {
		return nil, err
	}
	return d, err
}

// addToDoc merges add into the value doc holds for key: if key already
// exists its bson.D value is extended, otherwise a new element is
// appended. It fails if the existing value is not itself a bson.D.
func addToDoc(doc bson.D, key string, add bson.D) (bson.D, error) {
	for i := range doc {
		elem := &doc[i]
		if elem.Name != key {
			continue
		}
		if old, ok := elem.Value.(bson.D); ok {
			elem.Value = append(old, add...)
			return doc, nil
		} else {
			return nil, fmt.Errorf("invalid %q value in change document: %#v", key, elem.Value)
		}
	}
	return append(doc, bson.DocElem{key, add}), nil
}

// setInDoc sets each element of set in doc, overwriting an element of
// the same name when present or appending a new one. Only doc's
// original elements are candidates for overwriting.
func setInDoc(doc bson.D, set bson.D) bson.D {
	dlen := len(doc)
NextS:
	for s := range set {
		sname := set[s].Name
		for d := 0; d < dlen; d++ {
			if doc[d].Name == sname {
				doc[d].Value = set[s].Value
				continue NextS
			}
		}
		doc = append(doc, set[s])
	}
	return doc
}

// hasToken reports whether tt is present in tokens.
func hasToken(tokens []token, tt token) bool {
	for _, ttt := range tokens {
		if ttt == tt {
			return true
		}
	}
	return false
}

// debugf logs through the package debug logger, prefixed with the
// flusher's debug id, when debugging is enabled.
func (f *flusher) debugf(format string, args ...interface{}) {
	if !debugEnabled {
		return
	}
	debugf(f.debugId+format, args...)
}
charm-2.1.1/src/gopkg.in/mgo.v2/txn/txn_test.go0000664000175000017500000004104012672604565020232 0ustar marcomarcopackage txn_test import ( "fmt" "sync" "testing" "time" .
"gopkg.in/check.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" ) func TestAll(t *testing.T) { TestingT(t) } type S struct { MgoSuite db *mgo.Database tc, sc *mgo.Collection accounts *mgo.Collection runner *txn.Runner } var _ = Suite(&S{}) type M map[string]interface{} func (s *S) SetUpTest(c *C) { txn.SetChaos(txn.Chaos{}) txn.SetLogger(c) txn.SetDebug(true) s.MgoSuite.SetUpTest(c) s.db = s.session.DB("test") s.tc = s.db.C("tc") s.sc = s.db.C("tc.stash") s.accounts = s.db.C("accounts") s.runner = txn.NewRunner(s.tc) } func (s *S) TearDownTest(c *C) { txn.SetLogger(nil) txn.SetDebug(false) } type Account struct { Id int `bson:"_id"` Balance int } func (s *S) TestDocExists(c *C) { err := s.accounts.Insert(M{"_id": 0, "balance": 300}) c.Assert(err, IsNil) exists := []txn.Op{{ C: "accounts", Id: 0, Assert: txn.DocExists, }} missing := []txn.Op{{ C: "accounts", Id: 0, Assert: txn.DocMissing, }} err = s.runner.Run(exists, "", nil) c.Assert(err, IsNil) err = s.runner.Run(missing, "", nil) c.Assert(err, Equals, txn.ErrAborted) err = s.accounts.RemoveId(0) c.Assert(err, IsNil) err = s.runner.Run(exists, "", nil) c.Assert(err, Equals, txn.ErrAborted) err = s.runner.Run(missing, "", nil) c.Assert(err, IsNil) } func (s *S) TestInsert(c *C) { err := s.accounts.Insert(M{"_id": 0, "balance": 300}) c.Assert(err, IsNil) ops := []txn.Op{{ C: "accounts", Id: 0, Insert: M{"balance": 200}, }} err = s.runner.Run(ops, "", nil) c.Assert(err, IsNil) var account Account err = s.accounts.FindId(0).One(&account) c.Assert(err, IsNil) c.Assert(account.Balance, Equals, 300) ops[0].Id = 1 err = s.runner.Run(ops, "", nil) c.Assert(err, IsNil) err = s.accounts.FindId(1).One(&account) c.Assert(err, IsNil) c.Assert(account.Balance, Equals, 200) } func (s *S) TestInsertStructID(c *C) { type id struct { FirstName string LastName string } ops := []txn.Op{{ C: "accounts", Id: id{FirstName: "John", LastName: "Jones"}, Assert: txn.DocMissing, Insert: M{"balance": 200}, }, { C: 
"accounts", Id: id{FirstName: "Sally", LastName: "Smith"}, Assert: txn.DocMissing, Insert: M{"balance": 800}, }} err := s.runner.Run(ops, "", nil) c.Assert(err, IsNil) n, err := s.accounts.Find(nil).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 2) } func (s *S) TestRemove(c *C) { err := s.accounts.Insert(M{"_id": 0, "balance": 300}) c.Assert(err, IsNil) ops := []txn.Op{{ C: "accounts", Id: 0, Remove: true, }} err = s.runner.Run(ops, "", nil) c.Assert(err, IsNil) err = s.accounts.FindId(0).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) err = s.runner.Run(ops, "", nil) c.Assert(err, IsNil) } func (s *S) TestUpdate(c *C) { var err error err = s.accounts.Insert(M{"_id": 0, "balance": 200}) c.Assert(err, IsNil) err = s.accounts.Insert(M{"_id": 1, "balance": 200}) c.Assert(err, IsNil) ops := []txn.Op{{ C: "accounts", Id: 0, Update: M{"$inc": M{"balance": 100}}, }} err = s.runner.Run(ops, "", nil) c.Assert(err, IsNil) var account Account err = s.accounts.FindId(0).One(&account) c.Assert(err, IsNil) c.Assert(account.Balance, Equals, 300) ops[0].Id = 1 err = s.accounts.FindId(1).One(&account) c.Assert(err, IsNil) c.Assert(account.Balance, Equals, 200) } func (s *S) TestInsertUpdate(c *C) { ops := []txn.Op{{ C: "accounts", Id: 0, Insert: M{"_id": 0, "balance": 200}, }, { C: "accounts", Id: 0, Update: M{"$inc": M{"balance": 100}}, }} err := s.runner.Run(ops, "", nil) c.Assert(err, IsNil) var account Account err = s.accounts.FindId(0).One(&account) c.Assert(err, IsNil) c.Assert(account.Balance, Equals, 300) err = s.runner.Run(ops, "", nil) c.Assert(err, IsNil) err = s.accounts.FindId(0).One(&account) c.Assert(err, IsNil) c.Assert(account.Balance, Equals, 400) } func (s *S) TestUpdateInsert(c *C) { ops := []txn.Op{{ C: "accounts", Id: 0, Update: M{"$inc": M{"balance": 100}}, }, { C: "accounts", Id: 0, Insert: M{"_id": 0, "balance": 200}, }} err := s.runner.Run(ops, "", nil) c.Assert(err, IsNil) var account Account err = s.accounts.FindId(0).One(&account) c.Assert(err, 
IsNil) c.Assert(account.Balance, Equals, 200) err = s.runner.Run(ops, "", nil) c.Assert(err, IsNil) err = s.accounts.FindId(0).One(&account) c.Assert(err, IsNil) c.Assert(account.Balance, Equals, 300) } func (s *S) TestInsertRemoveInsert(c *C) { ops := []txn.Op{{ C: "accounts", Id: 0, Insert: M{"_id": 0, "balance": 200}, }, { C: "accounts", Id: 0, Remove: true, }, { C: "accounts", Id: 0, Insert: M{"_id": 0, "balance": 300}, }} err := s.runner.Run(ops, "", nil) c.Assert(err, IsNil) var account Account err = s.accounts.FindId(0).One(&account) c.Assert(err, IsNil) c.Assert(account.Balance, Equals, 300) } func (s *S) TestQueueStashing(c *C) { txn.SetChaos(txn.Chaos{ KillChance: 1, Breakpoint: "set-applying", }) opses := [][]txn.Op{{{ C: "accounts", Id: 0, Insert: M{"balance": 100}, }}, {{ C: "accounts", Id: 0, Remove: true, }}, {{ C: "accounts", Id: 0, Insert: M{"balance": 200}, }}, {{ C: "accounts", Id: 0, Update: M{"$inc": M{"balance": 100}}, }}} var last bson.ObjectId for _, ops := range opses { last = bson.NewObjectId() err := s.runner.Run(ops, last, nil) c.Assert(err, Equals, txn.ErrChaos) } txn.SetChaos(txn.Chaos{}) err := s.runner.Resume(last) c.Assert(err, IsNil) var account Account err = s.accounts.FindId(0).One(&account) c.Assert(err, IsNil) c.Assert(account.Balance, Equals, 300) } func (s *S) TestInfo(c *C) { ops := []txn.Op{{ C: "accounts", Id: 0, Assert: txn.DocMissing, }} id := bson.NewObjectId() err := s.runner.Run(ops, id, M{"n": 42}) c.Assert(err, IsNil) var t struct{ I struct{ N int } } err = s.tc.FindId(id).One(&t) c.Assert(err, IsNil) c.Assert(t.I.N, Equals, 42) } func (s *S) TestErrors(c *C) { doc := bson.M{"foo": 1} tests := []txn.Op{{ C: "c", Id: 0, }, { C: "c", Id: 0, Insert: doc, Remove: true, }, { C: "c", Id: 0, Insert: doc, Update: doc, }, { C: "c", Id: 0, Update: doc, Remove: true, }, { C: "c", Assert: doc, }, { Id: 0, Assert: doc, }} txn.SetChaos(txn.Chaos{KillChance: 1.0}) for _, op := range tests { c.Logf("op: %v", op) err := 
s.runner.Run([]txn.Op{op}, "", nil) c.Assert(err, ErrorMatches, "error in transaction op 0: .*") } } func (s *S) TestAssertNestedOr(c *C) { // Assert uses $or internally. Ensure nesting works. err := s.accounts.Insert(M{"_id": 0, "balance": 300}) c.Assert(err, IsNil) ops := []txn.Op{{ C: "accounts", Id: 0, Assert: bson.D{{"$or", []bson.D{{{"balance", 100}}, {{"balance", 300}}}}}, Update: bson.D{{"$inc", bson.D{{"balance", 100}}}}, }} err = s.runner.Run(ops, "", nil) c.Assert(err, IsNil) var account Account err = s.accounts.FindId(0).One(&account) c.Assert(err, IsNil) c.Assert(account.Balance, Equals, 400) } func (s *S) TestVerifyFieldOrdering(c *C) { // Used to have a map in certain operations, which means // the ordering of fields would be messed up. fields := bson.D{{"a", 1}, {"b", 2}, {"c", 3}} ops := []txn.Op{{ C: "accounts", Id: 0, Insert: fields, }} err := s.runner.Run(ops, "", nil) c.Assert(err, IsNil) var d bson.D err = s.accounts.FindId(0).One(&d) c.Assert(err, IsNil) var filtered bson.D for _, e := range d { switch e.Name { case "a", "b", "c": filtered = append(filtered, e) } } c.Assert(filtered, DeepEquals, fields) } func (s *S) TestChangeLog(c *C) { chglog := s.db.C("chglog") s.runner.ChangeLog(chglog) ops := []txn.Op{{ C: "debts", Id: 0, Assert: txn.DocMissing, }, { C: "accounts", Id: 0, Insert: M{"balance": 300}, }, { C: "accounts", Id: 1, Insert: M{"balance": 300}, }, { C: "people", Id: "joe", Insert: M{"accounts": []int64{0, 1}}, }} id := bson.NewObjectId() err := s.runner.Run(ops, id, nil) c.Assert(err, IsNil) type IdList []interface{} type Log struct { Docs IdList "d" Revnos []int64 "r" } var m map[string]*Log err = chglog.FindId(id).One(&m) c.Assert(err, IsNil) c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{2, 2}}) c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{2}}) c.Assert(m["debts"], IsNil) ops = []txn.Op{{ C: "accounts", Id: 0, Update: M{"$inc": M{"balance": 100}}, }, { C: "accounts", Id: 1, Update: M{"$inc": 
M{"balance": 100}}, }} id = bson.NewObjectId() err = s.runner.Run(ops, id, nil) c.Assert(err, IsNil) m = nil err = chglog.FindId(id).One(&m) c.Assert(err, IsNil) c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{3, 3}}) c.Assert(m["people"], IsNil) ops = []txn.Op{{ C: "accounts", Id: 0, Remove: true, }, { C: "people", Id: "joe", Remove: true, }} id = bson.NewObjectId() err = s.runner.Run(ops, id, nil) c.Assert(err, IsNil) m = nil err = chglog.FindId(id).One(&m) c.Assert(err, IsNil) c.Assert(m["accounts"], DeepEquals, &Log{IdList{0}, []int64{-4}}) c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{-3}}) } func (s *S) TestPurgeMissing(c *C) { txn.SetChaos(txn.Chaos{ KillChance: 1, Breakpoint: "set-applying", }) err := s.accounts.Insert(M{"_id": 0, "balance": 100}) c.Assert(err, IsNil) err = s.accounts.Insert(M{"_id": 1, "balance": 100}) c.Assert(err, IsNil) ops1 := []txn.Op{{ C: "accounts", Id: 3, Insert: M{"balance": 100}, }} ops2 := []txn.Op{{ C: "accounts", Id: 0, Remove: true, }, { C: "accounts", Id: 1, Update: M{"$inc": M{"balance": 100}}, }, { C: "accounts", Id: 2, Insert: M{"balance": 100}, }} first := bson.NewObjectId() c.Logf("---- Running ops1 under transaction %q, to be canceled by chaos", first.Hex()) err = s.runner.Run(ops1, first, nil) c.Assert(err, Equals, txn.ErrChaos) last := bson.NewObjectId() c.Logf("---- Running ops2 under transaction %q, to be canceled by chaos", last.Hex()) err = s.runner.Run(ops2, last, nil) c.Assert(err, Equals, txn.ErrChaos) c.Logf("---- Removing transaction %q", last.Hex()) err = s.tc.RemoveId(last) c.Assert(err, IsNil) c.Logf("---- Disabling chaos and attempting to resume all") txn.SetChaos(txn.Chaos{}) err = s.runner.ResumeAll() c.Assert(err, IsNil) again := bson.NewObjectId() c.Logf("---- Running ops2 again under transaction %q, to fail for missing transaction", again.Hex()) err = s.runner.Run(ops2, again, nil) c.Assert(err, ErrorMatches, "cannot find transaction .*") c.Logf("---- Purging missing 
transactions")
	err = s.runner.PurgeMissing("accounts")
	c.Assert(err, IsNil)

	c.Logf("---- Resuming pending transactions")
	err = s.runner.ResumeAll()
	c.Assert(err, IsNil)

	// A Balance of -1 means the account must not exist at all.
	expect := []struct{ Id, Balance int }{
		{0, -1},
		{1, 200},
		{2, 100},
		{3, 100},
	}
	var got Account
	for _, want := range expect {
		err = s.accounts.FindId(want.Id).One(&got)
		if want.Balance == -1 {
			if err != mgo.ErrNotFound {
				// BUG FIX: the original call passed only err for the two
				// format verbs, garbling the failure message (go vet's
				// printf check); want.Id was missing.
				c.Errorf("Account %d should not exist, find got err=%#v", want.Id, err)
			}
		} else if err != nil {
			c.Errorf("Account %d should have balance of %d, but wasn't found", want.Id, want.Balance)
		} else if got.Balance != want.Balance {
			c.Errorf("Account %d should have balance of %d, got %d", want.Id, want.Balance, got.Balance)
		}
	}
}

// TestTxnQueueStashStressTest runs many concurrent runners that all try
// to insert the same document ids under chaos-injected slowdowns; at
// most one insert per id can succeed, so the rest must report ErrAborted.
func (s *S) TestTxnQueueStashStressTest(c *C) {
	txn.SetChaos(txn.Chaos{
		SlowdownChance: 0.3,
		Slowdown:       50 * time.Millisecond,
	})
	defer txn.SetChaos(txn.Chaos{})

	// So we can run more iterations of the test in less time.
	txn.SetDebug(false)

	const runners = 10
	const inserts = 10
	const repeat = 100

	for r := 0; r < repeat; r++ {
		var wg sync.WaitGroup
		wg.Add(runners)
		for i := 0; i < runners; i++ {
			go func(i, r int) {
				defer wg.Done()
				session := s.session.New()
				defer session.Close()
				runner := txn.NewRunner(s.tc.With(session))
				for j := 0; j < inserts; j++ {
					ops := []txn.Op{{
						C:  "accounts",
						Id: fmt.Sprintf("insert-%d-%d", r, j),
						Insert: bson.M{
							"added-by": i,
						},
					}}
					err := runner.Run(ops, "", nil)
					if err != txn.ErrAborted {
						c.Check(err, IsNil)
					}
				}
			}(i, r)
		}
		wg.Wait()
	}
}

func (s *S) TestPurgeMissingPipelineSizeLimit(c *C) {
	// This test ensures that PurgeMissing can handle very large
	// txn-queue fields. Previous iterations of PurgeMissing would
	// trigger a 16MB aggregation pipeline result size limit when run
	// against a documents or stashes with large numbers of txn-queue
	// entries. PurgeMissing now no longer uses aggregation pipelines
	// to work around this limit.
// The pipeline result size limitation was removed from MongoDB in 2.6 so // this test is only run for older MongoDB version. build, err := s.session.BuildInfo() c.Assert(err, IsNil) if build.VersionAtLeast(2, 6) { c.Skip("This tests a problem that can only happen with MongoDB < 2.6 ") } // Insert a single document to work with. err = s.accounts.Insert(M{"_id": 0, "balance": 100}) c.Assert(err, IsNil) ops := []txn.Op{{ C: "accounts", Id: 0, Update: M{"$inc": M{"balance": 100}}, }} // Generate one successful transaction. good := bson.NewObjectId() c.Logf("---- Running ops under transaction %q", good.Hex()) err = s.runner.Run(ops, good, nil) c.Assert(err, IsNil) // Generate another transaction which which will go missing. missing := bson.NewObjectId() c.Logf("---- Running ops under transaction %q (which will go missing)", missing.Hex()) err = s.runner.Run(ops, missing, nil) c.Assert(err, IsNil) err = s.tc.RemoveId(missing) c.Assert(err, IsNil) // Generate a txn-queue on the test document that's large enough // that it used to cause PurgeMissing to exceed MongoDB's pipeline // result 16MB size limit (MongoDB 2.4 and older only). // // The contents of the txn-queue field doesn't matter, only that // it's big enough to trigger the size limit. The required size // can also be achieved by using multiple documents as long as the // cumulative size of all the txn-queue fields exceeds the // pipeline limit. A single document is easier to work with for // this test however. // // The txn id of the successful transaction is used fill the // txn-queue because this takes advantage of a short circuit in // PurgeMissing, dramatically speeding up the test run time. 
const fakeQueueLen = 250000 fakeTxnQueue := make([]string, fakeQueueLen) token := good.Hex() + "_12345678" // txn id + nonce for i := 0; i < fakeQueueLen; i++ { fakeTxnQueue[i] = token } err = s.accounts.UpdateId(0, bson.M{ "$set": bson.M{"txn-queue": fakeTxnQueue}, }) c.Assert(err, IsNil) // PurgeMissing could hit the same pipeline result size limit when // processing the txn-queue fields of stash documents so insert // the large txn-queue there too to ensure that no longer happens. err = s.sc.Insert( bson.D{{"c", "accounts"}, {"id", 0}}, bson.M{"txn-queue": fakeTxnQueue}, ) c.Assert(err, IsNil) c.Logf("---- Purging missing transactions") err = s.runner.PurgeMissing("accounts") c.Assert(err, IsNil) } func (s *S) TestTxnQueueStressTest(c *C) { txn.SetChaos(txn.Chaos{ SlowdownChance: 0.3, Slowdown: 50 * time.Millisecond, }) defer txn.SetChaos(txn.Chaos{}) // So we can run more iterations of the test in less time. txn.SetDebug(false) err := s.accounts.Insert(M{"_id": 0, "balance": 0}, M{"_id": 1, "balance": 0}) c.Assert(err, IsNil) // Run half of the operations changing account 0 and then 1, // and the other half in the opposite order. 
// state is the lifecycle stage of a transaction document.
type state int

const (
	tpreparing state = 1 // One or more documents not prepared
	tprepared  state = 2 // Prepared but not yet ready to run
	taborting  state = 3 // Assertions failed, cleaning up
	tapplying  state = 4 // Changes are in progress
	taborted   state = 5 // Pre-conditions failed, nothing done
	tapplied   state = 6 // All changes applied
)

// String returns the human-readable name for s. It panics on any value
// outside the known set, which would indicate a programming error.
func (s state) String() string {
	// Names are indexed by the state's numeric value; index 0 is unused.
	names := [...]string{
		tpreparing: "preparing",
		tprepared:  "prepared",
		taborting:  "aborting",
		tapplying:  "applying",
		taborted:   "aborted",
		tapplied:   "applied",
	}
	if s >= tpreparing && s <= tapplied {
		return names[s]
	}
	panic(fmt.Errorf("unknown state: %d", s))
}
binary.BigEndian, &seed) if err != nil { panic(err) } rand = mrand.New(mrand.NewSource(seed)) } type transaction struct { Id bson.ObjectId `bson:"_id"` State state `bson:"s"` Info interface{} `bson:"i,omitempty"` Ops []Op `bson:"o"` Nonce string `bson:"n,omitempty"` Revnos []int64 `bson:"r,omitempty"` docKeysCached docKeys } func (t *transaction) String() string { if t.Nonce == "" { return t.Id.Hex() } return string(t.token()) } func (t *transaction) done() bool { return t.State == tapplied || t.State == taborted } func (t *transaction) token() token { if t.Nonce == "" { panic("transaction has no nonce") } return tokenFor(t) } func (t *transaction) docKeys() docKeys { if t.docKeysCached != nil { return t.docKeysCached } dkeys := make(docKeys, 0, len(t.Ops)) NextOp: for _, op := range t.Ops { dkey := op.docKey() for i := range dkeys { if dkey == dkeys[i] { continue NextOp } } dkeys = append(dkeys, dkey) } sort.Sort(dkeys) t.docKeysCached = dkeys return dkeys } // tokenFor returns a unique transaction token that // is composed by t's id and a nonce. If t already has // a nonce assigned to it, it will be used, otherwise // a new nonce will be generated. func tokenFor(t *transaction) token { nonce := t.Nonce if nonce == "" { nonce = newNonce() } return token(t.Id.Hex() + "_" + nonce) } func newNonce() string { randmu.Lock() r := rand.Uint32() randmu.Unlock() n := make([]byte, 8) for i := uint(0); i < 8; i++ { n[i] = "0123456789abcdef"[(r>>(4*i))&0xf] } return string(n) } type token string func (tt token) id() bson.ObjectId { return bson.ObjectIdHex(string(tt[:24])) } func (tt token) nonce() string { return string(tt[25:]) } // Op represents an operation to a single document that may be // applied as part of a transaction with other operations. type Op struct { // C and Id identify the collection and document this operation // refers to. Id is matched against the "_id" document field. 
C string `bson:"c"` Id interface{} `bson:"d"` // Assert optionally holds a query document that is used to // test the operation document at the time the transaction is // going to be applied. The assertions for all operations in // a transaction are tested before any changes take place, // and the transaction is entirely aborted if any of them // fails. This is also the only way to prevent a transaction // from being being applied (the transaction continues despite // the outcome of Insert, Update, and Remove). Assert interface{} `bson:"a,omitempty"` // The Insert, Update and Remove fields describe the mutation // intended by the operation. At most one of them may be set // per operation. If none are set, Assert must be set and the // operation becomes a read-only test. // // Insert holds the document to be inserted at the time the // transaction is applied. The Id field will be inserted // into the document automatically as its _id field. The // transaction will continue even if the document already // exists. Use Assert with txn.DocMissing if the insertion is // required. // // Update holds the update document to be applied at the time // the transaction is applied. The transaction will continue // even if a document with Id is missing. Use Assert to // test for the document presence or its contents. // // Remove indicates whether to remove the document with Id. // The transaction continues even if the document doesn't yet // exist at the time the transaction is applied. Use Assert // with txn.DocExists to make sure it will be removed. 
Insert interface{} `bson:"i,omitempty"` Update interface{} `bson:"u,omitempty"` Remove bool `bson:"r,omitempty"` } func (op *Op) isChange() bool { return op.Update != nil || op.Insert != nil || op.Remove } func (op *Op) docKey() docKey { return docKey{op.C, op.Id} } func (op *Op) name() string { switch { case op.Update != nil: return "update" case op.Insert != nil: return "insert" case op.Remove: return "remove" case op.Assert != nil: return "assert" } return "none" } const ( // DocExists and DocMissing may be used on an operation's // Assert value to assert that the document with the given // Id exists or does not exist, respectively. DocExists = "d+" DocMissing = "d-" ) // A Runner applies operations as part of a transaction onto any number // of collections within a database. See the Run method for details. type Runner struct { tc *mgo.Collection // txns sc *mgo.Collection // stash lc *mgo.Collection // log } // NewRunner returns a new transaction runner that uses tc to hold its // transactions. // // Multiple transaction collections may exist in a single database, but // all collections that are touched by operations in a given transaction // collection must be handled exclusively by it. // // A second collection with the same name of tc but suffixed by ".stash" // will be used for implementing the transactional behavior of insert // and remove operations. func NewRunner(tc *mgo.Collection) *Runner { return &Runner{tc, tc.Database.C(tc.Name + ".stash"), nil} } var ErrAborted = fmt.Errorf("transaction aborted") // Run creates a new transaction with ops and runs it immediately. // The id parameter specifies the transaction id, and may be written // down ahead of time to later verify the success of the change and // resume it, when the procedure is interrupted for any reason. If // empty, a random id will be generated. // The info parameter, if not nil, is included under the "i" // field of the transaction document. 
// // Operations across documents are not atomically applied, but are // guaranteed to be eventually all applied in the order provided or // all aborted, as long as the affected documents are only modified // through transactions. If documents are simultaneously modified // by transactions and out of transactions the behavior is undefined. // // If Run returns no errors, all operations were applied successfully. // If it returns ErrAborted, one or more operations can't be applied // and the transaction was entirely aborted with no changes performed. // Otherwise, if the transaction is interrupted while running for any // reason, it may be resumed explicitly or by attempting to apply // another transaction on any of the documents targeted by ops, as // long as the interruption was made after the transaction document // itself was inserted. Run Resume with the obtained transaction id // to confirm whether the transaction was applied or not. // // Any number of transactions may be run concurrently, with one // runner or many. func (r *Runner) Run(ops []Op, id bson.ObjectId, info interface{}) (err error) { const efmt = "error in transaction op %d: %s" for i := range ops { op := &ops[i] if op.C == "" || op.Id == nil { return fmt.Errorf(efmt, i, "C or Id missing") } changes := 0 if op.Insert != nil { changes++ } if op.Update != nil { changes++ } if op.Remove { changes++ } if changes > 1 { return fmt.Errorf(efmt, i, "more than one of Insert/Update/Remove set") } if changes == 0 && op.Assert == nil { return fmt.Errorf(efmt, i, "none of Assert/Insert/Update/Remove set") } } if id == "" { id = bson.NewObjectId() } // Insert transaction sooner rather than later, to stay on the safer side. 
t := transaction{ Id: id, Ops: ops, State: tpreparing, Info: info, } if err = r.tc.Insert(&t); err != nil { return err } if err = flush(r, &t); err != nil { return err } if t.State == taborted { return ErrAborted } else if t.State != tapplied { panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State)) } return nil } // ResumeAll resumes all pending transactions. All ErrAborted errors // from individual transactions are ignored. func (r *Runner) ResumeAll() (err error) { debugf("Resuming all unfinished transactions") iter := r.tc.Find(bson.D{{"s", bson.D{{"$in", []state{tpreparing, tprepared, tapplying}}}}}).Iter() var t transaction for iter.Next(&t) { if t.State == tapplied || t.State == taborted { continue } debugf("Resuming %s from %q", t.Id, t.State) if err := flush(r, &t); err != nil { return err } if !t.done() { panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State)) } } return nil } // Resume resumes the transaction with id. It returns mgo.ErrNotFound // if the transaction is not found. Otherwise, it has the same semantics // of the Run method after the transaction is inserted. func (r *Runner) Resume(id bson.ObjectId) (err error) { t, err := r.load(id) if err != nil { return err } if !t.done() { debugf("Resuming %s from %q", t, t.State) if err := flush(r, t); err != nil { return err } } if t.State == taborted { return ErrAborted } else if t.State != tapplied { panic(fmt.Errorf("invalid state for %s after flush: %q", t, t.State)) } return nil } // ChangeLog enables logging of changes to the given collection // every time a transaction that modifies content is done being // applied. // // Saved documents are in the format: // // {"_id": , : {"d": [, ...], "r": [, ...]}} // // The document revision is the value of the txn-revno field after // the change has been applied. Negative values indicate the document // was not present in the collection. 
Revisions will not change when // updates or removes are applied to missing documents or inserts are // attempted when the document isn't present. func (r *Runner) ChangeLog(logc *mgo.Collection) { r.lc = logc } // PurgeMissing removes from collections any state that refers to transaction // documents that for whatever reason have been lost from the system (removed // by accident or lost in a hard crash, for example). // // This method should very rarely be needed, if at all, and should never be // used during the normal operation of an application. Its purpose is to put // a system that has seen unavoidable corruption back in a working state. func (r *Runner) PurgeMissing(collections ...string) error { type M map[string]interface{} type S []interface{} type TDoc struct { Id interface{} "_id" TxnQueue []string "txn-queue" } found := make(map[bson.ObjectId]bool) sort.Strings(collections) for _, collection := range collections { c := r.tc.Database.C(collection) iter := c.Find(nil).Select(bson.M{"_id": 1, "txn-queue": 1}).Iter() var tdoc TDoc for iter.Next(&tdoc) { for _, txnToken := range tdoc.TxnQueue { txnId := bson.ObjectIdHex(txnToken[:24]) if found[txnId] { continue } if r.tc.FindId(txnId).One(nil) == nil { found[txnId] = true continue } logf("WARNING: purging from document %s/%v the missing transaction id %s", collection, tdoc.Id, txnId) err := c.UpdateId(tdoc.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}}) if err != nil { return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err) } } } if err := iter.Close(); err != nil { return fmt.Errorf("transaction queue iteration error for %s: %v", collection, err) } } type StashTDoc struct { Id docKey "_id" TxnQueue []string "txn-queue" } iter := r.sc.Find(nil).Select(bson.M{"_id": 1, "txn-queue": 1}).Iter() var stdoc StashTDoc for iter.Next(&stdoc) { for _, txnToken := range stdoc.TxnQueue { txnId := bson.ObjectIdHex(txnToken[:24]) if found[txnId] { continue } if 
r.tc.FindId(txnId).One(nil) == nil { found[txnId] = true continue } logf("WARNING: purging from stash document %s/%v the missing transaction id %s", stdoc.Id.C, stdoc.Id.Id, txnId) err := r.sc.UpdateId(stdoc.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}}) if err != nil { return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err) } } } if err := iter.Close(); err != nil { return fmt.Errorf("transaction stash iteration error: %v", err) } return nil } func (r *Runner) load(id bson.ObjectId) (*transaction, error) { var t transaction err := r.tc.FindId(id).One(&t) if err == mgo.ErrNotFound { return nil, fmt.Errorf("cannot find transaction %s", id) } else if err != nil { return nil, err } return &t, nil } type typeNature int const ( // The order of these values matters. Transactions // from applications using different ordering will // be incompatible with each other. _ typeNature = iota natureString natureInt natureFloat natureBool natureStruct ) func valueNature(v interface{}) (value interface{}, nature typeNature) { rv := reflect.ValueOf(v) switch rv.Kind() { case reflect.String: return rv.String(), natureString case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return rv.Int(), natureInt case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return int64(rv.Uint()), natureInt case reflect.Float32, reflect.Float64: return rv.Float(), natureFloat case reflect.Bool: return rv.Bool(), natureBool case reflect.Struct: return v, natureStruct } panic("document id type unsupported by txn: " + rv.Kind().String()) } type docKey struct { C string Id interface{} } type docKeys []docKey func (ks docKeys) Len() int { return len(ks) } func (ks docKeys) Swap(i, j int) { ks[i], ks[j] = ks[j], ks[i] } func (ks docKeys) Less(i, j int) bool { a, b := ks[i], ks[j] if a.C != b.C { return a.C < b.C } return valuecmp(a.Id, b.Id) == -1 } func valuecmp(a, b interface{}) int { av, an := 
valueNature(a) bv, bn := valueNature(b) if an < bn { return -1 } if an > bn { return 1 } if av == bv { return 0 } var less bool switch an { case natureString: less = av.(string) < bv.(string) case natureInt: less = av.(int64) < bv.(int64) case natureFloat: less = av.(float64) < bv.(float64) case natureBool: less = !av.(bool) && bv.(bool) case natureStruct: less = structcmp(av, bv) == -1 default: panic("unreachable") } if less { return -1 } return 1 } func structcmp(a, b interface{}) int { av := reflect.ValueOf(a) bv := reflect.ValueOf(b) var ai, bi = 0, 0 var an, bn = av.NumField(), bv.NumField() var avi, bvi interface{} var af, bf reflect.StructField for { for ai < an { af = av.Type().Field(ai) if isExported(af.Name) { avi = av.Field(ai).Interface() ai++ break } ai++ } for bi < bn { bf = bv.Type().Field(bi) if isExported(bf.Name) { bvi = bv.Field(bi).Interface() bi++ break } bi++ } if n := valuecmp(avi, bvi); n != 0 { return n } nameA := getFieldName(af) nameB := getFieldName(bf) if nameA < nameB { return -1 } if nameA > nameB { return 1 } if ai == an && bi == bn { return 0 } if ai == an || bi == bn { if ai == bn { return -1 } return 1 } } panic("unreachable") } func isExported(name string) bool { a := name[0] return a >= 'A' && a <= 'Z' } func getFieldName(f reflect.StructField) string { name := f.Tag.Get("bson") if i := strings.Index(name, ","); i >= 0 { name = name[:i] } if name == "" { name = strings.ToLower(f.Name) } return name } charm-2.1.1/src/gopkg.in/mgo.v2/txn/mgo_test.go0000664000175000017500000000351412672604565020207 0ustar marcomarcopackage txn_test import ( "bytes" "gopkg.in/mgo.v2" . 
"gopkg.in/check.v1" "os/exec" "time" ) // ---------------------------------------------------------------------------- // The mgo test suite type MgoSuite struct { output bytes.Buffer server *exec.Cmd session *mgo.Session } var mgoaddr = "127.0.0.1:50017" func (s *MgoSuite) SetUpSuite(c *C) { //mgo.SetDebug(true) mgo.SetStats(true) dbdir := c.MkDir() args := []string{ "--dbpath", dbdir, "--bind_ip", "127.0.0.1", "--port", "50017", "--nssize", "1", "--noprealloc", "--smallfiles", "--nojournal", "-vvvvv", } s.server = exec.Command("mongod", args...) s.server.Stdout = &s.output s.server.Stderr = &s.output err := s.server.Start() if err != nil { panic(err) } } func (s *MgoSuite) TearDownSuite(c *C) { s.server.Process.Kill() s.server.Process.Wait() } func (s *MgoSuite) SetUpTest(c *C) { err := DropAll(mgoaddr) if err != nil { panic(err) } mgo.SetLogger(c) mgo.ResetStats() s.session, err = mgo.Dial(mgoaddr) c.Assert(err, IsNil) } func (s *MgoSuite) TearDownTest(c *C) { if s.session != nil { s.session.Close() } for i := 0; ; i++ { stats := mgo.GetStats() if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 { break } if i == 20 { c.Fatal("Test left sockets in a dirty state") } c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive) time.Sleep(500 * time.Millisecond) } } func DropAll(mongourl string) (err error) { session, err := mgo.Dial(mongourl) if err != nil { return err } defer session.Close() names, err := session.DatabaseNames() if err != nil { return err } for _, name := range names { switch name { case "admin", "local", "config": default: err = session.DB(name).DropDatabase() if err != nil { return err } } } return nil } charm-2.1.1/src/gopkg.in/mgo.v2/txn/sim_test.go0000664000175000017500000002132312672604565020213 0ustar marcomarcopackage txn_test import ( "flag" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" . 
"gopkg.in/check.v1" "math/rand" "time" ) var ( duration = flag.Duration("duration", 200*time.Millisecond, "duration for each simulation") seed = flag.Int64("seed", 0, "seed for rand") ) type params struct { killChance float64 slowdownChance float64 slowdown time.Duration unsafe bool workers int accounts int changeHalf bool reinsertCopy bool reinsertZeroed bool changelog bool changes int } func (s *S) TestSim1Worker(c *C) { simulate(c, params{ workers: 1, accounts: 4, killChance: 0.01, slowdownChance: 0.3, slowdown: 100 * time.Millisecond, }) } func (s *S) TestSim4WorkersDense(c *C) { simulate(c, params{ workers: 4, accounts: 2, killChance: 0.01, slowdownChance: 0.3, slowdown: 100 * time.Millisecond, }) } func (s *S) TestSim4WorkersSparse(c *C) { simulate(c, params{ workers: 4, accounts: 10, killChance: 0.01, slowdownChance: 0.3, slowdown: 100 * time.Millisecond, }) } func (s *S) TestSimHalf1Worker(c *C) { simulate(c, params{ workers: 1, accounts: 4, changeHalf: true, killChance: 0.01, slowdownChance: 0.3, slowdown: 100 * time.Millisecond, }) } func (s *S) TestSimHalf4WorkersDense(c *C) { simulate(c, params{ workers: 4, accounts: 2, changeHalf: true, killChance: 0.01, slowdownChance: 0.3, slowdown: 100 * time.Millisecond, }) } func (s *S) TestSimHalf4WorkersSparse(c *C) { simulate(c, params{ workers: 4, accounts: 10, changeHalf: true, killChance: 0.01, slowdownChance: 0.3, slowdown: 100 * time.Millisecond, }) } func (s *S) TestSimReinsertCopy1Worker(c *C) { simulate(c, params{ workers: 1, accounts: 10, reinsertCopy: true, killChance: 0.01, slowdownChance: 0.3, slowdown: 100 * time.Millisecond, }) } func (s *S) TestSimReinsertCopy4Workers(c *C) { simulate(c, params{ workers: 4, accounts: 10, reinsertCopy: true, killChance: 0.01, slowdownChance: 0.3, slowdown: 100 * time.Millisecond, }) } func (s *S) TestSimReinsertZeroed1Worker(c *C) { simulate(c, params{ workers: 1, accounts: 10, reinsertZeroed: true, killChance: 0.01, slowdownChance: 0.3, slowdown: 100 * 
time.Millisecond, }) } func (s *S) TestSimReinsertZeroed4Workers(c *C) { simulate(c, params{ workers: 4, accounts: 10, reinsertZeroed: true, killChance: 0.01, slowdownChance: 0.3, slowdown: 100 * time.Millisecond, }) } func (s *S) TestSimChangeLog(c *C) { simulate(c, params{ workers: 4, accounts: 10, killChance: 0.01, slowdownChance: 0.3, slowdown: 100 * time.Millisecond, changelog: true, }) } type balanceChange struct { id bson.ObjectId origin int target int amount int } func simulate(c *C, params params) { seed := *seed if seed == 0 { seed = time.Now().UnixNano() } rand.Seed(seed) c.Logf("Seed: %v", seed) txn.SetChaos(txn.Chaos{ KillChance: params.killChance, SlowdownChance: params.slowdownChance, Slowdown: params.slowdown, }) defer txn.SetChaos(txn.Chaos{}) session, err := mgo.Dial(mgoaddr) c.Assert(err, IsNil) defer session.Close() db := session.DB("test") tc := db.C("tc") runner := txn.NewRunner(tc) tclog := db.C("tc.log") if params.changelog { info := mgo.CollectionInfo{ Capped: true, MaxBytes: 1000000, } err := tclog.Create(&info) c.Assert(err, IsNil) runner.ChangeLog(tclog) } accounts := db.C("accounts") for i := 0; i < params.accounts; i++ { err := accounts.Insert(M{"_id": i, "balance": 300}) c.Assert(err, IsNil) } var stop time.Time if params.changes <= 0 { stop = time.Now().Add(*duration) } max := params.accounts if params.reinsertCopy || params.reinsertZeroed { max = int(float64(params.accounts) * 1.5) } changes := make(chan balanceChange, 1024) //session.SetMode(mgo.Eventual, true) for i := 0; i < params.workers; i++ { go func() { n := 0 for { if n > 0 && n == params.changes { break } if !stop.IsZero() && time.Now().After(stop) { break } change := balanceChange{ id: bson.NewObjectId(), origin: rand.Intn(max), target: rand.Intn(max), amount: 100, } var old Account var oldExists bool if params.reinsertCopy || params.reinsertZeroed { if err := accounts.FindId(change.origin).One(&old); err != mgo.ErrNotFound { c.Check(err, IsNil) change.amount = 
old.Balance oldExists = true } } var ops []txn.Op switch { case params.reinsertCopy && oldExists: ops = []txn.Op{{ C: "accounts", Id: change.origin, Assert: M{"balance": change.amount}, Remove: true, }, { C: "accounts", Id: change.target, Assert: txn.DocMissing, Insert: M{"balance": change.amount}, }} case params.reinsertZeroed && oldExists: ops = []txn.Op{{ C: "accounts", Id: change.target, Assert: txn.DocMissing, Insert: M{"balance": 0}, }, { C: "accounts", Id: change.origin, Assert: M{"balance": change.amount}, Remove: true, }, { C: "accounts", Id: change.target, Assert: txn.DocExists, Update: M{"$inc": M{"balance": change.amount}}, }} case params.changeHalf: ops = []txn.Op{{ C: "accounts", Id: change.origin, Assert: M{"balance": M{"$gte": change.amount}}, Update: M{"$inc": M{"balance": -change.amount / 2}}, }, { C: "accounts", Id: change.target, Assert: txn.DocExists, Update: M{"$inc": M{"balance": change.amount / 2}}, }, { C: "accounts", Id: change.origin, Update: M{"$inc": M{"balance": -change.amount / 2}}, }, { C: "accounts", Id: change.target, Update: M{"$inc": M{"balance": change.amount / 2}}, }} default: ops = []txn.Op{{ C: "accounts", Id: change.origin, Assert: M{"balance": M{"$gte": change.amount}}, Update: M{"$inc": M{"balance": -change.amount}}, }, { C: "accounts", Id: change.target, Assert: txn.DocExists, Update: M{"$inc": M{"balance": change.amount}}, }} } err = runner.Run(ops, change.id, nil) if err != nil && err != txn.ErrAborted && err != txn.ErrChaos { c.Check(err, IsNil) } n++ changes <- change } changes <- balanceChange{} }() } alive := params.workers changeLog := make([]balanceChange, 0, 1024) for alive > 0 { change := <-changes if change.id == "" { alive-- } else { changeLog = append(changeLog, change) } } c.Check(len(changeLog), Not(Equals), 0, Commentf("No operations were even attempted.")) txn.SetChaos(txn.Chaos{}) err = runner.ResumeAll() c.Assert(err, IsNil) n, err := accounts.Count() c.Check(err, IsNil) c.Check(n, Equals, 
params.accounts, Commentf("Number of accounts has changed.")) n, err = accounts.Find(M{"balance": M{"$lt": 0}}).Count() c.Check(err, IsNil) c.Check(n, Equals, 0, Commentf("There are %d accounts with negative balance.", n)) globalBalance := 0 iter := accounts.Find(nil).Iter() account := Account{} for iter.Next(&account) { globalBalance += account.Balance } c.Check(iter.Close(), IsNil) c.Check(globalBalance, Equals, params.accounts*300, Commentf("Total amount of money should be constant.")) // Compute and verify the exact final state of all accounts. balance := make(map[int]int) for i := 0; i < params.accounts; i++ { balance[i] += 300 } var applied, aborted int for _, change := range changeLog { err := runner.Resume(change.id) if err == txn.ErrAborted { aborted++ continue } else if err != nil { c.Fatalf("resuming %s failed: %v", change.id, err) } balance[change.origin] -= change.amount balance[change.target] += change.amount applied++ } iter = accounts.Find(nil).Iter() for iter.Next(&account) { c.Assert(account.Balance, Equals, balance[account.Id]) } c.Check(iter.Close(), IsNil) c.Logf("Total transactions: %d (%d applied, %d aborted)", len(changeLog), applied, aborted) if params.changelog { n, err := tclog.Count() c.Assert(err, IsNil) // Check if the capped collection is full. dummy := make([]byte, 1024) tclog.Insert(M{"_id": bson.NewObjectId(), "dummy": dummy}) m, err := tclog.Count() c.Assert(err, IsNil) if m == n+1 { // Wasn't full, so it must have seen it all. c.Assert(err, IsNil) c.Assert(n, Equals, applied) } } } charm-2.1.1/src/gopkg.in/mgo.v2/txn/dockey_test.go0000664000175000017500000000536512672604565020711 0ustar marcomarcopackage txn import ( "sort" . 
"gopkg.in/check.v1" ) type DocKeySuite struct{} var _ = Suite(&DocKeySuite{}) type T struct { A int B string } type T2 struct { A int B string } type T3 struct { A int B string } type T4 struct { A int B string } type T5 struct { F int Q string } type T6 struct { A int B string } type T7 struct { A bool B float64 } type T8 struct { A int B string } type T9 struct { A int B string C bool } type T10 struct { C int `bson:"a"` D string `bson:"b,omitempty"` } type T11 struct { C int D string } type T12 struct { S string } type T13 struct { p, q, r bool S string } var docKeysTests = [][]docKeys{ {{ {"c", 1}, {"c", 5}, {"c", 2}, }, { {"c", 1}, {"c", 2}, {"c", 5}, }}, {{ {"c", "foo"}, {"c", "bar"}, {"c", "bob"}, }, { {"c", "bar"}, {"c", "bob"}, {"c", "foo"}, }}, {{ {"c", 0.2}, {"c", 0.07}, {"c", 0.9}, }, { {"c", 0.07}, {"c", 0.2}, {"c", 0.9}, }}, {{ {"c", true}, {"c", false}, {"c", true}, }, { {"c", false}, {"c", true}, {"c", true}, }}, {{ {"c", T{1, "b"}}, {"c", T{1, "a"}}, {"c", T{0, "b"}}, {"c", T{0, "a"}}, }, { {"c", T{0, "a"}}, {"c", T{0, "b"}}, {"c", T{1, "a"}}, {"c", T{1, "b"}}, }}, {{ {"c", T{1, "a"}}, {"c", T{0, "a"}}, }, { {"c", T{0, "a"}}, {"c", T{1, "a"}}, }}, {{ {"c", T3{0, "b"}}, {"c", T2{1, "b"}}, {"c", T3{1, "a"}}, {"c", T2{0, "a"}}, }, { {"c", T2{0, "a"}}, {"c", T3{0, "b"}}, {"c", T3{1, "a"}}, {"c", T2{1, "b"}}, }}, {{ {"c", T5{1, "b"}}, {"c", T4{1, "b"}}, {"c", T5{0, "a"}}, {"c", T4{0, "a"}}, }, { {"c", T4{0, "a"}}, {"c", T5{0, "a"}}, {"c", T4{1, "b"}}, {"c", T5{1, "b"}}, }}, {{ {"c", T6{1, "b"}}, {"c", T7{true, 0.2}}, {"c", T6{0, "a"}}, {"c", T7{false, 0.04}}, }, { {"c", T6{0, "a"}}, {"c", T6{1, "b"}}, {"c", T7{false, 0.04}}, {"c", T7{true, 0.2}}, }}, {{ {"c", T9{1, "b", true}}, {"c", T8{1, "b"}}, {"c", T9{0, "a", false}}, {"c", T8{0, "a"}}, }, { {"c", T9{0, "a", false}}, {"c", T8{0, "a"}}, {"c", T9{1, "b", true}}, {"c", T8{1, "b"}}, }}, {{ {"b", 2}, {"a", 5}, {"c", 2}, {"b", 1}, }, { {"a", 5}, {"b", 1}, {"b", 2}, {"c", 2}, }}, {{ {"c", T11{1, "a"}}, 
{"c", T11{1, "a"}}, {"c", T10{1, "a"}}, }, { {"c", T10{1, "a"}}, {"c", T11{1, "a"}}, {"c", T11{1, "a"}}, }}, {{ {"c", T12{"a"}}, {"c", T13{false, true, false, "a"}}, {"c", T12{"b"}}, {"c", T13{false, true, false, "b"}}, }, { {"c", T12{"a"}}, {"c", T13{false, true, false, "a"}}, {"c", T12{"b"}}, {"c", T13{false, true, false, "b"}}, }}, } func (s *DocKeySuite) TestSort(c *C) { for _, test := range docKeysTests { keys := test[0] expected := test[1] sort.Sort(keys) c.Check(keys, DeepEquals, expected) } } charm-2.1.1/src/gopkg.in/mgo.v2/txn/chaos.go0000664000175000017500000000263312672604565017464 0ustar marcomarcopackage txn import ( mrand "math/rand" "time" ) var chaosEnabled = false var chaosSetting Chaos // Chaos holds parameters for the failure injection mechanism. type Chaos struct { // KillChance is the 0.0 to 1.0 chance that a given checkpoint // within the algorithm will raise an interruption that will // stop the procedure. KillChance float64 // SlowdownChance is the 0.0 to 1.0 chance that a given checkpoint // within the algorithm will be delayed by Slowdown before // continuing. SlowdownChance float64 Slowdown time.Duration // If Breakpoint is set, the above settings will only affect the // named breakpoint. Breakpoint string } // SetChaos sets the failure injection parameters to c. 
func SetChaos(c Chaos) { chaosSetting = c chaosEnabled = c.KillChance > 0 || c.SlowdownChance > 0 } func chaos(bpname string) { if !chaosEnabled { return } switch chaosSetting.Breakpoint { case "", bpname: kc := chaosSetting.KillChance if kc > 0 && mrand.Intn(1000) < int(kc*1000) { panic(chaosError{}) } if bpname == "insert" { return } sc := chaosSetting.SlowdownChance if sc > 0 && mrand.Intn(1000) < int(sc*1000) { time.Sleep(chaosSetting.Slowdown) } } } type chaosError struct{} func (f *flusher) handleChaos(err *error) { v := recover() if v == nil { return } if _, ok := v.(chaosError); ok { f.debugf("Killed by chaos!") *err = ErrChaos return } panic(v) } charm-2.1.1/src/gopkg.in/mgo.v2/internal/0000775000175000017500000000000012672604565017037 5ustar marcomarcocharm-2.1.1/src/gopkg.in/mgo.v2/internal/scram/0000775000175000017500000000000012672604565020144 5ustar marcomarcocharm-2.1.1/src/gopkg.in/mgo.v2/internal/scram/scram.go0000664000175000017500000001703612672604565021607 0ustar marcomarco// mgo - MongoDB driver for Go // // Copyright (c) 2014 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Pacakage scram implements a SCRAM-{SHA-1,etc} client per RFC5802. // // http://tools.ietf.org/html/rfc5802 // package scram import ( "bytes" "crypto/hmac" "crypto/rand" "encoding/base64" "fmt" "hash" "strconv" "strings" ) // Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc). // // A Client may be used within a SASL conversation with logic resembling: // // var in []byte // var client = scram.NewClient(sha1.New, user, pass) // for client.Step(in) { // out := client.Out() // // send out to server // in := serverOut // } // if client.Err() != nil { // // auth failed // } // type Client struct { newHash func() hash.Hash user string pass string step int out bytes.Buffer err error clientNonce []byte serverNonce []byte saltedPass []byte authMsg bytes.Buffer } // NewClient returns a new SCRAM-* client with the provided hash algorithm. // // For SCRAM-SHA-1, for example, use: // // client := scram.NewClient(sha1.New, user, pass) // func NewClient(newHash func() hash.Hash, user, pass string) *Client { c := &Client{ newHash: newHash, user: user, pass: pass, } c.out.Grow(256) c.authMsg.Grow(256) return c } // Out returns the data to be sent to the server in the current step. func (c *Client) Out() []byte { if c.out.Len() == 0 { return nil } return c.out.Bytes() } // Err returns the error that ocurred, or nil if there were no errors. func (c *Client) Err() error { return c.err } // SetNonce sets the client nonce to the provided value. 
// If not set, the nonce is generated automatically out of crypto/rand on the first step. func (c *Client) SetNonce(nonce []byte) { c.clientNonce = nonce } var escaper = strings.NewReplacer("=", "=3D", ",", "=2C") // Step processes the incoming data from the server and makes the // next round of data for the server available via Client.Out. // Step returns false if there are no errors and more data is // still expected. func (c *Client) Step(in []byte) bool { c.out.Reset() if c.step > 2 || c.err != nil { return false } c.step++ switch c.step { case 1: c.err = c.step1(in) case 2: c.err = c.step2(in) case 3: c.err = c.step3(in) } return c.step > 2 || c.err != nil } func (c *Client) step1(in []byte) error { if len(c.clientNonce) == 0 { const nonceLen = 6 buf := make([]byte, nonceLen + b64.EncodedLen(nonceLen)) if _, err := rand.Read(buf[:nonceLen]); err != nil { return fmt.Errorf("cannot read random SCRAM-SHA-1 nonce from operating system: %v", err) } c.clientNonce = buf[nonceLen:] b64.Encode(c.clientNonce, buf[:nonceLen]) } c.authMsg.WriteString("n=") escaper.WriteString(&c.authMsg, c.user) c.authMsg.WriteString(",r=") c.authMsg.Write(c.clientNonce) c.out.WriteString("n,,") c.out.Write(c.authMsg.Bytes()) return nil } var b64 = base64.StdEncoding func (c *Client) step2(in []byte) error { c.authMsg.WriteByte(',') c.authMsg.Write(in) fields := bytes.Split(in, []byte(",")) if len(fields) != 3 { return fmt.Errorf("expected 3 fields in first SCRAM-SHA-1 server message, got %d: %q", len(fields), in) } if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 { return fmt.Errorf("server sent an invalid SCRAM-SHA-1 nonce: %q", fields[0]) } if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 { return fmt.Errorf("server sent an invalid SCRAM-SHA-1 salt: %q", fields[1]) } if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 { return fmt.Errorf("server sent an invalid SCRAM-SHA-1 iteration count: %q", fields[2]) } c.serverNonce = 
fields[0][2:] if !bytes.HasPrefix(c.serverNonce, c.clientNonce) { return fmt.Errorf("server SCRAM-SHA-1 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce) } salt := make([]byte, b64.DecodedLen(len(fields[1][2:]))) n, err := b64.Decode(salt, fields[1][2:]) if err != nil { return fmt.Errorf("cannot decode SCRAM-SHA-1 salt sent by server: %q", fields[1]) } salt = salt[:n] iterCount, err := strconv.Atoi(string(fields[2][2:])) if err != nil { return fmt.Errorf("server sent an invalid SCRAM-SHA-1 iteration count: %q", fields[2]) } c.saltPassword(salt, iterCount) c.authMsg.WriteString(",c=biws,r=") c.authMsg.Write(c.serverNonce) c.out.WriteString("c=biws,r=") c.out.Write(c.serverNonce) c.out.WriteString(",p=") c.out.Write(c.clientProof()) return nil } func (c *Client) step3(in []byte) error { var isv, ise bool var fields = bytes.Split(in, []byte(",")) if len(fields) == 1 { isv = bytes.HasPrefix(fields[0], []byte("v=")) ise = bytes.HasPrefix(fields[0], []byte("e=")) } if ise { return fmt.Errorf("SCRAM-SHA-1 authentication error: %s", fields[0][2:]) } else if !isv { return fmt.Errorf("unsupported SCRAM-SHA-1 final message from server: %q", in) } if !bytes.Equal(c.serverSignature(), fields[0][2:]) { return fmt.Errorf("cannot authenticate SCRAM-SHA-1 server signature: %q", fields[0][2:]) } return nil } func (c *Client) saltPassword(salt []byte, iterCount int) { mac := hmac.New(c.newHash, []byte(c.pass)) mac.Write(salt) mac.Write([]byte{0, 0, 0, 1}) ui := mac.Sum(nil) hi := make([]byte, len(ui)) copy(hi, ui) for i := 1; i < iterCount; i++ { mac.Reset() mac.Write(ui) mac.Sum(ui[:0]) for j, b := range ui { hi[j] ^= b } } c.saltedPass = hi } func (c *Client) clientProof() []byte { mac := hmac.New(c.newHash, c.saltedPass) mac.Write([]byte("Client Key")) clientKey := mac.Sum(nil) hash := c.newHash() hash.Write(clientKey) storedKey := hash.Sum(nil) mac = hmac.New(c.newHash, storedKey) mac.Write(c.authMsg.Bytes()) clientProof := 
mac.Sum(nil) for i, b := range clientKey { clientProof[i] ^= b } clientProof64 := make([]byte, b64.EncodedLen(len(clientProof))) b64.Encode(clientProof64, clientProof) return clientProof64 } func (c *Client) serverSignature() []byte { mac := hmac.New(c.newHash, c.saltedPass) mac.Write([]byte("Server Key")) serverKey := mac.Sum(nil) mac = hmac.New(c.newHash, serverKey) mac.Write(c.authMsg.Bytes()) serverSignature := mac.Sum(nil) encoded := make([]byte, b64.EncodedLen(len(serverSignature))) b64.Encode(encoded, serverSignature) return encoded } charm-2.1.1/src/gopkg.in/mgo.v2/internal/scram/scram_test.go0000664000175000017500000000335112672604565022641 0ustar marcomarcopackage scram_test import ( "crypto/sha1" "testing" . "gopkg.in/check.v1" "gopkg.in/mgo.v2/internal/scram" "strings" ) var _ = Suite(&S{}) func Test(t *testing.T) { TestingT(t) } type S struct{} var tests = [][]string{{ "U: user pencil", "N: fyko+d2lbbFgONRv9qkxdawL", "C: n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL", "S: r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096", "C: c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=", "S: v=rmF9pqV8S7suAoZWja4dJRkFsKQ=", }, { "U: root fe8c89e308ec08763df36333cbf5d3a2", "N: OTcxNDk5NjM2MzE5", "C: n,,n=root,r=OTcxNDk5NjM2MzE5", "S: r=OTcxNDk5NjM2MzE581Ra3provgG0iDsMkDiIAlrh4532dDLp,s=XRDkVrFC9JuL7/F4tG0acQ==,i=10000", "C: c=biws,r=OTcxNDk5NjM2MzE581Ra3provgG0iDsMkDiIAlrh4532dDLp,p=6y1jp9R7ETyouTXS9fW9k5UHdBc=", "S: v=LBnd9dUJRxdqZiEq91NKP3z/bHA=", }} func (s *S) TestExamples(c *C) { for _, steps := range tests { if len(steps) < 2 || len(steps[0]) < 3 || !strings.HasPrefix(steps[0], "U: ") { c.Fatalf("Invalid test: %#v", steps) } auth := strings.Fields(steps[0][3:]) client := scram.NewClient(sha1.New, auth[0], auth[1]) first, done := true, false c.Logf("-----") c.Logf("%s", steps[0]) for _, step := range steps[1:] { c.Logf("%s", step) switch step[:3] { case "N: ": client.SetNonce([]byte(step[3:])) case "C: ": if first 
{ first = false done = client.Step(nil) } c.Assert(done, Equals, false) c.Assert(client.Err(), IsNil) c.Assert(string(client.Out()), Equals, step[3:]) case "S: ": first = false done = client.Step([]byte(step[3:])) default: panic("invalid test line: " + step) } } c.Assert(done, Equals, true) c.Assert(client.Err(), IsNil) } } charm-2.1.1/src/gopkg.in/mgo.v2/internal/sasl/0000775000175000017500000000000012672604565020001 5ustar marcomarcocharm-2.1.1/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.h0000664000175000017500000000064412672604565022672 0ustar marcomarco#include #include "sspi_windows.h" SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle* cred_handle, char* username, char* password, char* domain); int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* target); int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* user_plus_realm); charm-2.1.1/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.go0000664000175000017500000000710612672604565023050 0ustar marcomarcopackage sasl // #include "sasl_windows.h" import "C" import ( "fmt" "strings" "sync" "unsafe" ) type saslStepper interface { Step(serverData []byte) (clientData []byte, done bool, err error) Close() } type saslSession struct { // Credentials mech string service string host string userPlusRealm string target string domain string // Internal state authComplete bool errored bool step int // C internal state credHandle C.CredHandle context C.CtxtHandle hasContext C.int // Keep track of pointers we need to explicitly free stringsToFree []*C.char } var initError error var initOnce sync.Once func initSSPI() { rc := C.load_secur32_dll() if rc != 0 { initError = fmt.Errorf("Error loading libraries: %v", rc) } } func New(username, password, mechanism, service, host string) (saslStepper, error) { initOnce.Do(initSSPI) ss := &saslSession{mech: mechanism, hasContext: 0, userPlusRealm: username} if 
service == "" { service = "mongodb" } if i := strings.Index(host, ":"); i >= 0 { host = host[:i] } ss.service = service ss.host = host usernameComponents := strings.Split(username, "@") if len(usernameComponents) < 2 { return nil, fmt.Errorf("Username '%v' doesn't contain a realm!", username) } user := usernameComponents[0] ss.domain = usernameComponents[1] ss.target = fmt.Sprintf("%s/%s", ss.service, ss.host) var status C.SECURITY_STATUS // Step 0: call AcquireCredentialsHandle to get a nice SSPI CredHandle if len(password) > 0 { status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), ss.cstr(password), ss.cstr(ss.domain)) } else { status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), nil, ss.cstr(ss.domain)) } if status != C.SEC_E_OK { ss.errored = true return nil, fmt.Errorf("Couldn't create new SSPI client, error code %v", status) } return ss, nil } func (ss *saslSession) cstr(s string) *C.char { cstr := C.CString(s) ss.stringsToFree = append(ss.stringsToFree, cstr) return cstr } func (ss *saslSession) Close() { for _, cstr := range ss.stringsToFree { C.free(unsafe.Pointer(cstr)) } } func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) { ss.step++ if ss.step > 10 { return nil, false, fmt.Errorf("too many SSPI steps without authentication") } var buffer C.PVOID var bufferLength C.ULONG if len(serverData) > 0 { buffer = (C.PVOID)(unsafe.Pointer(&serverData[0])) bufferLength = C.ULONG(len(serverData)) } var status C.int if ss.authComplete { // Step 3: last bit of magic to use the correct server credentials status = C.sspi_send_client_authz_id(&ss.context, &buffer, &bufferLength, ss.cstr(ss.userPlusRealm)) } else { // Step 1 + Step 2: set up security context with the server and TGT status = C.sspi_step(&ss.credHandle, ss.hasContext, &ss.context, &buffer, &bufferLength, ss.cstr(ss.target)) } if buffer != C.PVOID(nil) { defer C.free(unsafe.Pointer(buffer)) } if status != C.SEC_E_OK && status != 
C.SEC_I_CONTINUE_NEEDED { ss.errored = true return nil, false, ss.handleSSPIErrorCode(status) } clientData = C.GoBytes(unsafe.Pointer(buffer), C.int(bufferLength)) if status == C.SEC_E_OK { ss.authComplete = true return clientData, true, nil } else { ss.hasContext = 1 return clientData, false, nil } } func (ss *saslSession) handleSSPIErrorCode(code C.int) error { switch { case code == C.SEC_E_TARGET_UNKNOWN: return fmt.Errorf("Target %v@%v not found", ss.target, ss.domain) } return fmt.Errorf("Unknown error doing step %v, error code %v", ss.step, code) } charm-2.1.1/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.c0000664000175000017500000000747412672604565022675 0ustar marcomarco#include "sasl_windows.h" static const LPSTR SSPI_PACKAGE_NAME = "kerberos"; SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle *cred_handle, char *username, char *password, char *domain) { SEC_WINNT_AUTH_IDENTITY auth_identity; SECURITY_INTEGER ignored; auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_ANSI; auth_identity.User = (LPSTR) username; auth_identity.UserLength = strlen(username); auth_identity.Password = (LPSTR) password; auth_identity.PasswordLength = strlen(password); auth_identity.Domain = (LPSTR) domain; auth_identity.DomainLength = strlen(domain); return call_sspi_acquire_credentials_handle(NULL, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, &auth_identity, NULL, NULL, cred_handle, &ignored); } int sspi_step(CredHandle *cred_handle, int has_context, CtxtHandle *context, PVOID *buffer, ULONG *buffer_length, char *target) { SecBufferDesc inbuf; SecBuffer in_bufs[1]; SecBufferDesc outbuf; SecBuffer out_bufs[1]; if (has_context > 0) { // If we already have a context, we now have data to send. // Put this data in an inbuf. 
inbuf.ulVersion = SECBUFFER_VERSION; inbuf.cBuffers = 1; inbuf.pBuffers = in_bufs; in_bufs[0].pvBuffer = *buffer; in_bufs[0].cbBuffer = *buffer_length; in_bufs[0].BufferType = SECBUFFER_TOKEN; } outbuf.ulVersion = SECBUFFER_VERSION; outbuf.cBuffers = 1; outbuf.pBuffers = out_bufs; out_bufs[0].pvBuffer = NULL; out_bufs[0].cbBuffer = 0; out_bufs[0].BufferType = SECBUFFER_TOKEN; ULONG context_attr = 0; int ret = call_sspi_initialize_security_context(cred_handle, has_context > 0 ? context : NULL, (LPSTR) target, ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_MUTUAL_AUTH, 0, SECURITY_NETWORK_DREP, has_context > 0 ? &inbuf : NULL, 0, context, &outbuf, &context_attr, NULL); *buffer = malloc(out_bufs[0].cbBuffer); *buffer_length = out_bufs[0].cbBuffer; memcpy(*buffer, out_bufs[0].pvBuffer, *buffer_length); return ret; } int sspi_send_client_authz_id(CtxtHandle *context, PVOID *buffer, ULONG *buffer_length, char *user_plus_realm) { SecPkgContext_Sizes sizes; SECURITY_STATUS status = call_sspi_query_context_attributes(context, SECPKG_ATTR_SIZES, &sizes); if (status != SEC_E_OK) { return status; } size_t user_plus_realm_length = strlen(user_plus_realm); int msgSize = 4 + user_plus_realm_length; char *msg = malloc((sizes.cbSecurityTrailer + msgSize + sizes.cbBlockSize) * sizeof(char)); msg[sizes.cbSecurityTrailer + 0] = 1; msg[sizes.cbSecurityTrailer + 1] = 0; msg[sizes.cbSecurityTrailer + 2] = 0; msg[sizes.cbSecurityTrailer + 3] = 0; memcpy(&msg[sizes.cbSecurityTrailer + 4], user_plus_realm, user_plus_realm_length); SecBuffer wrapBufs[3]; SecBufferDesc wrapBufDesc; wrapBufDesc.cBuffers = 3; wrapBufDesc.pBuffers = wrapBufs; wrapBufDesc.ulVersion = SECBUFFER_VERSION; wrapBufs[0].cbBuffer = sizes.cbSecurityTrailer; wrapBufs[0].BufferType = SECBUFFER_TOKEN; wrapBufs[0].pvBuffer = msg; wrapBufs[1].cbBuffer = msgSize; wrapBufs[1].BufferType = SECBUFFER_DATA; wrapBufs[1].pvBuffer = msg + sizes.cbSecurityTrailer; wrapBufs[2].cbBuffer = sizes.cbBlockSize; wrapBufs[2].BufferType = 
SECBUFFER_PADDING; wrapBufs[2].pvBuffer = msg + sizes.cbSecurityTrailer + msgSize; status = call_sspi_encrypt_message(context, SECQOP_WRAP_NO_ENCRYPT, &wrapBufDesc, 0); if (status != SEC_E_OK) { free(msg); return status; } *buffer_length = wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer + wrapBufs[2].cbBuffer; *buffer = malloc(*buffer_length); memcpy(*buffer, wrapBufs[0].pvBuffer, wrapBufs[0].cbBuffer); memcpy(*buffer + wrapBufs[0].cbBuffer, wrapBufs[1].pvBuffer, wrapBufs[1].cbBuffer); memcpy(*buffer + wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer, wrapBufs[2].pvBuffer, wrapBufs[2].cbBuffer); free(msg); return SEC_E_OK; } charm-2.1.1/src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.h0000664000175000017500000000576612672604565022720 0ustar marcomarco// Code adapted from the NodeJS kerberos library: // // https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.h // // Under the terms of the Apache License, Version 2.0: // // http://www.apache.org/licenses/LICENSE-2.0 // #ifndef SSPI_WINDOWS_H #define SSPI_WINDOWS_H #define SECURITY_WIN32 1 #include #include int load_secur32_dll(); SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo); typedef DWORD (WINAPI *encryptMessage_fn)(PCtxtHandle phContext, ULONG fQOP, PSecBufferDesc pMessage, ULONG MessageSeqNo); SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle( LPSTR pszPrincipal, // Name of principal LPSTR pszPackage, // Name of package unsigned long fCredentialUse, // Flags indicating use void *pvLogonId, // Pointer to logon ID void *pAuthData, // Package specific data SEC_GET_KEY_FN pGetKeyFn, // Pointer to GetKey() func void *pvGetKeyArgument, // Value to pass to GetKey() PCredHandle phCredential, // (out) Cred Handle PTimeStamp ptsExpiry // (out) Lifetime (optional) ); typedef DWORD (WINAPI *acquireCredentialsHandle_fn)( LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse, void *pvLogonId, 
void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument, PCredHandle phCredential, PTimeStamp ptsExpiry ); SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context( PCredHandle phCredential, // Cred to base context PCtxtHandle phContext, // Existing context (OPT) LPSTR pszTargetName, // Name of target unsigned long fContextReq, // Context Requirements unsigned long Reserved1, // Reserved, MBZ unsigned long TargetDataRep, // Data rep of target PSecBufferDesc pInput, // Input Buffers unsigned long Reserved2, // Reserved, MBZ PCtxtHandle phNewContext, // (out) New Context handle PSecBufferDesc pOutput, // (inout) Output Buffers unsigned long *pfContextAttr, // (out) Context attrs PTimeStamp ptsExpiry // (out) Life span (OPT) ); typedef DWORD (WINAPI *initializeSecurityContext_fn)( PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName, unsigned long fContextReq, unsigned long Reserved1, unsigned long TargetDataRep, PSecBufferDesc pInput, unsigned long Reserved2, PCtxtHandle phNewContext, PSecBufferDesc pOutput, unsigned long *pfContextAttr, PTimeStamp ptsExpiry); SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes( PCtxtHandle phContext, // Context to query unsigned long ulAttribute, // Attribute to query void *pBuffer // Buffer for attributes ); typedef DWORD (WINAPI *queryContextAttributes_fn)( PCtxtHandle phContext, unsigned long ulAttribute, void *pBuffer); #endif // SSPI_WINDOWS_H charm-2.1.1/src/gopkg.in/mgo.v2/internal/sasl/sasl.go0000664000175000017500000000661712672604565021304 0ustar marcomarco// Package sasl is an implementation detail of the mgo package. // // This package is not meant to be used by itself. 
// // +build !windows package sasl // #cgo LDFLAGS: -lsasl2 // // struct sasl_conn {}; // // #include // #include // // sasl_callback_t *mgo_sasl_callbacks(const char *username, const char *password); // import "C" import ( "fmt" "strings" "sync" "unsafe" ) type saslStepper interface { Step(serverData []byte) (clientData []byte, done bool, err error) Close() } type saslSession struct { conn *C.sasl_conn_t step int mech string cstrings []*C.char callbacks *C.sasl_callback_t } var initError error var initOnce sync.Once func initSASL() { rc := C.sasl_client_init(nil) if rc != C.SASL_OK { initError = saslError(rc, nil, "cannot initialize SASL library") } } func New(username, password, mechanism, service, host string) (saslStepper, error) { initOnce.Do(initSASL) if initError != nil { return nil, initError } ss := &saslSession{mech: mechanism} if service == "" { service = "mongodb" } if i := strings.Index(host, ":"); i >= 0 { host = host[:i] } ss.callbacks = C.mgo_sasl_callbacks(ss.cstr(username), ss.cstr(password)) rc := C.sasl_client_new(ss.cstr(service), ss.cstr(host), nil, nil, ss.callbacks, 0, &ss.conn) if rc != C.SASL_OK { ss.Close() return nil, saslError(rc, nil, "cannot create new SASL client") } return ss, nil } func (ss *saslSession) cstr(s string) *C.char { cstr := C.CString(s) ss.cstrings = append(ss.cstrings, cstr) return cstr } func (ss *saslSession) Close() { for _, cstr := range ss.cstrings { C.free(unsafe.Pointer(cstr)) } ss.cstrings = nil if ss.callbacks != nil { C.free(unsafe.Pointer(ss.callbacks)) } // The documentation of SASL dispose makes it clear that this should only // be done when the connection is done, not when the authentication phase // is done, because an encryption layer may have been negotiated. // Even then, we'll do this for now, because it's simpler and prevents // keeping track of this state for every socket. If it breaks, we'll fix it. 
C.sasl_dispose(&ss.conn) } func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) { ss.step++ if ss.step > 10 { return nil, false, fmt.Errorf("too many SASL steps without authentication") } var cclientData *C.char var cclientDataLen C.uint var rc C.int if ss.step == 1 { var mechanism *C.char // ignored - must match cred rc = C.sasl_client_start(ss.conn, ss.cstr(ss.mech), nil, &cclientData, &cclientDataLen, &mechanism) } else { var cserverData *C.char var cserverDataLen C.uint if len(serverData) > 0 { cserverData = (*C.char)(unsafe.Pointer(&serverData[0])) cserverDataLen = C.uint(len(serverData)) } rc = C.sasl_client_step(ss.conn, cserverData, cserverDataLen, nil, &cclientData, &cclientDataLen) } if cclientData != nil && cclientDataLen > 0 { clientData = C.GoBytes(unsafe.Pointer(cclientData), C.int(cclientDataLen)) } if rc == C.SASL_OK { return clientData, true, nil } if rc == C.SASL_CONTINUE { return clientData, false, nil } return nil, false, saslError(rc, ss.conn, "cannot establish SASL session") } func saslError(rc C.int, conn *C.sasl_conn_t, msg string) error { var detail string if conn == nil { detail = C.GoString(C.sasl_errstring(rc, nil, nil)) } else { detail = C.GoString(C.sasl_errdetail(conn)) } return fmt.Errorf(msg + ": " + detail) } charm-2.1.1/src/gopkg.in/mgo.v2/internal/sasl/sasl.c0000664000175000017500000000305112672604565021106 0ustar marcomarco// +build !windows #include #include #include #include static int mgo_sasl_simple(void *context, int id, const char **result, unsigned int *len) { if (!result) { return SASL_BADPARAM; } switch (id) { case SASL_CB_USER: *result = (char *)context; break; case SASL_CB_AUTHNAME: *result = (char *)context; break; case SASL_CB_LANGUAGE: *result = NULL; break; default: return SASL_BADPARAM; } if (len) { *len = *result ? 
strlen(*result) : 0; } return SASL_OK; } typedef int (*callback)(void); static int mgo_sasl_secret(sasl_conn_t *conn, void *context, int id, sasl_secret_t **result) { if (!conn || !result || id != SASL_CB_PASS) { return SASL_BADPARAM; } *result = (sasl_secret_t *)context; return SASL_OK; } sasl_callback_t *mgo_sasl_callbacks(const char *username, const char *password) { sasl_callback_t *cb = malloc(4 * sizeof(sasl_callback_t)); int n = 0; size_t len = strlen(password); sasl_secret_t *secret = (sasl_secret_t*)malloc(sizeof(sasl_secret_t) + len); if (!secret) { free(cb); return NULL; } strcpy((char *)secret->data, password); secret->len = len; cb[n].id = SASL_CB_PASS; cb[n].proc = (callback)&mgo_sasl_secret; cb[n].context = secret; n++; cb[n].id = SASL_CB_USER; cb[n].proc = (callback)&mgo_sasl_simple; cb[n].context = (char*)username; n++; cb[n].id = SASL_CB_AUTHNAME; cb[n].proc = (callback)&mgo_sasl_simple; cb[n].context = (char*)username; n++; cb[n].id = SASL_CB_LIST_END; cb[n].proc = NULL; cb[n].context = NULL; return cb; } charm-2.1.1/src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.c0000664000175000017500000000660112672604565022700 0ustar marcomarco// Code adapted from the NodeJS kerberos library: // // https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.c // // Under the terms of the Apache License, Version 2.0: // // http://www.apache.org/licenses/LICENSE-2.0 // #include #include "sspi_windows.h" static HINSTANCE sspi_secur32_dll = NULL; int load_secur32_dll() { sspi_secur32_dll = LoadLibrary("secur32.dll"); if (sspi_secur32_dll == NULL) { return GetLastError(); } return 0; } SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo) { if (sspi_secur32_dll == NULL) { return -1; } encryptMessage_fn pfn_encryptMessage = (encryptMessage_fn) GetProcAddress(sspi_secur32_dll, "EncryptMessage"); if (!pfn_encryptMessage) { return -2; } return 
(*pfn_encryptMessage)(phContext, fQOP, pMessage, MessageSeqNo); } SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle( LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse, void *pvLogonId, void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument, PCredHandle phCredential, PTimeStamp ptsExpiry) { if (sspi_secur32_dll == NULL) { return -1; } acquireCredentialsHandle_fn pfn_acquireCredentialsHandle; #ifdef _UNICODE pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(sspi_secur32_dll, "AcquireCredentialsHandleW"); #else pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(sspi_secur32_dll, "AcquireCredentialsHandleA"); #endif if (!pfn_acquireCredentialsHandle) { return -2; } return (*pfn_acquireCredentialsHandle)( pszPrincipal, pszPackage, fCredentialUse, pvLogonId, pAuthData, pGetKeyFn, pvGetKeyArgument, phCredential, ptsExpiry); } SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context( PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName, unsigned long fContextReq, unsigned long Reserved1, unsigned long TargetDataRep, PSecBufferDesc pInput, unsigned long Reserved2, PCtxtHandle phNewContext, PSecBufferDesc pOutput, unsigned long *pfContextAttr, PTimeStamp ptsExpiry) { if (sspi_secur32_dll == NULL) { return -1; } initializeSecurityContext_fn pfn_initializeSecurityContext; #ifdef _UNICODE pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(sspi_secur32_dll, "InitializeSecurityContextW"); #else pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(sspi_secur32_dll, "InitializeSecurityContextA"); #endif if (!pfn_initializeSecurityContext) { return -2; } return (*pfn_initializeSecurityContext)( phCredential, phContext, pszTargetName, fContextReq, Reserved1, TargetDataRep, pInput, Reserved2, phNewContext, pOutput, pfContextAttr, ptsExpiry); } SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes(PCtxtHandle 
phContext, unsigned long ulAttribute, void *pBuffer) { if (sspi_secur32_dll == NULL) { return -1; } queryContextAttributes_fn pfn_queryContextAttributes; #ifdef _UNICODE pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(sspi_secur32_dll, "QueryContextAttributesW"); #else pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(sspi_secur32_dll, "QueryContextAttributesA"); #endif if (!pfn_queryContextAttributes) { return -2; } return (*pfn_queryContextAttributes)(phContext, ulAttribute, pBuffer); } charm-2.1.1/src/gopkg.in/mgo.v2/suite_test.go0000664000175000017500000001337412672604565017752 0ustar marcomarco// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo_test import ( "errors" "flag" "fmt" "net" "os/exec" "runtime" "strconv" "testing" "time" . "gopkg.in/check.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" ) var fast = flag.Bool("fast", false, "Skip slow tests") type M bson.M type cLogger C func (c *cLogger) Output(calldepth int, s string) error { ns := time.Now().UnixNano() t := float64(ns%100e9) / 1e9 ((*C)(c)).Logf("[LOG] %.05f %s", t, s) return nil } func TestAll(t *testing.T) { TestingT(t) } type S struct { session *mgo.Session stopped bool build mgo.BuildInfo frozen []string } func (s *S) versionAtLeast(v ...int) (result bool) { for i := range v { if i == len(s.build.VersionArray) { return false } if s.build.VersionArray[i] != v[i] { return s.build.VersionArray[i] >= v[i] } } return true } var _ = Suite(&S{}) func (s *S) SetUpSuite(c *C) { mgo.SetDebug(true) mgo.SetStats(true) s.StartAll() session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) s.build, err = session.BuildInfo() c.Check(err, IsNil) session.Close() } func (s *S) SetUpTest(c *C) { err := run("mongo --nodb testdb/dropall.js") if err != nil { panic(err.Error()) } mgo.SetLogger((*cLogger)(c)) mgo.ResetStats() } func (s *S) TearDownTest(c *C) { if s.stopped { s.Stop(":40201") s.Stop(":40202") s.Stop(":40203") s.StartAll() } for _, host := range s.frozen { if host != "" { s.Thaw(host) } } var stats mgo.Stats for i := 0; ; i++ { stats = mgo.GetStats() if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 { 
break } if i == 20 { c.Fatal("Test left sockets in a dirty state") } c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive) time.Sleep(500 * time.Millisecond) } for i := 0; ; i++ { stats = mgo.GetStats() if stats.Clusters == 0 { break } if i == 60 { c.Fatal("Test left clusters alive") } c.Logf("Waiting for clusters to die: %d alive", stats.Clusters) time.Sleep(1 * time.Second) } } func (s *S) Stop(host string) { // Give a moment for slaves to sync and avoid getting rollback issues. panicOnWindows() time.Sleep(2 * time.Second) err := run("cd _testdb && supervisorctl stop " + supvName(host)) if err != nil { panic(err) } s.stopped = true } func (s *S) pid(host string) int { output, err := exec.Command("lsof", "-iTCP:"+hostPort(host), "-sTCP:LISTEN", "-Fp").CombinedOutput() if err != nil { panic(err) } pidstr := string(output[1 : len(output)-1]) pid, err := strconv.Atoi(pidstr) if err != nil { panic("cannot convert pid to int: " + pidstr) } return pid } func (s *S) Freeze(host string) { err := stop(s.pid(host)) if err != nil { panic(err) } s.frozen = append(s.frozen, host) } func (s *S) Thaw(host string) { err := cont(s.pid(host)) if err != nil { panic(err) } for i, frozen := range s.frozen { if frozen == host { s.frozen[i] = "" } } } func (s *S) StartAll() { if s.stopped { // Restart any stopped nodes. 
run("cd _testdb && supervisorctl start all") err := run("cd testdb && mongo --nodb wait.js") if err != nil { panic(err) } s.stopped = false } } func run(command string) error { var output []byte var err error if runtime.GOOS == "windows" { output, err = exec.Command("cmd", "/C", command).CombinedOutput() } else { output, err = exec.Command("/bin/sh", "-c", command).CombinedOutput() } if err != nil { msg := fmt.Sprintf("Failed to execute: %s: %s\n%s", command, err.Error(), string(output)) return errors.New(msg) } return nil } var supvNames = map[string]string{ "40001": "db1", "40002": "db2", "40011": "rs1a", "40012": "rs1b", "40013": "rs1c", "40021": "rs2a", "40022": "rs2b", "40023": "rs2c", "40031": "rs3a", "40032": "rs3b", "40033": "rs3c", "40041": "rs4a", "40101": "cfg1", "40102": "cfg2", "40103": "cfg3", "40201": "s1", "40202": "s2", "40203": "s3", } // supvName returns the supervisord name for the given host address. func supvName(host string) string { host, port, err := net.SplitHostPort(host) if err != nil { panic(err) } name, ok := supvNames[port] if !ok { panic("Unknown host: " + host) } return name } func hostPort(host string) string { _, port, err := net.SplitHostPort(host) if err != nil { panic(err) } return port } func panicOnWindows() { if runtime.GOOS == "windows" { panic("the test suite is not yet fully supported on Windows") } } charm-2.1.1/src/gopkg.in/mgo.v2/stats.go0000664000175000017500000000656212672604565016721 0ustar marcomarco// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. 
Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo import ( "sync" ) var stats *Stats var statsMutex sync.Mutex func SetStats(enabled bool) { statsMutex.Lock() if enabled { if stats == nil { stats = &Stats{} } } else { stats = nil } statsMutex.Unlock() } func GetStats() (snapshot Stats) { statsMutex.Lock() snapshot = *stats statsMutex.Unlock() return } func ResetStats() { statsMutex.Lock() debug("Resetting stats") old := stats stats = &Stats{} // These are absolute values: stats.Clusters = old.Clusters stats.SocketsInUse = old.SocketsInUse stats.SocketsAlive = old.SocketsAlive stats.SocketRefs = old.SocketRefs statsMutex.Unlock() return } type Stats struct { Clusters int MasterConns int SlaveConns int SentOps int ReceivedOps int ReceivedDocs int SocketsAlive int SocketsInUse int SocketRefs int } func (stats *Stats) cluster(delta int) { if stats != nil { statsMutex.Lock() stats.Clusters += delta statsMutex.Unlock() } } func (stats *Stats) conn(delta int, master bool) { if stats != nil { statsMutex.Lock() if master { 
stats.MasterConns += delta } else { stats.SlaveConns += delta } statsMutex.Unlock() } } func (stats *Stats) sentOps(delta int) { if stats != nil { statsMutex.Lock() stats.SentOps += delta statsMutex.Unlock() } } func (stats *Stats) receivedOps(delta int) { if stats != nil { statsMutex.Lock() stats.ReceivedOps += delta statsMutex.Unlock() } } func (stats *Stats) receivedDocs(delta int) { if stats != nil { statsMutex.Lock() stats.ReceivedDocs += delta statsMutex.Unlock() } } func (stats *Stats) socketsInUse(delta int) { if stats != nil { statsMutex.Lock() stats.SocketsInUse += delta statsMutex.Unlock() } } func (stats *Stats) socketsAlive(delta int) { if stats != nil { statsMutex.Lock() stats.SocketsAlive += delta statsMutex.Unlock() } } func (stats *Stats) socketRefs(delta int) { if stats != nil { statsMutex.Lock() stats.SocketRefs += delta statsMutex.Unlock() } } charm-2.1.1/src/gopkg.in/mgo.v2/raceon.go0000664000175000017500000000006712672604565017024 0ustar marcomarco// +build race package mgo const raceDetector = true charm-2.1.1/src/gopkg.in/mgo.v2/queue.go0000664000175000017500000000544612672604565016707 0ustar marcomarco// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. 
// queue is a FIFO of arbitrary values backed by a growable circular buffer.
// popi indexes the front element, pushi the next free slot; nelems tracks
// how many live elements sit between them.
type queue struct {
	elems               []interface{}
	nelems, popi, pushi int
}

// Len reports how many elements are currently queued.
func (q *queue) Len() int {
	return q.nelems
}

// Push appends elem at the back of the queue, growing the buffer first
// when it is full.
func (q *queue) Push(elem interface{}) {
	if q.nelems == len(q.elems) {
		q.expand()
	}
	q.elems[q.pushi] = elem
	q.nelems++
	q.pushi = (q.pushi + 1) % len(q.elems)
}

// Pop removes and returns the front element, or nil when the queue is empty.
func (q *queue) Pop() (elem interface{}) {
	if q.nelems == 0 {
		return nil
	}
	elem = q.elems[q.popi]
	q.elems[q.popi] = nil // Drop the reference so the GC can reclaim it.
	q.nelems--
	q.popi = (q.popi + 1) % len(q.elems)
	return elem
}

// expand swaps in a larger backing array: doubling up to 1024 slots, then
// growing by 25%, with a floor of 8. When the live region wraps around, the
// tail segment is relocated to the end of the new buffer so the element
// order is preserved.
func (q *queue) expand() {
	oldCap := len(q.elems)
	var newCap int
	switch {
	case oldCap == 0:
		newCap = 8
	case oldCap < 1024:
		newCap = oldCap * 2
	default:
		newCap = oldCap + oldCap/4
	}
	grown := make([]interface{}, newCap)
	if q.popi == 0 {
		// Elements already sit contiguously starting at index 0.
		copy(grown, q.elems)
		q.pushi = oldCap
	} else {
		// Wrapped: move the segment at [popi:] to the end of the new array.
		newPopi := newCap - (oldCap - q.popi)
		copy(grown, q.elems[:q.popi])
		copy(grown[newPopi:], q.elems[q.popi:])
		q.popi = newPopi
	}
	for i := range q.elems {
		q.elems[i] = nil // Drop old references for the GC.
	}
	q.elems = grown
}
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo import ( "crypto/md5" "crypto/sha1" "encoding/hex" "errors" "fmt" "sync" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/internal/scram" ) type authCmd struct { Authenticate int Nonce string User string Key string } type startSaslCmd struct { StartSASL int `bson:"startSasl"` } type authResult struct { ErrMsg string Ok bool } type getNonceCmd struct { GetNonce int } type getNonceResult struct { Nonce string Err string "$err" Code int } type logoutCmd struct { Logout int } type saslCmd struct { Start int `bson:"saslStart,omitempty"` Continue int `bson:"saslContinue,omitempty"` ConversationId int `bson:"conversationId,omitempty"` Mechanism string `bson:"mechanism,omitempty"` Payload []byte } type saslResult struct { Ok bool `bson:"ok"` NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?) 
Done bool ConversationId int `bson:"conversationId"` Payload []byte ErrMsg string } type saslStepper interface { Step(serverData []byte) (clientData []byte, done bool, err error) Close() } func (socket *mongoSocket) getNonce() (nonce string, err error) { socket.Lock() for socket.cachedNonce == "" && socket.dead == nil { debugf("Socket %p to %s: waiting for nonce", socket, socket.addr) socket.gotNonce.Wait() } if socket.cachedNonce == "mongos" { socket.Unlock() return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth") } debugf("Socket %p to %s: got nonce", socket, socket.addr) nonce, err = socket.cachedNonce, socket.dead socket.cachedNonce = "" socket.Unlock() if err != nil { nonce = "" } return } func (socket *mongoSocket) resetNonce() { debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr) op := &queryOp{} op.query = &getNonceCmd{GetNonce: 1} op.collection = "admin.$cmd" op.limit = -1 op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { if err != nil { socket.kill(errors.New("getNonce: "+err.Error()), true) return } result := &getNonceResult{} err = bson.Unmarshal(docData, &result) if err != nil { socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true) return } debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result) if result.Code == 13390 { // mongos doesn't yet support auth (see http://j.mp/mongos-auth) result.Nonce = "mongos" } else if result.Nonce == "" { var msg string if result.Err != "" { msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code) } else { msg = "Got an empty nonce" } socket.kill(errors.New(msg), true) return } socket.Lock() if socket.cachedNonce != "" { socket.Unlock() panic("resetNonce: nonce already cached") } socket.cachedNonce = result.Nonce socket.gotNonce.Signal() socket.Unlock() } err := socket.Query(op) if err != nil { socket.kill(errors.New("resetNonce: "+err.Error()), true) } } func (socket *mongoSocket) 
Login(cred Credential) error { socket.Lock() if cred.Mechanism == "" && socket.serverInfo.MaxWireVersion >= 3 { cred.Mechanism = "SCRAM-SHA-1" } for _, sockCred := range socket.creds { if sockCred == cred { debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username) socket.Unlock() return nil } } if socket.dropLogout(cred) { debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username) socket.creds = append(socket.creds, cred) socket.Unlock() return nil } socket.Unlock() debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username) var err error switch cred.Mechanism { case "", "MONGODB-CR", "MONGO-CR": // Name changed to MONGODB-CR in SERVER-8501. err = socket.loginClassic(cred) case "PLAIN": err = socket.loginPlain(cred) case "MONGODB-X509": err = socket.loginX509(cred) default: // Try SASL for everything else, if it is available. err = socket.loginSASL(cred) } if err != nil { debugf("Socket %p to %s: login error: %s", socket, socket.addr, err) } else { debugf("Socket %p to %s: login successful", socket, socket.addr) } return err } func (socket *mongoSocket) loginClassic(cred Credential) error { // Note that this only works properly because this function is // synchronous, which means the nonce won't get reset while we're // using it and any other login requests will block waiting for a // new nonce provided in the defer call below. 
nonce, err := socket.getNonce() if err != nil { return err } defer socket.resetNonce() psum := md5.New() psum.Write([]byte(cred.Username + ":mongo:" + cred.Password)) ksum := md5.New() ksum.Write([]byte(nonce + cred.Username)) ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil)))) key := hex.EncodeToString(ksum.Sum(nil)) cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key} res := authResult{} return socket.loginRun(cred.Source, &cmd, &res, func() error { if !res.Ok { return errors.New(res.ErrMsg) } socket.Lock() socket.dropAuth(cred.Source) socket.creds = append(socket.creds, cred) socket.Unlock() return nil }) } type authX509Cmd struct { Authenticate int User string Mechanism string } func (socket *mongoSocket) loginX509(cred Credential) error { cmd := authX509Cmd{Authenticate: 1, User: cred.Username, Mechanism: "MONGODB-X509"} res := authResult{} return socket.loginRun(cred.Source, &cmd, &res, func() error { if !res.Ok { return errors.New(res.ErrMsg) } socket.Lock() socket.dropAuth(cred.Source) socket.creds = append(socket.creds, cred) socket.Unlock() return nil }) } func (socket *mongoSocket) loginPlain(cred Credential) error { cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)} res := authResult{} return socket.loginRun(cred.Source, &cmd, &res, func() error { if !res.Ok { return errors.New(res.ErrMsg) } socket.Lock() socket.dropAuth(cred.Source) socket.creds = append(socket.creds, cred) socket.Unlock() return nil }) } func (socket *mongoSocket) loginSASL(cred Credential) error { var sasl saslStepper var err error if cred.Mechanism == "SCRAM-SHA-1" { // SCRAM is handled without external libraries. 
sasl = saslNewScram(cred) } else if len(cred.ServiceHost) > 0 { sasl, err = saslNew(cred, cred.ServiceHost) } else { sasl, err = saslNew(cred, socket.Server().Addr) } if err != nil { return err } defer sasl.Close() // The goal of this logic is to carry a locked socket until the // local SASL step confirms the auth is valid; the socket needs to be // locked so that concurrent action doesn't leave the socket in an // auth state that doesn't reflect the operations that took place. // As a simple case, imagine inverting login=>logout to logout=>login. // // The logic below works because the lock func isn't called concurrently. locked := false lock := func(b bool) { if locked != b { locked = b if b { socket.Lock() } else { socket.Unlock() } } } lock(true) defer lock(false) start := 1 cmd := saslCmd{} res := saslResult{} for { payload, done, err := sasl.Step(res.Payload) if err != nil { return err } if done && res.Done { socket.dropAuth(cred.Source) socket.creds = append(socket.creds, cred) break } lock(false) cmd = saslCmd{ Start: start, Continue: 1 - start, ConversationId: res.ConversationId, Mechanism: cred.Mechanism, Payload: payload, } start = 0 err = socket.loginRun(cred.Source, &cmd, &res, func() error { // See the comment on lock for why this is necessary. 
lock(true) if !res.Ok || res.NotOk { return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg) } return nil }) if err != nil { return err } if done && res.Done { socket.dropAuth(cred.Source) socket.creds = append(socket.creds, cred) break } } return nil } func saslNewScram(cred Credential) *saslScram { credsum := md5.New() credsum.Write([]byte(cred.Username + ":mongo:" + cred.Password)) client := scram.NewClient(sha1.New, cred.Username, hex.EncodeToString(credsum.Sum(nil))) return &saslScram{cred: cred, client: client} } type saslScram struct { cred Credential client *scram.Client } func (s *saslScram) Close() {} func (s *saslScram) Step(serverData []byte) (clientData []byte, done bool, err error) { more := s.client.Step(serverData) return s.client.Out(), !more, s.client.Err() } func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error { var mutex sync.Mutex var replyErr error mutex.Lock() op := queryOp{} op.query = query op.collection = db + ".$cmd" op.limit = -1 op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { defer mutex.Unlock() if err != nil { replyErr = err return } err = bson.Unmarshal(docData, result) if err != nil { replyErr = err } else { // Must handle this within the read loop for the socket, so // that concurrent login requests are properly ordered. replyErr = f() } } err := socket.Query(&op) if err != nil { return err } mutex.Lock() // Wait. return replyErr } func (socket *mongoSocket) Logout(db string) { socket.Lock() cred, found := socket.dropAuth(db) if found { debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db) socket.logout = append(socket.logout, cred) } socket.Unlock() } func (socket *mongoSocket) LogoutAll() { socket.Lock() if l := len(socket.creds); l > 0 { debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l) socket.logout = append(socket.logout, socket.creds...) 
socket.creds = socket.creds[0:0] } socket.Unlock() } func (socket *mongoSocket) flushLogout() (ops []interface{}) { socket.Lock() if l := len(socket.logout); l > 0 { debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l) for i := 0; i != l; i++ { op := queryOp{} op.query = &logoutCmd{1} op.collection = socket.logout[i].Source + ".$cmd" op.limit = -1 ops = append(ops, &op) } socket.logout = socket.logout[0:0] } socket.Unlock() return } func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) { for i, sockCred := range socket.creds { if sockCred.Source == db { copy(socket.creds[i:], socket.creds[i+1:]) socket.creds = socket.creds[:len(socket.creds)-1] return sockCred, true } } return cred, false } func (socket *mongoSocket) dropLogout(cred Credential) (found bool) { for i, sockCred := range socket.logout { if sockCred == cred { copy(socket.logout[i:], socket.logout[i+1:]) socket.logout = socket.logout[:len(socket.logout)-1] return true } } return false } charm-2.1.1/src/gopkg.in/mgo.v2/bulk_test.go0000664000175000017500000002360712672604565017556 0ustar marcomarco// mgo - MongoDB driver for Go // // Copyright (c) 2010-2015 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo_test import ( . "gopkg.in/check.v1" "gopkg.in/mgo.v2" ) func (s *S) TestBulkInsert(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") bulk := coll.Bulk() bulk.Insert(M{"n": 1}) bulk.Insert(M{"n": 2}, M{"n": 3}) r, err := bulk.Run() c.Assert(err, IsNil) c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}}) } func (s *S) TestBulkInsertError(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") bulk := coll.Bulk() bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3}) _, err = bulk.Run() c.Assert(err, ErrorMatches, ".*duplicate key.*") c.Assert(mgo.IsDup(err), Equals, true) type doc struct { N int `_id` } var res []doc err = coll.Find(nil).Sort("_id").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{1}, {2}}) } func (s *S) TestBulkInsertErrorUnordered(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") bulk := coll.Bulk() bulk.Unordered() bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3}) _, err = bulk.Run() c.Assert(err, ErrorMatches, ".*duplicate key.*") type doc struct { N int `_id` } var res []doc err = 
coll.Find(nil).Sort("_id").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}}) } func (s *S) TestBulkInsertErrorUnorderedSplitBatch(c *C) { // The server has a batch limit of 1000 documents when using write commands. // This artificial limit did not exist with the old wire protocol, so to // avoid compatibility issues the implementation internally split batches // into the proper size and delivers them one by one. This test ensures that // the behavior of unordered (that is, continue on error) remains correct // when errors happen and there are batches left. session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") bulk := coll.Bulk() bulk.Unordered() const total = 4096 type doc struct { Id int `_id` } docs := make([]interface{}, total) for i := 0; i < total; i++ { docs[i] = doc{i} } docs[1] = doc{0} bulk.Insert(docs...) _, err = bulk.Run() c.Assert(err, ErrorMatches, ".*duplicate key.*") n, err := coll.Count() c.Assert(err, IsNil) c.Assert(n, Equals, total-1) var res doc err = coll.FindId(1500).One(&res) c.Assert(err, IsNil) c.Assert(res.Id, Equals, 1500) } func (s *S) TestBulkError(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") // If it's just the same string multiple times, join it into a single message. bulk := coll.Bulk() bulk.Unordered() bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}) _, err = bulk.Run() c.Assert(err, ErrorMatches, ".*duplicate key.*") c.Assert(err, Not(ErrorMatches), ".*duplicate key.*duplicate key") c.Assert(mgo.IsDup(err), Equals, true) // With matching errors but different messages, present them all. 
bulk = coll.Bulk() bulk.Unordered() bulk.Insert(M{"_id": "dupone"}, M{"_id": "dupone"}, M{"_id": "duptwo"}, M{"_id": "duptwo"}) _, err = bulk.Run() if s.versionAtLeast(2, 6) { c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n( - .*duplicate.*\n){2}$") c.Assert(err, ErrorMatches, "(?s).*dupone.*") c.Assert(err, ErrorMatches, "(?s).*duptwo.*") } else { // Wire protocol query doesn't return all errors. c.Assert(err, ErrorMatches, ".*duplicate.*") } c.Assert(mgo.IsDup(err), Equals, true) // With mixed errors, present them all. bulk = coll.Bulk() bulk.Unordered() bulk.Insert(M{"_id": 1}, M{"_id": []int{2}}) _, err = bulk.Run() if s.versionAtLeast(2, 6) { c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n - .*duplicate.*\n - .*array.*\n$") } else { // Wire protocol query doesn't return all errors. c.Assert(err, ErrorMatches, ".*array.*") } c.Assert(mgo.IsDup(err), Equals, false) } func (s *S) TestBulkUpdate(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) c.Assert(err, IsNil) bulk := coll.Bulk() bulk.Update(M{"n": 1}, M{"$set": M{"n": 1}}) bulk.Update(M{"n": 2}, M{"$set": M{"n": 20}}) bulk.Update(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match. 
bulk.Update(M{"n": 1}, M{"$set": M{"n": 10}}, M{"n": 3}, M{"$set": M{"n": 30}}) r, err := bulk.Run() c.Assert(err, IsNil) c.Assert(r.Matched, Equals, 4) if s.versionAtLeast(2, 6) { c.Assert(r.Modified, Equals, 3) } type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{10}, {20}, {30}}) } func (s *S) TestBulkUpdateError(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) c.Assert(err, IsNil) bulk := coll.Bulk() bulk.Update( M{"n": 1}, M{"$set": M{"n": 10}}, M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}}, M{"n": 3}, M{"$set": M{"n": 30}}, ) r, err := bulk.Run() c.Assert(err, ErrorMatches, ".*_id.*") c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{2}, {3}, {10}}) } func (s *S) TestBulkUpdateErrorUnordered(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) c.Assert(err, IsNil) bulk := coll.Bulk() bulk.Unordered() bulk.Update( M{"n": 1}, M{"$set": M{"n": 10}}, M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}}, M{"n": 3}, M{"$set": M{"n": 30}}, ) r, err := bulk.Run() c.Assert(err, ErrorMatches, ".*_id.*") c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{2}, {10}, {30}}) } func (s *S) TestBulkUpdateAll(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) c.Assert(err, IsNil) bulk := coll.Bulk() bulk.UpdateAll(M{"n": 1}, M{"$set": M{"n": 10}}) 
bulk.UpdateAll(M{"n": 2}, M{"$set": M{"n": 2}}) bulk.UpdateAll(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match. bulk.UpdateAll(M{}, M{"$inc": M{"n": 1}}, M{"n": 11}, M{"$set": M{"n": 5}}) r, err := bulk.Run() c.Assert(err, IsNil) c.Assert(r.Matched, Equals, 6) if s.versionAtLeast(2, 6) { c.Assert(r.Modified, Equals, 5) } type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{3}, {4}, {5}}) } func (s *S) TestBulkMixedUnordered(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") // Abuse undefined behavior to ensure the desired implementation is in place. bulk := coll.Bulk() bulk.Unordered() bulk.Insert(M{"n": 1}) bulk.Update(M{"n": 2}, M{"$inc": M{"n": 1}}) bulk.Insert(M{"n": 2}) bulk.Update(M{"n": 3}, M{"$inc": M{"n": 1}}) bulk.Update(M{"n": 1}, M{"$inc": M{"n": 1}}) bulk.Insert(M{"n": 3}) r, err := bulk.Run() c.Assert(err, IsNil) c.Assert(r.Matched, Equals, 3) if s.versionAtLeast(2, 6) { c.Assert(r.Modified, Equals, 3) } type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{2}, {3}, {4}}) } func (s *S) TestBulkUpsert(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) c.Assert(err, IsNil) bulk := coll.Bulk() bulk.Upsert(M{"n": 2}, M{"$set": M{"n": 20}}) bulk.Upsert(M{"n": 4}, M{"$set": M{"n": 40}}, M{"n": 3}, M{"$set": M{"n": 30}}) r, err := bulk.Run() c.Assert(err, IsNil) c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{1}, {20}, {30}, {40}}) } charm-2.1.1/src/gopkg.in/mgo.v2/server.go0000664000175000017500000002735212672604565017071 0ustar marcomarco// mgo - MongoDB 
driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo import ( "errors" "net" "sort" "sync" "time" "gopkg.in/mgo.v2/bson" ) // --------------------------------------------------------------------------- // Mongo server encapsulation. 
type mongoServer struct { sync.RWMutex Addr string ResolvedAddr string tcpaddr *net.TCPAddr unusedSockets []*mongoSocket liveSockets []*mongoSocket closed bool abended bool sync chan bool dial dialer pingValue time.Duration pingIndex int pingCount uint32 pingWindow [6]time.Duration info *mongoServerInfo } type dialer struct { old func(addr net.Addr) (net.Conn, error) new func(addr *ServerAddr) (net.Conn, error) } func (dial dialer) isSet() bool { return dial.old != nil || dial.new != nil } type mongoServerInfo struct { Master bool Mongos bool Tags bson.D MaxWireVersion int SetName string } var defaultServerInfo mongoServerInfo func newServer(addr string, tcpaddr *net.TCPAddr, sync chan bool, dial dialer) *mongoServer { server := &mongoServer{ Addr: addr, ResolvedAddr: tcpaddr.String(), tcpaddr: tcpaddr, sync: sync, dial: dial, info: &defaultServerInfo, pingValue: time.Hour, // Push it back before an actual ping. } go server.pinger(true) return server } var errPoolLimit = errors.New("per-server connection limit reached") var errServerClosed = errors.New("server was closed") // AcquireSocket returns a socket for communicating with the server. // This will attempt to reuse an old connection, if one is available. Otherwise, // it will establish a new one. The returned socket is owned by the call site, // and will return to the cache when the socket has its Release method called // the same number of times as AcquireSocket + Acquire were called for it. // If the poolLimit argument is greater than zero and the number of sockets in // use in this server is greater than the provided limit, errPoolLimit is // returned. 
func (server *mongoServer) AcquireSocket(poolLimit int, timeout time.Duration) (socket *mongoSocket, abended bool, err error) { for { server.Lock() abended = server.abended if server.closed { server.Unlock() return nil, abended, errServerClosed } n := len(server.unusedSockets) if poolLimit > 0 && len(server.liveSockets)-n >= poolLimit { server.Unlock() return nil, false, errPoolLimit } if n > 0 { socket = server.unusedSockets[n-1] server.unusedSockets[n-1] = nil // Help GC. server.unusedSockets = server.unusedSockets[:n-1] info := server.info server.Unlock() err = socket.InitialAcquire(info, timeout) if err != nil { continue } } else { server.Unlock() socket, err = server.Connect(timeout) if err == nil { server.Lock() // We've waited for the Connect, see if we got // closed in the meantime if server.closed { server.Unlock() socket.Release() socket.Close() return nil, abended, errServerClosed } server.liveSockets = append(server.liveSockets, socket) server.Unlock() } } return } panic("unreachable") } // Connect establishes a new connection to the server. This should // generally be done through server.AcquireSocket(). func (server *mongoServer) Connect(timeout time.Duration) (*mongoSocket, error) { server.RLock() master := server.info.Master dial := server.dial server.RUnlock() logf("Establishing new connection to %s (timeout=%s)...", server.Addr, timeout) var conn net.Conn var err error switch { case !dial.isSet(): // Cannot do this because it lacks timeout support. 
:-( //conn, err = net.DialTCP("tcp", nil, server.tcpaddr) conn, err = net.DialTimeout("tcp", server.ResolvedAddr, timeout) case dial.old != nil: conn, err = dial.old(server.tcpaddr) case dial.new != nil: conn, err = dial.new(&ServerAddr{server.Addr, server.tcpaddr}) default: panic("dialer is set, but both dial.old and dial.new are nil") } if err != nil { logf("Connection to %s failed: %v", server.Addr, err.Error()) return nil, err } logf("Connection to %s established.", server.Addr) stats.conn(+1, master) return newSocket(server, conn, timeout), nil } // Close forces closing all sockets that are alive, whether // they're currently in use or not. func (server *mongoServer) Close() { server.Lock() server.closed = true liveSockets := server.liveSockets unusedSockets := server.unusedSockets server.liveSockets = nil server.unusedSockets = nil server.Unlock() logf("Connections to %s closing (%d live sockets).", server.Addr, len(liveSockets)) for i, s := range liveSockets { s.Close() liveSockets[i] = nil } for i := range unusedSockets { unusedSockets[i] = nil } } // RecycleSocket puts socket back into the unused cache. func (server *mongoServer) RecycleSocket(socket *mongoSocket) { server.Lock() if !server.closed { server.unusedSockets = append(server.unusedSockets, socket) } server.Unlock() } func removeSocket(sockets []*mongoSocket, socket *mongoSocket) []*mongoSocket { for i, s := range sockets { if s == socket { copy(sockets[i:], sockets[i+1:]) n := len(sockets) - 1 sockets[n] = nil sockets = sockets[:n] break } } return sockets } // AbendSocket notifies the server that the given socket has terminated // abnormally, and thus should be discarded rather than cached. 
func (server *mongoServer) AbendSocket(socket *mongoSocket) { server.Lock() server.abended = true if server.closed { server.Unlock() return } server.liveSockets = removeSocket(server.liveSockets, socket) server.unusedSockets = removeSocket(server.unusedSockets, socket) server.Unlock() // Maybe just a timeout, but suggest a cluster sync up just in case. select { case server.sync <- true: default: } } func (server *mongoServer) SetInfo(info *mongoServerInfo) { server.Lock() server.info = info server.Unlock() } func (server *mongoServer) Info() *mongoServerInfo { server.Lock() info := server.info server.Unlock() return info } func (server *mongoServer) hasTags(serverTags []bson.D) bool { NextTagSet: for _, tags := range serverTags { NextReqTag: for _, req := range tags { for _, has := range server.info.Tags { if req.Name == has.Name { if req.Value == has.Value { continue NextReqTag } continue NextTagSet } } continue NextTagSet } return true } return false } var pingDelay = 15 * time.Second func (server *mongoServer) pinger(loop bool) { var delay time.Duration if raceDetector { // This variable is only ever touched by tests. 
globalMutex.Lock() delay = pingDelay globalMutex.Unlock() } else { delay = pingDelay } op := queryOp{ collection: "admin.$cmd", query: bson.D{{"ping", 1}}, flags: flagSlaveOk, limit: -1, } for { if loop { time.Sleep(delay) } op := op socket, _, err := server.AcquireSocket(0, delay) if err == nil { start := time.Now() _, _ = socket.SimpleQuery(&op) delay := time.Now().Sub(start) server.pingWindow[server.pingIndex] = delay server.pingIndex = (server.pingIndex + 1) % len(server.pingWindow) server.pingCount++ var max time.Duration for i := 0; i < len(server.pingWindow) && uint32(i) < server.pingCount; i++ { if server.pingWindow[i] > max { max = server.pingWindow[i] } } socket.Release() server.Lock() if server.closed { loop = false } server.pingValue = max server.Unlock() logf("Ping for %s is %d ms", server.Addr, max/time.Millisecond) } else if err == errServerClosed { return } if !loop { return } } } type mongoServerSlice []*mongoServer func (s mongoServerSlice) Len() int { return len(s) } func (s mongoServerSlice) Less(i, j int) bool { return s[i].ResolvedAddr < s[j].ResolvedAddr } func (s mongoServerSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s mongoServerSlice) Sort() { sort.Sort(s) } func (s mongoServerSlice) Search(resolvedAddr string) (i int, ok bool) { n := len(s) i = sort.Search(n, func(i int) bool { return s[i].ResolvedAddr >= resolvedAddr }) return i, i != n && s[i].ResolvedAddr == resolvedAddr } type mongoServers struct { slice mongoServerSlice } func (servers *mongoServers) Search(resolvedAddr string) (server *mongoServer) { if i, ok := servers.slice.Search(resolvedAddr); ok { return servers.slice[i] } return nil } func (servers *mongoServers) Add(server *mongoServer) { servers.slice = append(servers.slice, server) servers.slice.Sort() } func (servers *mongoServers) Remove(other *mongoServer) (server *mongoServer) { if i, found := servers.slice.Search(other.ResolvedAddr); found { server = servers.slice[i] copy(servers.slice[i:], 
servers.slice[i+1:]) n := len(servers.slice) - 1 servers.slice[n] = nil // Help GC. servers.slice = servers.slice[:n] } return } func (servers *mongoServers) Slice() []*mongoServer { return ([]*mongoServer)(servers.slice) } func (servers *mongoServers) Get(i int) *mongoServer { return servers.slice[i] } func (servers *mongoServers) Len() int { return len(servers.slice) } func (servers *mongoServers) Empty() bool { return len(servers.slice) == 0 } // BestFit returns the best guess of what would be the most interesting // server to perform operations on at this point in time. func (servers *mongoServers) BestFit(mode Mode, serverTags []bson.D) *mongoServer { var best *mongoServer for _, next := range servers.slice { if best == nil { best = next best.RLock() if serverTags != nil && !next.info.Mongos && !best.hasTags(serverTags) { best.RUnlock() best = nil } continue } next.RLock() swap := false switch { case serverTags != nil && !next.info.Mongos && !next.hasTags(serverTags): // Must have requested tags. case next.info.Master != best.info.Master && mode != Nearest: // Prefer slaves, unless the mode is PrimaryPreferred. swap = (mode == PrimaryPreferred) != best.info.Master case absDuration(next.pingValue-best.pingValue) > 15*time.Millisecond: // Prefer nearest server. swap = next.pingValue < best.pingValue case len(next.liveSockets)-len(next.unusedSockets) < len(best.liveSockets)-len(best.unusedSockets): // Prefer servers with less connections. swap = true } if swap { best.RUnlock() best = next } else { next.RUnlock() } } if best != nil { best.RUnlock() } return best } func absDuration(d time.Duration) time.Duration { if d < 0 { return -d } return d } charm-2.1.1/src/gopkg.in/macaroon.v1/0000775000175000017500000000000012672604513016230 5ustar marcomarcocharm-2.1.1/src/gopkg.in/macaroon.v1/TODO0000664000175000017500000000013712672604513016721 0ustar marcomarcomacaroon: - verify that all signature calculations to correspond exactly with libmacaroons. 
charm-2.1.1/src/gopkg.in/macaroon.v1/export_test.go0000664000175000017500000000071212672604513021137 0ustar marcomarcopackage macaroon // Data returns the macaroon's data. func (m *Macaroon) Data() []byte { return m.data } // AddThirdPartyCaveatWithRand adds a third-party caveat to the macaroon, using // the given source of randomness for encrypting the caveat id. var AddThirdPartyCaveatWithRand = (*Macaroon).addThirdPartyCaveatWithRand // MaxPacketLen is the maximum allowed length of a packet in the macaroon // serialization format. var MaxPacketLen = maxPacketLen charm-2.1.1/src/gopkg.in/macaroon.v1/marshal_test.go0000664000175000017500000000530612672604513021251 0ustar marcomarcopackage macaroon_test import ( gc "gopkg.in/check.v1" "gopkg.in/macaroon.v1" ) type marshalSuite struct{} var _ = gc.Suite(&marshalSuite{}) func (*marshalSuite) TestMarshalUnmarshalMacaroon(c *gc.C) { rootKey := []byte("secret") m := MustNew(rootKey, "some id", "a location") err := m.AddFirstPartyCaveat("a caveat") c.Assert(err, gc.IsNil) b, err := m.MarshalBinary() c.Assert(err, gc.IsNil) unmarshaledM := &macaroon.Macaroon{} err = unmarshaledM.UnmarshalBinary(b) c.Assert(err, gc.IsNil) c.Assert(m.Location(), gc.Equals, unmarshaledM.Location()) c.Assert(m.Id(), gc.Equals, unmarshaledM.Id()) c.Assert(m.Signature(), gc.DeepEquals, unmarshaledM.Signature()) c.Assert(m.Caveats(), gc.DeepEquals, unmarshaledM.Caveats()) c.Assert(m, gc.DeepEquals, unmarshaledM) } func (*marshalSuite) TestMarshalUnmarshalSlice(c *gc.C) { rootKey := []byte("secret") m1 := MustNew(rootKey, "some id", "a location") m2 := MustNew(rootKey, "some other id", "another location") err := m1.AddFirstPartyCaveat("a caveat") c.Assert(err, gc.IsNil) err = m2.AddFirstPartyCaveat("another caveat") c.Assert(err, gc.IsNil) macaroons := macaroon.Slice{m1, m2} b, err := macaroons.MarshalBinary() c.Assert(err, gc.IsNil) var unmarshaledMacs macaroon.Slice err = unmarshaledMacs.UnmarshalBinary(b) c.Assert(err, gc.IsNil) 
c.Assert(unmarshaledMacs, gc.HasLen, len(macaroons)) for i, m := range macaroons { c.Assert(m.Location(), gc.Equals, unmarshaledMacs[i].Location()) c.Assert(m.Id(), gc.Equals, unmarshaledMacs[i].Id()) c.Assert(m.Signature(), gc.DeepEquals, unmarshaledMacs[i].Signature()) c.Assert(m.Caveats(), gc.DeepEquals, unmarshaledMacs[i].Caveats()) } c.Assert(macaroons, gc.DeepEquals, unmarshaledMacs) // The unmarshaled macaroons share the same underlying data // slice, so check that appending a caveat to the first does not // affect the second. for i := 0; i < 10; i++ { err = unmarshaledMacs[0].AddFirstPartyCaveat("caveat") c.Assert(err, gc.IsNil) } c.Assert(unmarshaledMacs[1], gc.DeepEquals, macaroons[1]) c.Assert(err, gc.IsNil) } func (*marshalSuite) TestSliceRoundtrip(c *gc.C) { rootKey := []byte("secret") m1 := MustNew(rootKey, "some id", "a location") m2 := MustNew(rootKey, "some other id", "another location") err := m1.AddFirstPartyCaveat("a caveat") c.Assert(err, gc.IsNil) err = m2.AddFirstPartyCaveat("another caveat") c.Assert(err, gc.IsNil) macaroons := macaroon.Slice{m1, m2} b, err := macaroons.MarshalBinary() c.Assert(err, gc.IsNil) var unmarshaledMacs macaroon.Slice err = unmarshaledMacs.UnmarshalBinary(b) c.Assert(err, gc.IsNil) marshaledMacs, err := unmarshaledMacs.MarshalBinary() c.Assert(err, gc.IsNil) c.Assert(b, gc.DeepEquals, marshaledMacs) } charm-2.1.1/src/gopkg.in/macaroon.v1/LICENSE0000664000175000017500000000274512672604513017245 0ustar marcomarcoCopyright © 2014, Roger Peppe All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of this project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. charm-2.1.1/src/gopkg.in/macaroon.v1/macaroon.go0000664000175000017500000002152512672604513020363 0ustar marcomarco// The macaroon package implements macaroons as described in // the paper "Macaroons: Cookies with Contextual Caveats for // Decentralized Authorization in the Cloud" // (http://theory.stanford.edu/~ataly/Papers/macaroons.pdf) // // See the macaroon bakery packages at http://godoc.org/gopkg.in/macaroon-bakery.v0 // for higher level services and operations that use macaroons. package macaroon import ( "bytes" "crypto/hmac" "crypto/rand" "fmt" "io" ) // Macaroon holds a macaroon. // See Fig. 7 of http://theory.stanford.edu/~ataly/Papers/macaroons.pdf // for a description of the data contained within. // Macaroons are mutable objects - use Clone as appropriate // to avoid unwanted mutation. 
type Macaroon struct { // data holds the binary-marshalled form // of the macaroon data. data []byte location packet id packet caveats []caveat sig [hashLen]byte } // caveat holds a first person or third party caveat. type caveat struct { location packet caveatId packet verificationId packet } type Caveat struct { Id string Location string } // isThirdParty reports whether the caveat must be satisfied // by some third party (if not, it's a first person caveat). func (cav *caveat) isThirdParty() bool { return cav.verificationId.len() > 0 } // New returns a new macaroon with the given root key, // identifier and location. func New(rootKey []byte, id, loc string) (*Macaroon, error) { var m Macaroon if err := m.init(id, loc); err != nil { return nil, err } derivedKey := makeKey(rootKey) m.sig = *keyedHash(derivedKey, m.dataBytes(m.id)) return &m, nil } func (m *Macaroon) init(id, loc string) error { var ok bool m.location, ok = m.appendPacket(fieldLocation, []byte(loc)) if !ok { return fmt.Errorf("macaroon location too big") } m.id, ok = m.appendPacket(fieldIdentifier, []byte(id)) if !ok { return fmt.Errorf("macaroon identifier too big") } return nil } // Clone returns a copy of the receiving macaroon. func (m *Macaroon) Clone() *Macaroon { m1 := *m // Ensure that if any data is appended to the new // macaroon, it will copy data and caveats. m1.data = m1.data[0:len(m1.data):len(m1.data)] m1.caveats = m1.caveats[0:len(m1.caveats):len(m1.caveats)] return &m1 } // Location returns the macaroon's location hint. This is // not verified as part of the macaroon. func (m *Macaroon) Location() string { return m.dataStr(m.location) } // Id returns the id of the macaroon. This can hold // arbitrary information. func (m *Macaroon) Id() string { return m.dataStr(m.id) } // Signature returns the macaroon's signature. 
func (m *Macaroon) Signature() []byte { // sig := m.sig // return sig[:] // Work around https://github.com/golang/go/issues/9537 sig := new([hashLen]byte) *sig = m.sig return sig[:] } // Caveats returns the macaroon's caveats. // This method will probably change, and it's important not to change the returned caveat. func (m *Macaroon) Caveats() []Caveat { caveats := make([]Caveat, len(m.caveats)) for i, cav := range m.caveats { caveats[i] = Caveat{ Id: m.dataStr(cav.caveatId), Location: m.dataStr(cav.location), } } return caveats } // appendCaveat appends a caveat without modifying the macaroon's signature. func (m *Macaroon) appendCaveat(caveatId string, verificationId []byte, loc string) (*caveat, error) { var cav caveat var ok bool if caveatId != "" { cav.caveatId, ok = m.appendPacket(fieldCaveatId, []byte(caveatId)) if !ok { return nil, fmt.Errorf("caveat identifier too big") } } if len(verificationId) > 0 { cav.verificationId, ok = m.appendPacket(fieldVerificationId, verificationId) if !ok { return nil, fmt.Errorf("caveat verification id too big") } } if loc != "" { cav.location, ok = m.appendPacket(fieldCaveatLocation, []byte(loc)) if !ok { return nil, fmt.Errorf("caveat location too big") } } m.caveats = append(m.caveats, cav) return &m.caveats[len(m.caveats)-1], nil } func (m *Macaroon) addCaveat(caveatId string, verificationId []byte, loc string) error { cav, err := m.appendCaveat(caveatId, verificationId, loc) if err != nil { return err } m.sig = *keyedHash2(&m.sig, m.dataBytes(cav.verificationId), m.dataBytes(cav.caveatId)) return nil } func keyedHash2(key *[keyLen]byte, d1, d2 []byte) *[hashLen]byte { if len(d1) == 0 { return keyedHash(key, d2) } var data [hashLen * 2]byte copy(data[0:], keyedHash(key, d1)[:]) copy(data[hashLen:], keyedHash(key, d2)[:]) return keyedHash(key, data[:]) } // Bind prepares the macaroon for being used to discharge the // macaroon with the given signature sig. 
This must be // used before it is used in the discharges argument to Verify. func (m *Macaroon) Bind(sig []byte) { m.sig = *bindForRequest(sig, &m.sig) } // AddFirstPartyCaveat adds a caveat that will be verified // by the target service. func (m *Macaroon) AddFirstPartyCaveat(caveatId string) error { return m.addCaveat(caveatId, nil, "") } // AddThirdPartyCaveat adds a third-party caveat to the macaroon, // using the given shared root key, caveat id and location hint. // The caveat id should encode the root key in some // way, either by encrypting it with a key known to the third party // or by holding a reference to it stored in the third party's // storage. func (m *Macaroon) AddThirdPartyCaveat(rootKey []byte, caveatId string, loc string) error { return m.addThirdPartyCaveatWithRand(rootKey, caveatId, loc, rand.Reader) } func (m *Macaroon) addThirdPartyCaveatWithRand(rootKey []byte, caveatId string, loc string, r io.Reader) error { derivedKey := makeKey(rootKey) verificationId, err := encrypt(&m.sig, derivedKey, r) if err != nil { return err } return m.addCaveat(caveatId, verificationId, loc) } var zeroKey [hashLen]byte // bindForRequest binds the given macaroon // to the given signature of its parent macaroon. func bindForRequest(rootSig []byte, dischargeSig *[hashLen]byte) *[hashLen]byte { if bytes.Equal(rootSig, dischargeSig[:]) { return dischargeSig } return keyedHash2(&zeroKey, rootSig, dischargeSig[:]) } // Verify verifies that the receiving macaroon is valid. // The root key must be the same that the macaroon was originally // minted with. The check function is called to verify each // first-party caveat - it should return an error if the // condition is not met. // // The discharge macaroons should be provided in discharges. // // Verify returns nil if the verification succeeds. 
func (m *Macaroon) Verify(rootKey []byte, check func(caveat string) error, discharges []*Macaroon) error { derivedKey := makeKey(rootKey) // TODO(rog) consider distinguishing between classes of // check error - some errors may be resolved by minting // a new macaroon; others may not. used := make([]int, len(discharges)) if err := m.verify(&m.sig, derivedKey, check, discharges, used); err != nil { return err } for i, dm := range discharges { switch used[i] { case 0: return fmt.Errorf("discharge macaroon %q was not used", dm.Id()) case 1: continue default: // Should be impossible because of check in verify, but be defensive. return fmt.Errorf("discharge macaroon %q was used more than once", dm.Id()) } } return nil } func (m *Macaroon) verify(rootSig *[hashLen]byte, rootKey *[hashLen]byte, check func(caveat string) error, discharges []*Macaroon, used []int) error { caveatSig := keyedHash(rootKey, m.dataBytes(m.id)) for i, cav := range m.caveats { if cav.isThirdParty() { cavKey, err := decrypt(caveatSig, m.dataBytes(cav.verificationId)) if err != nil { return fmt.Errorf("failed to decrypt caveat %d signature: %v", i, err) } // We choose an arbitrary error from one of the // possible discharge macaroon verifications // if there's more than one discharge macaroon // with the required id. found := false for di, dm := range discharges { if !bytes.Equal(dm.dataBytes(dm.id), m.dataBytes(cav.caveatId)) { continue } found = true // It's important that we do this before calling verify, // as it prevents potentially infinite recursion. 
if used[di]++; used[di] > 1 { return fmt.Errorf("discharge macaroon %q was used more than once", dm.Id()) } if err := dm.verify(rootSig, cavKey, check, discharges, used); err != nil { return err } break } if !found { return fmt.Errorf("cannot find discharge macaroon for caveat %q", m.dataBytes(cav.caveatId)) } } else { if err := check(string(m.dataBytes(cav.caveatId))); err != nil { return err } } caveatSig = keyedHash2(caveatSig, m.dataBytes(cav.verificationId), m.dataBytes(cav.caveatId)) } // TODO perhaps we should actually do this check before doing // all the potentially expensive caveat checks. boundSig := bindForRequest(rootSig[:], caveatSig) if !hmac.Equal(boundSig[:], m.sig[:]) { return fmt.Errorf("signature mismatch after caveat verification") } return nil } type Verifier interface { Verify(m *Macaroon, rootKey []byte) (bool, error) } charm-2.1.1/src/gopkg.in/macaroon.v1/crypto.go0000664000175000017500000000412412672604513020100 0ustar marcomarcopackage macaroon import ( "crypto/hmac" "crypto/sha256" "fmt" "hash" "io" "golang.org/x/crypto/nacl/secretbox" ) func keyedHash(key *[hashLen]byte, text []byte) *[hashLen]byte { h := keyedHasher(key) h.Write([]byte(text)) var sum [hashLen]byte hashSum(h, &sum) return &sum } func keyedHasher(key *[hashLen]byte) hash.Hash { return hmac.New(sha256.New, key[:]) } var keyGen = []byte("macaroons-key-generator") // makeKey derives a fixed length key from a variable // length key. The keyGen constant is the same // as that used in libmacaroons. func makeKey(variableKey []byte) *[keyLen]byte { h := hmac.New(sha256.New, keyGen) h.Write(variableKey) var key [keyLen]byte hashSum(h, &key) return &key } // hashSum calls h.Sum to put the sum into // the given destination. It also sanity // checks that the result really is the expected // size. 
func hashSum(h hash.Hash, dest *[hashLen]byte) { r := h.Sum(dest[:0]) if len(r) != len(dest) { panic("hash size inconsistency") } } const ( keyLen = 32 nonceLen = 24 hashLen = sha256.Size ) func newNonce(r io.Reader) (*[nonceLen]byte, error) { var nonce [nonceLen]byte _, err := r.Read(nonce[:]) if err != nil { return nil, fmt.Errorf("cannot generate random bytes: %v", err) } return &nonce, nil } func encrypt(key *[keyLen]byte, text *[hashLen]byte, r io.Reader) ([]byte, error) { nonce, err := newNonce(r) if err != nil { return nil, err } out := make([]byte, 0, len(nonce)+secretbox.Overhead+len(text)) out = append(out, nonce[:]...) return secretbox.Seal(out, text[:], nonce, key), nil } func decrypt(key *[keyLen]byte, ciphertext []byte) (*[hashLen]byte, error) { if len(ciphertext) < nonceLen+secretbox.Overhead { return nil, fmt.Errorf("message too short") } var nonce [nonceLen]byte copy(nonce[:], ciphertext) ciphertext = ciphertext[nonceLen:] text, ok := secretbox.Open(nil, ciphertext, &nonce, key) if !ok { return nil, fmt.Errorf("decryption failure") } if len(text) != hashLen { return nil, fmt.Errorf("decrypted text is wrong length") } var rtext [hashLen]byte copy(rtext[:], text) return &rtext, nil } charm-2.1.1/src/gopkg.in/macaroon.v1/macaroon_test.go0000664000175000017500000004121312672604513021416 0ustar marcomarcopackage macaroon_test import ( "crypto/rand" "encoding/base64" "encoding/hex" "encoding/json" "fmt" "testing" gc "gopkg.in/check.v1" "gopkg.in/macaroon.v1" ) func TestPackage(t *testing.T) { gc.TestingT(t) } type macaroonSuite struct{} var _ = gc.Suite(&macaroonSuite{}) func never(string) error { return fmt.Errorf("condition is never true") } func (*macaroonSuite) TestNoCaveats(c *gc.C) { rootKey := []byte("secret") m := MustNew(rootKey, "some id", "a location") c.Assert(m.Location(), gc.Equals, "a location") c.Assert(m.Id(), gc.Equals, "some id") err := m.Verify(rootKey, never, nil) c.Assert(err, gc.IsNil) } func (*macaroonSuite) TestFirstPartyCaveat(c 
*gc.C) { rootKey := []byte("secret") m := MustNew(rootKey, "some id", "a location") caveats := map[string]bool{ "a caveat": true, "another caveat": true, } tested := make(map[string]bool) for cav := range caveats { m.AddFirstPartyCaveat(cav) } expectErr := fmt.Errorf("condition not met") check := func(cav string) error { tested[cav] = true if caveats[cav] { return nil } return expectErr } err := m.Verify(rootKey, check, nil) c.Assert(err, gc.IsNil) c.Assert(tested, gc.DeepEquals, caveats) m.AddFirstPartyCaveat("not met") err = m.Verify(rootKey, check, nil) c.Assert(err, gc.Equals, expectErr) c.Assert(tested["not met"], gc.Equals, true) } func (*macaroonSuite) TestThirdPartyCaveat(c *gc.C) { rootKey := []byte("secret") m := MustNew(rootKey, "some id", "a location") dischargeRootKey := []byte("shared root key") thirdPartyCaveatId := "3rd party caveat" err := m.AddThirdPartyCaveat(dischargeRootKey, thirdPartyCaveatId, "remote.com") c.Assert(err, gc.IsNil) dm := MustNew(dischargeRootKey, thirdPartyCaveatId, "remote location") dm.Bind(m.Signature()) err = m.Verify(rootKey, never, []*macaroon.Macaroon{dm}) c.Assert(err, gc.IsNil) } func (*macaroonSuite) TestThirdPartyCaveatBadRandom(c *gc.C) { rootKey := []byte("secret") m := MustNew(rootKey, "some id", "a location") dischargeRootKey := []byte("shared root key") thirdPartyCaveatId := "3rd party caveat" err := macaroon.AddThirdPartyCaveatWithRand(m, dischargeRootKey, thirdPartyCaveatId, "remote.com", &macaroon.ErrorReader{}) c.Assert(err, gc.ErrorMatches, "cannot generate random bytes: fail") } type conditionTest struct { conditions map[string]bool expectErr string } var verifyTests = []struct { about string macaroons []macaroonSpec conditions []conditionTest }{{ about: "single third party caveat without discharge", macaroons: []macaroonSpec{{ rootKey: "root-key", id: "root-id", caveats: []caveat{{ condition: "wonderful", }, { condition: "bob-is-great", location: "bob", rootKey: "bob-caveat-root-key", }}, }}, conditions: 
[]conditionTest{{ conditions: map[string]bool{ "wonderful": true, }, expectErr: `cannot find discharge macaroon for caveat "bob-is-great"`, }}, }, { about: "single third party caveat with discharge", macaroons: []macaroonSpec{{ rootKey: "root-key", id: "root-id", caveats: []caveat{{ condition: "wonderful", }, { condition: "bob-is-great", location: "bob", rootKey: "bob-caveat-root-key", }}, }, { location: "bob", rootKey: "bob-caveat-root-key", id: "bob-is-great", }}, conditions: []conditionTest{{ conditions: map[string]bool{ "wonderful": true, }, }, { conditions: map[string]bool{ "wonderful": false, }, expectErr: `condition "wonderful" not met`, }}, }, { about: "single third party caveat with discharge with mismatching root key", macaroons: []macaroonSpec{{ rootKey: "root-key", id: "root-id", caveats: []caveat{{ condition: "wonderful", }, { condition: "bob-is-great", location: "bob", rootKey: "bob-caveat-root-key", }}, }, { location: "bob", rootKey: "bob-caveat-root-key-wrong", id: "bob-is-great", }}, conditions: []conditionTest{{ conditions: map[string]bool{ "wonderful": true, }, expectErr: `signature mismatch after caveat verification`, }}, }, { about: "single third party caveat with two discharges", macaroons: []macaroonSpec{{ rootKey: "root-key", id: "root-id", caveats: []caveat{{ condition: "wonderful", }, { condition: "bob-is-great", location: "bob", rootKey: "bob-caveat-root-key", }}, }, { location: "bob", rootKey: "bob-caveat-root-key", id: "bob-is-great", caveats: []caveat{{ condition: "splendid", }}, }, { location: "bob", rootKey: "bob-caveat-root-key", id: "bob-is-great", caveats: []caveat{{ condition: "top of the world", }}, }}, conditions: []conditionTest{{ conditions: map[string]bool{ "wonderful": true, }, expectErr: `condition "splendid" not met`, }, { conditions: map[string]bool{ "wonderful": true, "splendid": true, "top of the world": true, }, expectErr: `discharge macaroon "bob-is-great" was not used`, }, { conditions: map[string]bool{ "wonderful": 
true, "splendid": false, "top of the world": true, }, expectErr: `condition "splendid" not met`, }, { conditions: map[string]bool{ "wonderful": true, "splendid": true, "top of the world": false, }, expectErr: `discharge macaroon "bob-is-great" was not used`, }}, }, { about: "one discharge used for two macaroons", macaroons: []macaroonSpec{{ rootKey: "root-key", id: "root-id", caveats: []caveat{{ condition: "somewhere else", location: "bob", rootKey: "bob-caveat-root-key", }, { condition: "bob-is-great", location: "charlie", rootKey: "bob-caveat-root-key", }}, }, { location: "bob", rootKey: "bob-caveat-root-key", id: "somewhere else", caveats: []caveat{{ condition: "bob-is-great", location: "charlie", rootKey: "bob-caveat-root-key", }}, }, { location: "bob", rootKey: "bob-caveat-root-key", id: "bob-is-great", }}, conditions: []conditionTest{{ expectErr: `discharge macaroon "bob-is-great" was used more than once`, }}, }, { about: "recursive third party caveat", macaroons: []macaroonSpec{{ rootKey: "root-key", id: "root-id", caveats: []caveat{{ condition: "bob-is-great", location: "bob", rootKey: "bob-caveat-root-key", }}, }, { location: "bob", rootKey: "bob-caveat-root-key", id: "bob-is-great", caveats: []caveat{{ condition: "bob-is-great", location: "charlie", rootKey: "bob-caveat-root-key", }}, }}, conditions: []conditionTest{{ expectErr: `discharge macaroon "bob-is-great" was used more than once`, }}, }, { about: "two third party caveats", macaroons: []macaroonSpec{{ rootKey: "root-key", id: "root-id", caveats: []caveat{{ condition: "wonderful", }, { condition: "bob-is-great", location: "bob", rootKey: "bob-caveat-root-key", }, { condition: "charlie-is-great", location: "charlie", rootKey: "charlie-caveat-root-key", }}, }, { location: "bob", rootKey: "bob-caveat-root-key", id: "bob-is-great", caveats: []caveat{{ condition: "splendid", }}, }, { location: "charlie", rootKey: "charlie-caveat-root-key", id: "charlie-is-great", caveats: []caveat{{ condition: "top of 
the world", }}, }}, conditions: []conditionTest{{ conditions: map[string]bool{ "wonderful": true, "splendid": true, "top of the world": true, }, }, { conditions: map[string]bool{ "wonderful": true, "splendid": false, "top of the world": true, }, expectErr: `condition "splendid" not met`, }, { conditions: map[string]bool{ "wonderful": true, "splendid": true, "top of the world": false, }, expectErr: `condition "top of the world" not met`, }}, }, { about: "third party caveat with undischarged third party caveat", macaroons: []macaroonSpec{{ rootKey: "root-key", id: "root-id", caveats: []caveat{{ condition: "wonderful", }, { condition: "bob-is-great", location: "bob", rootKey: "bob-caveat-root-key", }}, }, { location: "bob", rootKey: "bob-caveat-root-key", id: "bob-is-great", caveats: []caveat{{ condition: "splendid", }, { condition: "barbara-is-great", location: "barbara", rootKey: "barbara-caveat-root-key", }}, }}, conditions: []conditionTest{{ conditions: map[string]bool{ "wonderful": true, "splendid": true, }, expectErr: `cannot find discharge macaroon for caveat "barbara-is-great"`, }}, }, { about: "recursive third party caveats", macaroons: recursiveThirdPartyCaveatMacaroons, conditions: []conditionTest{{ conditions: map[string]bool{ "wonderful": true, "splendid": true, "high-fiving": true, "spiffing": true, }, }, { conditions: map[string]bool{ "wonderful": true, "splendid": true, "high-fiving": false, "spiffing": true, }, expectErr: `condition "high-fiving" not met`, }}, }, { about: "unused discharge", macaroons: []macaroonSpec{{ rootKey: "root-key", id: "root-id", }, { rootKey: "other-key", id: "unused", }}, conditions: []conditionTest{{ expectErr: `discharge macaroon "unused" was not used`, }}, }} var recursiveThirdPartyCaveatMacaroons = []macaroonSpec{{ rootKey: "root-key", id: "root-id", caveats: []caveat{{ condition: "wonderful", }, { condition: "bob-is-great", location: "bob", rootKey: "bob-caveat-root-key", }, { condition: "charlie-is-great", location: 
"charlie", rootKey: "charlie-caveat-root-key", }}, }, { location: "bob", rootKey: "bob-caveat-root-key", id: "bob-is-great", caveats: []caveat{{ condition: "splendid", }, { condition: "barbara-is-great", location: "barbara", rootKey: "barbara-caveat-root-key", }}, }, { location: "charlie", rootKey: "charlie-caveat-root-key", id: "charlie-is-great", caveats: []caveat{{ condition: "splendid", }, { condition: "celine-is-great", location: "celine", rootKey: "celine-caveat-root-key", }}, }, { location: "barbara", rootKey: "barbara-caveat-root-key", id: "barbara-is-great", caveats: []caveat{{ condition: "spiffing", }, { condition: "ben-is-great", location: "ben", rootKey: "ben-caveat-root-key", }}, }, { location: "ben", rootKey: "ben-caveat-root-key", id: "ben-is-great", }, { location: "celine", rootKey: "celine-caveat-root-key", id: "celine-is-great", caveats: []caveat{{ condition: "high-fiving", }}, }} func (*macaroonSuite) TestVerify(c *gc.C) { for i, test := range verifyTests { c.Logf("test %d: %s", i, test.about) rootKey, primary, discharges := makeMacaroons(test.macaroons) for _, cond := range test.conditions { c.Logf("conditions %#v", cond.conditions) check := func(cav string) error { if cond.conditions[cav] { return nil } return fmt.Errorf("condition %q not met", cav) } err := primary.Verify( rootKey, check, discharges, ) if cond.expectErr != "" { c.Assert(err, gc.ErrorMatches, cond.expectErr) } else { c.Assert(err, gc.IsNil) } // Cloned macaroon should have same verify result. 
cloneErr := primary.Clone().Verify(rootKey, check, discharges) c.Assert(cloneErr, gc.DeepEquals, err) } } } func (*macaroonSuite) TestMarshalJSON(c *gc.C) { rootKey := []byte("secret") m0 := MustNew(rootKey, "some id", "a location") m0.AddFirstPartyCaveat("account = 3735928559") m0JSON, err := json.Marshal(m0) c.Assert(err, gc.IsNil) var m1 macaroon.Macaroon err = json.Unmarshal(m0JSON, &m1) c.Assert(err, gc.IsNil) c.Assert(m0.Location(), gc.Equals, m1.Location()) c.Assert(m0.Id(), gc.Equals, m1.Id()) c.Assert( hex.EncodeToString(m0.Signature()), gc.Equals, hex.EncodeToString(m1.Signature())) } func (*macaroonSuite) TestJSONRoundTrip(c *gc.C) { // jsonData produced from the second example in libmacaroons // example README, but with the signature tweaked to // match our current behaviour. // TODO fix that behaviour so that our signatures match. jsonData := `{"caveats":[{"cid":"account = 3735928559"},{"cid":"this was how we remind auth of key\/pred","vid":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA027FAuBYhtHwJ58FX6UlVNFtFsGxQHS7uD\/w\/dedwv4Jjw7UorCREw5rXbRqIKhr","cl":"http:\/\/auth.mybank\/"}],"location":"http:\/\/mybank\/","identifier":"we used our other secret key","signature":"6e315b0b391e8c6cc6f8d88fc22933a13430fb289b2fb613cf70f746bbe7d27d"}` var m macaroon.Macaroon err := json.Unmarshal([]byte(jsonData), &m) c.Assert(err, gc.IsNil) c.Assert(hex.EncodeToString(m.Signature()), gc.Equals, "6e315b0b391e8c6cc6f8d88fc22933a13430fb289b2fb613cf70f746bbe7d27d") data, err := m.MarshalJSON() c.Assert(err, gc.IsNil) // Check that the round-tripped data is the same as the original // data when unmarshalled into an interface{}. 
var got interface{} err = json.Unmarshal(data, &got) c.Assert(err, gc.IsNil) var original interface{} err = json.Unmarshal([]byte(jsonData), &original) c.Assert(err, gc.IsNil) c.Assert(got, gc.DeepEquals, original) } type caveat struct { rootKey string location string condition string } type macaroonSpec struct { rootKey string id string caveats []caveat location string } func makeMacaroons(mspecs []macaroonSpec) ( rootKey []byte, primary *macaroon.Macaroon, discharges []*macaroon.Macaroon, ) { var macaroons []*macaroon.Macaroon for _, mspec := range mspecs { macaroons = append(macaroons, makeMacaroon(mspec)) } primary = macaroons[0] discharges = macaroons[1:] for _, m := range discharges { m.Bind(primary.Signature()) } return []byte(mspecs[0].rootKey), primary, discharges } func makeMacaroon(mspec macaroonSpec) *macaroon.Macaroon { m := MustNew([]byte(mspec.rootKey), mspec.id, mspec.location) for _, cav := range mspec.caveats { if cav.location != "" { err := m.AddThirdPartyCaveat([]byte(cav.rootKey), cav.condition, cav.location) if err != nil { panic(err) } } else { m.AddFirstPartyCaveat(cav.condition) } } return m } func assertEqualMacaroons(c *gc.C, m0, m1 *macaroon.Macaroon) { m0json, err := m0.MarshalJSON() c.Assert(err, gc.IsNil) m1json, err := m1.MarshalJSON() var m0val, m1val interface{} err = json.Unmarshal(m0json, &m0val) c.Assert(err, gc.IsNil) err = json.Unmarshal(m1json, &m1val) c.Assert(err, gc.IsNil) c.Assert(m0val, gc.DeepEquals, m1val) } func (*macaroonSuite) TestBinaryRoundTrip(c *gc.C) { // Test the binary marshalling and unmarshalling of a macaroon with // first and third party caveats. 
rootKey := []byte("secret") m0 := MustNew(rootKey, "some id", "a location") err := m0.AddFirstPartyCaveat("first caveat") c.Assert(err, gc.IsNil) err = m0.AddFirstPartyCaveat("second caveat") c.Assert(err, gc.IsNil) err = m0.AddThirdPartyCaveat([]byte("shared root key"), "3rd party caveat", "remote.com") c.Assert(err, gc.IsNil) data, err := m0.MarshalBinary() c.Assert(err, gc.IsNil) var m1 macaroon.Macaroon err = m1.UnmarshalBinary(data) c.Assert(err, gc.IsNil) assertEqualMacaroons(c, m0, &m1) } func (*macaroonSuite) TestBinaryMarshalingAgainstLibmacaroon(c *gc.C) { // Test that a libmacaroon marshalled macaroon can be correctly unmarshaled data, err := base64.StdEncoding.DecodeString( "MDAxN2xvY2F0aW9uIHNvbWV3aGVyZQowMDEyaWRlbnRpZmllciBpZAowMDEzY2lkIGlkZW50aWZpZXIKMDA1MXZpZCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC4i9QwCgbL/wZGFvLQpsyhLOv0v6VjIo2KJv5miz+7krqCpt5EhmrL8pYO9xrhT80KMDAxM2NsIHRoaXJkIHBhcnR5CjAwMmZzaWduYXR1cmUg3BXkIDX0giAPPrgkDLbiMGYy/zsC2qPb4jU4G/dohkAK") c.Assert(err, gc.IsNil) var m0 macaroon.Macaroon err = m0.UnmarshalBinary(data) c.Assert(err, gc.IsNil) jsonData := []byte(`{"caveats":[{"cid":"identifier","vid":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAuIvUMAoGy/8GRhby0KbMoSzr9L+lYyKNiib+Zos/u5K6gqbeRIZqy/KWDvca4U/N","cl":"third party"}],"location":"somewhere","identifier":"id","signature":"dc15e42035f482200f3eb8240cb6e2306632ff3b02daa3dbe235381bf7688640"}`) var m1 macaroon.Macaroon err = m1.UnmarshalJSON(jsonData) c.Assert(err, gc.IsNil) assertEqualMacaroons(c, &m0, &m1) } func (*macaroonSuite) TestMacaroonFieldsTooBig(c *gc.C) { rootKey := []byte("secret") toobig := make([]byte, macaroon.MaxPacketLen) _, err := rand.Reader.Read(toobig) c.Assert(err, gc.IsNil) _, err = macaroon.New(rootKey, string(toobig), "a location") c.Assert(err, gc.ErrorMatches, "macaroon identifier too big") _, err = macaroon.New(rootKey, "some id", string(toobig)) c.Assert(err, gc.ErrorMatches, "macaroon location too big") m0 := MustNew(rootKey, "some id", "a location") err = 
m0.AddThirdPartyCaveat([]byte("shared root key"), string(toobig), "remote.com") c.Assert(err, gc.ErrorMatches, "caveat identifier too big") err = m0.AddThirdPartyCaveat([]byte("shared root key"), "3rd party caveat", string(toobig)) c.Assert(err, gc.ErrorMatches, "caveat location too big") } charm-2.1.1/src/gopkg.in/macaroon.v1/crypto_test.go0000664000175000017500000000321712672604513021141 0ustar marcomarcopackage macaroon import ( "crypto/rand" "fmt" "golang.org/x/crypto/nacl/secretbox" gc "gopkg.in/check.v1" ) type cryptoSuite struct{} var _ = gc.Suite(&cryptoSuite{}) var testCryptKey = &[hashLen]byte{'k', 'e', 'y'} var testCryptText = &[hashLen]byte{'t', 'e', 'x', 't'} func (*cryptoSuite) TestEncDec(c *gc.C) { b, err := encrypt(testCryptKey, testCryptText, rand.Reader) c.Assert(err, gc.IsNil) t, err := decrypt(testCryptKey, b) c.Assert(err, gc.IsNil) c.Assert(string(t[:]), gc.Equals, string(testCryptText[:])) } func (*cryptoSuite) TestUniqueNonces(c *gc.C) { nonces := make(map[string]struct{}) for i := 0; i < 100; i++ { nonce, err := newNonce(rand.Reader) c.Assert(err, gc.IsNil) nonces[string(nonce[:])] = struct{}{} } c.Assert(nonces, gc.HasLen, 100, gc.Commentf("duplicate nonce detected")) } type ErrorReader struct{} func (*ErrorReader) Read([]byte) (int, error) { return 0, fmt.Errorf("fail") } func (*cryptoSuite) TestBadRandom(c *gc.C) { _, err := newNonce(&ErrorReader{}) c.Assert(err, gc.ErrorMatches, "^cannot generate random bytes:.*") _, err = encrypt(testCryptKey, testCryptText, &ErrorReader{}) c.Assert(err, gc.ErrorMatches, "^cannot generate random bytes:.*") } func (*cryptoSuite) TestBadCiphertext(c *gc.C) { buf := randomBytes(nonceLen + secretbox.Overhead) for i := range buf { _, err := decrypt(testCryptKey, buf[0:i]) c.Assert(err, gc.ErrorMatches, "message too short") } _, err := decrypt(testCryptKey, buf) c.Assert(err, gc.ErrorMatches, "decryption failure") } func randomBytes(n int) []byte { buf := make([]byte, n) if _, err := rand.Reader.Read(buf); 
err != nil { panic(err) } return buf } charm-2.1.1/src/gopkg.in/macaroon.v1/marshal.go0000664000175000017500000001564012672604513020214 0ustar marcomarcopackage macaroon import ( "encoding/base64" "encoding/hex" "encoding/json" "fmt" ) // field names, as defined in libmacaroons const ( fieldLocation = "location" fieldIdentifier = "identifier" fieldSignature = "signature" fieldCaveatId = "cid" fieldVerificationId = "vid" fieldCaveatLocation = "cl" ) var ( fieldLocationBytes = []byte("location") fieldIdentifierBytes = []byte("identifier") fieldSignatureBytes = []byte("signature") fieldCaveatIdBytes = []byte("cid") fieldVerificationIdBytes = []byte("vid") fieldCaveatLocationBytes = []byte("cl") ) // macaroonJSON defines the JSON format for macaroons. type macaroonJSON struct { Caveats []caveatJSON `json:"caveats"` Location string `json:"location"` Identifier string `json:"identifier"` Signature string `json:"signature"` // hex-encoded } // caveatJSON defines the JSON format for caveats within a macaroon. type caveatJSON struct { CID string `json:"cid"` VID string `json:"vid,omitempty"` Location string `json:"cl,omitempty"` } // MarshalJSON implements json.Marshaler. func (m *Macaroon) MarshalJSON() ([]byte, error) { mjson := macaroonJSON{ Location: m.Location(), Identifier: m.dataStr(m.id), Signature: hex.EncodeToString(m.sig[:]), Caveats: make([]caveatJSON, len(m.caveats)), } for i, cav := range m.caveats { mjson.Caveats[i] = caveatJSON{ Location: m.dataStr(cav.location), CID: m.dataStr(cav.caveatId), VID: base64.URLEncoding.EncodeToString(m.dataBytes(cav.verificationId)), } } data, err := json.Marshal(mjson) if err != nil { return nil, fmt.Errorf("cannot marshal json data: %v", err) } return data, nil } // UnmarshalJSON implements json.Unmarshaler. 
func (m *Macaroon) UnmarshalJSON(jsonData []byte) error { var mjson macaroonJSON err := json.Unmarshal(jsonData, &mjson) if err != nil { return fmt.Errorf("cannot unmarshal json data: %v", err) } if err := m.init(mjson.Identifier, mjson.Location); err != nil { return err } sig, err := hex.DecodeString(mjson.Signature) if err != nil { return fmt.Errorf("cannot decode macaroon signature %q: %v", m.sig, err) } if len(sig) != hashLen { return fmt.Errorf("signature has unexpected length %d", len(sig)) } copy(m.sig[:], sig) m.caveats = m.caveats[:0] for _, cav := range mjson.Caveats { vid, err := base64Decode(cav.VID) if err != nil { return fmt.Errorf("cannot decode verification id %q: %v", cav.VID, err) } if _, err := m.appendCaveat(cav.CID, vid, cav.Location); err != nil { return err } } return nil } // MarshalBinary implements encoding.BinaryMarshaler. func (m *Macaroon) MarshalBinary() ([]byte, error) { data := make([]byte, 0, m.marshalBinaryLen()) return m.appendBinary(data) } // The binary format of a macaroon is as follows. // Each identifier repesents a packet. // // location // identifier // ( // caveatId? // verificationId? // caveatLocation? // )* // signature // unmarshalBinaryNoCopy is the internal implementation of // UnmarshalBinary. It differs in that it does not copy the // data. func (m *Macaroon) unmarshalBinaryNoCopy(data []byte) error { m.data = data var err error var start int start, m.location, err = m.expectPacket(0, fieldLocation) if err != nil { return err } start, m.id, err = m.expectPacket(start, fieldIdentifier) if err != nil { return err } var cav caveat for { p, err := m.parsePacket(start) if err != nil { return err } start += p.len() switch field := string(m.fieldName(p)); field { case fieldSignature: // At the end of the caveats we find the signature. if cav.caveatId.len() != 0 { m.caveats = append(m.caveats, cav) } // Remove the signature from data. 
m.data = m.data[0:p.start] sig := m.dataBytes(p) if len(sig) != hashLen { return fmt.Errorf("signature has unexpected length %d", len(sig)) } copy(m.sig[:], sig) return nil case fieldCaveatId: if cav.caveatId.len() != 0 { m.caveats = append(m.caveats, cav) } cav.caveatId = p case fieldVerificationId: if cav.verificationId.len() != 0 { return fmt.Errorf("repeated field %q in caveat", fieldVerificationId) } cav.verificationId = p case fieldCaveatLocation: if cav.location.len() != 0 { return fmt.Errorf("repeated field %q in caveat", fieldLocation) } cav.location = p default: return fmt.Errorf("unexpected field %q", field) } } } // UnmarshalBinary implements encoding.BinaryUnmarshaler. func (m *Macaroon) UnmarshalBinary(data []byte) error { data = append([]byte(nil), data...) return m.unmarshalBinaryNoCopy(data) } func (m *Macaroon) expectPacket(start int, kind string) (int, packet, error) { p, err := m.parsePacket(start) if err != nil { return 0, packet{}, err } if field := string(m.fieldName(p)); field != kind { return 0, packet{}, fmt.Errorf("unexpected field %q; expected %s", field, kind) } return start + p.len(), p, nil } func (m *Macaroon) appendBinary(data []byte) ([]byte, error) { data = append(data, m.data...) data, _, ok := rawAppendPacket(data, fieldSignature, m.sig[:]) if !ok { return nil, fmt.Errorf("failed to append signature to macaroon, packet is too long") } return data, nil } func (m *Macaroon) marshalBinaryLen() int { return len(m.data) + packetSize(fieldSignature, m.sig[:]) } // Slice defines a collection of macaroons. By convention, the // first macaroon in the slice is a primary macaroon and the rest // are discharges for its third party caveats. type Slice []*Macaroon // MarshalBinary implements encoding.BinaryMarshaler. 
func (s Slice) MarshalBinary() ([]byte, error) { size := 0 for _, m := range s { size += m.marshalBinaryLen() } data := make([]byte, 0, size) var err error for _, m := range s { data, err = m.appendBinary(data) if err != nil { return nil, fmt.Errorf("failed to marshal macaroon %q: %v", m.Id(), err) } } return data, nil } // UnmarshalBinary implements encoding.BinaryUnmarshaler. func (s *Slice) UnmarshalBinary(data []byte) error { data = append([]byte(nil), data...) *s = (*s)[:0] for len(data) > 0 { var m Macaroon err := m.unmarshalBinaryNoCopy(data) if err != nil { return fmt.Errorf("cannot unmarshal macaroon: %v", err) } *s = append(*s, &m) // Prevent the macaroon from overwriting the other ones // by setting the capacity of its data. m.data = m.data[0:len(m.data):m.marshalBinaryLen()] data = data[m.marshalBinaryLen():] } return nil } // base64Decode decodes base64 data that might be missing trailing // pad characters. func base64Decode(b64String string) ([]byte, error) { paddedLen := (len(b64String) + 3) / 4 * 4 b64data := make([]byte, len(b64String), paddedLen) copy(b64data, b64String) for i := len(b64String); i < paddedLen; i++ { b64data = append(b64data, '=') } data := make([]byte, base64.URLEncoding.DecodedLen(len(b64data))) n, err := base64.URLEncoding.Decode(data, b64data) if err != nil { return nil, err } return data[0:n], nil } charm-2.1.1/src/gopkg.in/macaroon.v1/README.md0000664000175000017500000000772312672604513017520 0ustar marcomarco# macaroon -- import "gopkg.in/macaroon.v1" The macaroon package implements macaroons as described in the paper "Macaroons: Cookies with Contextual Caveats for Decentralized Authorization in the Cloud" (http://theory.stanford.edu/~ataly/Papers/macaroons.pdf) See the macaroon bakery packages at http://godoc.org/gopkg.in/macaroon-bakery.v0 for higher level services and operations that use macaroons. 
## Usage #### type Caveat ```go type Caveat struct { Id string Location string } ``` #### type Macaroon ```go type Macaroon struct { } ``` Macaroon holds a macaroon. See Fig. 7 of http://theory.stanford.edu/~ataly/Papers/macaroons.pdf for a description of the data contained within. Macaroons are mutable objects - use Clone as appropriate to avoid unwanted mutation. #### func New ```go func New(rootKey []byte, id, loc string) (*Macaroon, error) ``` New returns a new macaroon with the given root key, identifier and location. #### func (*Macaroon) AddFirstPartyCaveat ```go func (m *Macaroon) AddFirstPartyCaveat(caveatId string) error ``` AddFirstPartyCaveat adds a caveat that will be verified by the target service. #### func (*Macaroon) AddThirdPartyCaveat ```go func (m *Macaroon) AddThirdPartyCaveat(rootKey []byte, caveatId string, loc string) error ``` AddThirdPartyCaveat adds a third-party caveat to the macaroon, using the given shared root key, caveat id and location hint. The caveat id should encode the root key in some way, either by encrypting it with a key known to the third party or by holding a reference to it stored in the third party's storage. #### func (*Macaroon) Bind ```go func (m *Macaroon) Bind(sig []byte) ``` Bind prepares the macaroon for being used to discharge the macaroon with the given signature sig. This must be used before it is used in the discharges argument to Verify. #### func (*Macaroon) Caveats ```go func (m *Macaroon) Caveats() []Caveat ``` Caveats returns the macaroon's caveats. This method will probably change, and it's important not to change the returned caveat. #### func (*Macaroon) Clone ```go func (m *Macaroon) Clone() *Macaroon ``` Clone returns a copy of the receiving macaroon. #### func (*Macaroon) Id ```go func (m *Macaroon) Id() string ``` Id returns the id of the macaroon. This can hold arbitrary information. 
#### func (*Macaroon) Location ```go func (m *Macaroon) Location() string ``` Location returns the macaroon's location hint. This is not verified as part of the macaroon. #### func (*Macaroon) MarshalBinary ```go func (m *Macaroon) MarshalBinary() ([]byte, error) ``` MarshalBinary implements encoding.BinaryMarshaler. #### func (*Macaroon) MarshalJSON ```go func (m *Macaroon) MarshalJSON() ([]byte, error) ``` MarshalJSON implements json.Marshaler. #### func (*Macaroon) Signature ```go func (m *Macaroon) Signature() []byte ``` Signature returns the macaroon's signature. #### func (*Macaroon) UnmarshalBinary ```go func (m *Macaroon) UnmarshalBinary(data []byte) error ``` UnmarshalBinary implements encoding.BinaryUnmarshaler. #### func (*Macaroon) UnmarshalJSON ```go func (m *Macaroon) UnmarshalJSON(jsonData []byte) error ``` UnmarshalJSON implements json.Unmarshaler. #### func (*Macaroon) Verify ```go func (m *Macaroon) Verify(rootKey []byte, check func(caveat string) error, discharges []*Macaroon) error ``` Verify verifies that the receiving macaroon is valid. The root key must be the same that the macaroon was originally minted with. The check function is called to verify each first-party caveat - it should return an error if the condition is not met. The discharge macaroons should be provided in discharges. Verify returns true if the verification succeeds; if returns (false, nil) if the verification fails, and (false, err) if the verification cannot be asserted (but may not be false). TODO(rog) is there a possible DOS attack that can cause this function to infinitely recurse? 
#### type Verifier ```go type Verifier interface { Verify(m *Macaroon, rootKey []byte) (bool, error) } ``` charm-2.1.1/src/gopkg.in/macaroon.v1/packet_test.go0000664000175000017500000001006512672604513021067 0ustar marcomarcopackage macaroon import ( "strconv" "strings" "unicode" gc "gopkg.in/check.v1" ) type packetSuite struct{} var _ = gc.Suite(&packetSuite{}) func (*packetSuite) TestAppendPacket(c *gc.C) { var m Macaroon p, ok := m.appendPacket("field", []byte("some data")) c.Assert(ok, gc.Equals, true) c.Assert(string(m.data), gc.Equals, "0014field some data\n") c.Assert(p, gc.Equals, packet{ start: 0, totalLen: 20, headerLen: 10, }) p, ok = m.appendPacket("otherfield", []byte("more and more data")) c.Assert(ok, gc.Equals, true) c.Assert(string(m.data), gc.Equals, "0014field some data\n0022otherfield more and more data\n") c.Assert(p, gc.Equals, packet{ start: 20, totalLen: 34, headerLen: 15, }) } func (*packetSuite) TestAppendPacketTooBig(c *gc.C) { var m Macaroon data := make([]byte, 65532) p, ok := m.appendPacket("field", data) c.Assert(ok, gc.Equals, false) c.Assert(p, gc.Equals, packet{}) } func (*packetSuite) TestDataBytes(c *gc.C) { var m Macaroon m.appendPacket("first", []byte("first data")) p, ok := m.appendPacket("field", []byte("some data")) c.Assert(ok, gc.Equals, true) c.Assert(string(m.dataBytes(p)), gc.Equals, "some data") } func (*packetSuite) TestPacketBytes(c *gc.C) { var m Macaroon m.appendPacket("first", []byte("first data")) p, ok := m.appendPacket("field", []byte("some data")) c.Assert(ok, gc.Equals, true) c.Assert(string(m.packetBytes(p)), gc.Equals, "0014field some data\n") } func (*packetSuite) TestFieldName(c *gc.C) { var m Macaroon m.appendPacket("first", []byte("first data")) p, ok := m.appendPacket("field", []byte("some data")) c.Assert(ok, gc.Equals, true) c.Assert(string(m.fieldName(p)), gc.Equals, "field") c.Assert(m.fieldName(packet{}), gc.HasLen, 0) } var parsePacketTests = []struct { data string start int expect packet 
expectErr string expectData string expectField string }{{ expectErr: "packet too short", }, { data: "0014field some data\n", start: 0, expect: packet{ start: 0, totalLen: 20, headerLen: 10, }, expectData: "some data", expectField: "field", }, { data: "0014field some data\n", start: 1, expectErr: "packet size too big", }, { data: "0014field some data\n0014field some data\n", start: 0x14, expect: packet{ start: 0x14, totalLen: 20, headerLen: 10, }, expectData: "some data", expectField: "field", }, { data: "0014fieldwithoutanyspaceordata\n", start: 0, expectErr: "cannot parse field name", }, { data: "fedcsomefield " + strings.Repeat("x", 0xfedc-len("0000somefield \n")) + "\n", start: 0, expect: packet{ start: 0, totalLen: 0xfedc, headerLen: 14, }, expectData: strings.Repeat("x", 0xfedc-len("0000somefield \n")), expectField: "somefield", }, { data: "zzzzbadpacketsizenomacaroon", start: 0, expectErr: "cannot parse size", }} func (*packetSuite) TestParsePacket(c *gc.C) { for i, test := range parsePacketTests { c.Logf("test %d: %q", i, truncate(test.data)) m := Macaroon{ data: []byte(test.data), } p, err := m.parsePacket(test.start) if test.expectErr != "" { c.Assert(err, gc.ErrorMatches, test.expectErr) c.Assert(p, gc.Equals, packet{}) continue } c.Assert(err, gc.IsNil) c.Assert(p, gc.Equals, test.expect) c.Assert(string(m.dataBytes(p)), gc.Equals, test.expectData) c.Assert(string(m.fieldName(p)), gc.Equals, test.expectField) // append the same packet again and check that // the contents are the same. p1, ok := m.appendPacket(test.expectField, []byte(test.expectData)) c.Assert(ok, gc.Equals, true) c.Assert(string(m.packetBytes(p)), gc.Equals, string(m.packetBytes(p1))) } } func truncate(d string) string { if len(d) > 50 { return d[0:50] + "..." 
} return d } func (*packetSuite) TestAsciiHex(c *gc.C) { for b := 0; b < 256; b++ { n, err := strconv.ParseInt(string(b), 16, 8) value, ok := asciiHex(byte(b)) if err != nil || unicode.IsUpper(rune(b)) { c.Assert(ok, gc.Equals, false) c.Assert(value, gc.Equals, 0) } else { c.Assert(ok, gc.Equals, true) c.Assert(value, gc.Equals, int(n)) } } } charm-2.1.1/src/gopkg.in/macaroon.v1/packet.go0000664000175000017500000000734112672604513020033 0ustar marcomarcopackage macaroon import ( "bytes" "fmt" ) // The macaroon binary encoding is made from a sequence // of "packets", each of which has a field name and some data. // The encoding is: // // - four ascii hex digits holding the entire packet size (including // the digits themselves). // // - the field name, followed by an ascii space. // // - the raw data // // - a newline (\n) character // // For efficiency, we store all the packets inside // a single byte slice inside the macaroon, Macaroon.data. This // is reasonable to do because we only ever append // to macaroons. // // The packet struct below holds a reference into Macaroon.data. type packet struct { start int32 totalLen uint16 headerLen uint16 } func (p packet) len() int { return int(p.totalLen) } // dataBytes returns the data payload of the packet. func (m *Macaroon) dataBytes(p packet) []byte { if p.totalLen == 0 { return nil } return m.data[p.start+int32(p.headerLen) : p.start+int32(p.totalLen)-1] } func (m *Macaroon) dataStr(p packet) string { return string(m.dataBytes(p)) } // packetBytes returns the entire packet. func (m *Macaroon) packetBytes(p packet) []byte { return m.data[p.start : p.start+int32(p.totalLen)] } // fieldName returns the field name of the packet. func (m *Macaroon) fieldName(p packet) []byte { if p.totalLen == 0 { return nil } return m.data[p.start+4 : p.start+int32(p.headerLen)-1] } // parsePacket parses the packet starting at the given // index into m.data. 
func (m *Macaroon) parsePacket(start int) (packet, error) { data := m.data[start:] if len(data) < 6 { return packet{}, fmt.Errorf("packet too short") } plen, ok := parseSize(data) if !ok { return packet{}, fmt.Errorf("cannot parse size") } if plen > len(data) { return packet{}, fmt.Errorf("packet size too big") } data = data[4:plen] i := bytes.IndexByte(data, ' ') if i <= 0 { return packet{}, fmt.Errorf("cannot parse field name") } if data[len(data)-1] != '\n' { return packet{}, fmt.Errorf("no terminating newline found") } return packet{ start: int32(start), totalLen: uint16(plen), headerLen: uint16(4 + i + 1), }, nil } const maxPacketLen = 0xffff // appendPacket appends a packet with the given field name // and data to m.data, and returns the packet appended. // // It returns false (and a zero packet) if the packet was too big. func (m *Macaroon) appendPacket(field string, data []byte) (packet, bool) { mdata, p, ok := rawAppendPacket(m.data, field, data) if !ok { return p, false } m.data = mdata return p, true } // rawAppendPacket appends a packet to the given byte slice. func rawAppendPacket(buf []byte, field string, data []byte) ([]byte, packet, bool) { plen := packetSize(field, data) if plen > maxPacketLen { return nil, packet{}, false } s := packet{ start: int32(len(buf)), totalLen: uint16(plen), headerLen: uint16(4 + len(field) + 1), } buf = appendSize(buf, plen) buf = append(buf, field...) buf = append(buf, ' ') buf = append(buf, data...) 
buf = append(buf, '\n') return buf, s, true } func packetSize(field string, data []byte) int { return 4 + len(field) + 1 + len(data) + 1 } var hexDigits = []byte("0123456789abcdef") func appendSize(data []byte, size int) []byte { return append(data, hexDigits[size>>12], hexDigits[(size>>8)&0xf], hexDigits[(size>>4)&0xf], hexDigits[size&0xf], ) } func parseSize(data []byte) (int, bool) { d0, ok0 := asciiHex(data[0]) d1, ok1 := asciiHex(data[1]) d2, ok2 := asciiHex(data[2]) d3, ok3 := asciiHex(data[3]) return d0<<12 + d1<<8 + d2<<4 + d3, ok0 && ok1 && ok2 && ok3 } func asciiHex(b byte) (int, bool) { switch { case b >= '0' && b <= '9': return int(b) - '0', true case b >= 'a' && b <= 'f': return int(b) - 'a' + 0xa, true } return 0, false } charm-2.1.1/src/gopkg.in/macaroon.v1/bench_test.go0000664000175000017500000000464512672604513020706 0ustar marcomarcopackage macaroon_test import ( "crypto/rand" "encoding/base64" "testing" "gopkg.in/macaroon.v1" ) func randomBytes(n int) []byte { b := make([]byte, n) _, err := rand.Read(b) if err != nil { panic(err) } return b } func BenchmarkNew(b *testing.B) { rootKey := randomBytes(24) id := base64.StdEncoding.EncodeToString(randomBytes(100)) loc := base64.StdEncoding.EncodeToString(randomBytes(40)) b.ResetTimer() for i := b.N - 1; i >= 0; i-- { MustNew(rootKey, id, loc) } } func BenchmarkAddCaveat(b *testing.B) { rootKey := randomBytes(24) id := base64.StdEncoding.EncodeToString(randomBytes(100)) loc := base64.StdEncoding.EncodeToString(randomBytes(40)) b.ResetTimer() for i := b.N - 1; i >= 0; i-- { b.StopTimer() m := MustNew(rootKey, id, loc) b.StartTimer() m.AddFirstPartyCaveat("some caveat stuff") } } func benchmarkVerify(b *testing.B, mspecs []macaroonSpec) { rootKey, primary, discharges := makeMacaroons(mspecs) check := func(string) error { return nil } b.ResetTimer() for i := b.N - 1; i >= 0; i-- { err := primary.Verify(rootKey, check, discharges) if err != nil { b.Fatalf("verification failed: %v", err) } } } func 
BenchmarkVerifyLarge(b *testing.B) { benchmarkVerify(b, recursiveThirdPartyCaveatMacaroons) } func BenchmarkVerifySmall(b *testing.B) { benchmarkVerify(b, []macaroonSpec{{ rootKey: "root-key", id: "root-id", caveats: []caveat{{ condition: "wonderful", }}, }}) } func BenchmarkMarshalJSON(b *testing.B) { rootKey := randomBytes(24) id := base64.StdEncoding.EncodeToString(randomBytes(100)) loc := base64.StdEncoding.EncodeToString(randomBytes(40)) m := MustNew(rootKey, id, loc) b.ResetTimer() for i := b.N - 1; i >= 0; i-- { _, err := m.MarshalJSON() if err != nil { b.Fatalf("cannot marshal JSON: %v", err) } } } func MustNew(rootKey []byte, id, loc string) *macaroon.Macaroon { m, err := macaroon.New(rootKey, id, loc) if err != nil { panic(err) } return m } func BenchmarkUnmarshalJSON(b *testing.B) { rootKey := randomBytes(24) id := base64.StdEncoding.EncodeToString(randomBytes(100)) loc := base64.StdEncoding.EncodeToString(randomBytes(40)) m := MustNew(rootKey, id, loc) data, err := m.MarshalJSON() if err != nil { b.Fatalf("cannot marshal JSON: %v", err) } for i := b.N - 1; i >= 0; i-- { var m macaroon.Macaroon err := m.UnmarshalJSON(data) if err != nil { b.Fatalf("cannot unmarshal JSON: %v", err) } } } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/0000775000175000017500000000000012672604475017512 5ustar marcomarcocharm-2.1.1/src/gopkg.in/macaroon-bakery.v1/TODO0000664000175000017500000000022412672604475020200 0ustar marcomarcoall: - when API is stable, move to gopkg.in/macaroon.v1 macaroon: - change all signature calculations to correspond exactly with libmacaroons. charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakerytest/0000775000175000017500000000000012672604475021667 5ustar marcomarcocharm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakerytest/bakerytest.go0000664000175000017500000002227412672604475024402 0ustar marcomarco// Package bakerytest provides test helper functions for // the bakery. 
package bakerytest import ( "crypto/tls" "fmt" "net/http" "net/http/httptest" "sync" "time" "github.com/juju/httprequest" "gopkg.in/errgo.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/httpbakery" ) // Discharger is a third-party caveat discharger suitable // for testing. It listens on a local network port for // discharge requests. It should be shut down by calling // Close when done with. type Discharger struct { Service *bakery.Service server *httptest.Server } var skipVerify struct { mu sync.Mutex refCount int oldSkipVerify bool } func startSkipVerify() { v := &skipVerify v.mu.Lock() defer v.mu.Unlock() if v.refCount++; v.refCount > 1 { return } transport, ok := http.DefaultTransport.(*http.Transport) if !ok { return } if transport.TLSClientConfig != nil { v.oldSkipVerify = transport.TLSClientConfig.InsecureSkipVerify transport.TLSClientConfig.InsecureSkipVerify = true } else { v.oldSkipVerify = false transport.TLSClientConfig = &tls.Config{ InsecureSkipVerify: true, } } } func stopSkipVerify() { v := &skipVerify v.mu.Lock() defer v.mu.Unlock() if v.refCount--; v.refCount > 0 { return } transport, ok := http.DefaultTransport.(*http.Transport) if !ok { return } // technically this doesn't return us to the original state, // as TLSClientConfig may have been nil before but won't // be now, but that should be equivalent. transport.TLSClientConfig.InsecureSkipVerify = v.oldSkipVerify } // NewDischarger returns a new third party caveat discharger // which uses the given function to check caveats. // The cond and arg arguments to the function are as returned // by checkers.ParseCaveat. // // If locator is non-nil, it will be used to find public keys // for any third party caveats returned by the checker. // // Calling this function has the side-effect of setting // InsecureSkipVerify in http.DefaultTransport.TLSClientConfig // until all the dischargers are closed. 
func NewDischarger( locator bakery.PublicKeyLocator, checker func(req *http.Request, cond, arg string) ([]checkers.Caveat, error), ) *Discharger { mux := http.NewServeMux() server := httptest.NewTLSServer(mux) svc, err := bakery.NewService(bakery.NewServiceParams{ Location: server.URL, Locator: locator, }) if err != nil { panic(err) } checker1 := func(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) { cond, arg, err := checkers.ParseCaveat(cav) if err != nil { return nil, err } return checker(req, cond, arg) } httpbakery.AddDischargeHandler(mux, "/", svc, checker1) startSkipVerify() return &Discharger{ Service: svc, server: server, } } // Close shuts down the server. It may be called more than // once on the same discharger. func (d *Discharger) Close() { if d.server == nil { return } d.server.Close() stopSkipVerify() d.server = nil } // Location returns the location of the discharger, suitable // for setting as the location in a third party caveat. // This will be the URL of the server. func (d *Discharger) Location() string { return d.Service.Location() } // PublicKeyForLocation implements bakery.PublicKeyLocator. func (d *Discharger) PublicKeyForLocation(loc string) (*bakery.PublicKey, error) { if loc == d.Location() { return d.Service.PublicKey(), nil } return nil, bakery.ErrNotFound } type dischargeResult struct { err error cavs []checkers.Caveat } type discharge struct { cavId string c chan dischargeResult } // InteractiveDischarger is a Discharger that always requires interraction to // complete the discharge. type InteractiveDischarger struct { Discharger Mux *http.ServeMux // mu protects the following fields. mu sync.Mutex waiting map[string]discharge id int } // NewInteractiveDischarger returns a new InteractiveDischarger. The // InteractiveDischarger will serve the following endpoints by default: // // /discharge - always causes interaction to be required. // /publickey - gets the bakery public key. // /visit - delegates to visitHandler. 
// /wait - blocks waiting for the interaction to complete. // // Additional endpoints may be added to Mux as necessary. // // The /discharge endpoint generates a error with the code // httpbakery.ErrInterractionRequired. The visitURL and waitURL will // point to the /visit and /wait endpoints of the InteractiveDischarger // respectively. These URLs will also carry context information in query // parameters, any handlers should be careful to preserve this context // information between calls. The easiest way to do this is to always use // the URL method when generating new URLs. // // The /visit endpoint is handled by the provided visitHandler. This // handler performs the required interactions and should result in the // FinishInteraction method being called. This handler may process the // interaction in a number of steps, possibly using additional handlers, // so long as FinishInteraction is called when no further interaction is // required. // // The /wait endpoint blocks until FinishInteraction has been called by // the corresponding /visit endpoint, or another endpoint triggered by // visitHandler. // // If locator is non-nil, it will be used to find public keys // for any third party caveats returned by the checker. // // Calling this function has the side-effect of setting // InsecureSkipVerify in http.DefaultTransport.TLSClientConfig // until all the dischargers are closed. // // The returned InteractiveDischarger must be closed when finished with. 
func NewInteractiveDischarger(locator bakery.PublicKeyLocator, visitHandler http.Handler) *InteractiveDischarger { d := &InteractiveDischarger{ Mux: http.NewServeMux(), waiting: map[string]discharge{}, } d.Mux.Handle("/visit", visitHandler) d.Mux.Handle("/wait", http.HandlerFunc(d.wait)) server := httptest.NewTLSServer(d.Mux) svc, err := bakery.NewService(bakery.NewServiceParams{ Location: server.URL, Locator: locator, }) if err != nil { panic(err) } httpbakery.AddDischargeHandler(d.Mux, "/", svc, d.checker) startSkipVerify() d.Discharger = Discharger{ Service: svc, server: server, } return d } func (d *InteractiveDischarger) checker(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) { d.mu.Lock() id := fmt.Sprintf("%d", d.id) d.id++ d.waiting[id] = discharge{cavId, make(chan dischargeResult, 1)} d.mu.Unlock() visitURL := "/visit?waitid=" + id waitURL := "/wait?waitid=" + id return nil, httpbakery.NewInteractionRequiredError(visitURL, waitURL, nil, req) } func (d *InteractiveDischarger) wait(w http.ResponseWriter, r *http.Request) { r.ParseForm() d.mu.Lock() discharge, ok := d.waiting[r.Form.Get("waitid")] d.mu.Unlock() if !ok { code, body := httpbakery.ErrorToResponse(errgo.Newf("invalid waitid %q", r.Form.Get("waitid"))) httprequest.WriteJSON(w, code, body) return } defer func() { d.mu.Lock() delete(d.waiting, r.Form.Get("waitid")) d.mu.Unlock() }() var err error var cavs []checkers.Caveat select { case res := <-discharge.c: err = res.err cavs = res.cavs case <-time.After(5 * time.Minute): code, body := httpbakery.ErrorToResponse(errgo.New("timeout waiting for interaction to complete")) httprequest.WriteJSON(w, code, body) return } if err != nil { code, body := httpbakery.ErrorToResponse(err) httprequest.WriteJSON(w, code, body) return } m, err := d.Service.Discharge( bakery.ThirdPartyCheckerFunc( func(cavId, caveat string) ([]checkers.Caveat, error) { return cavs, nil }, ), discharge.cavId, ) if err != nil { code, body := 
httpbakery.ErrorToResponse(err) httprequest.WriteJSON(w, code, body) return } httprequest.WriteJSON( w, http.StatusOK, httpbakery.WaitResponse{ Macaroon: m, }, ) } // FinishInteraction signals to the InteractiveDischarger that a // particular interaction is complete. It causes any waiting requests to // return. If err is not nil then it will be returned by the // corresponding /wait request. func (d *InteractiveDischarger) FinishInteraction(w http.ResponseWriter, r *http.Request, cavs []checkers.Caveat, err error) { r.ParseForm() d.mu.Lock() discharge, ok := d.waiting[r.Form.Get("waitid")] d.mu.Unlock() if !ok { code, body := httpbakery.ErrorToResponse(errgo.Newf("invalid waitid %q", r.Form.Get("waitid"))) httprequest.WriteJSON(w, code, body) return } select { case discharge.c <- dischargeResult{err: err, cavs: cavs}: default: panic("cannot finish interaction " + r.Form.Get("waitid")) } return } // HostRelativeURL is like URL but includes only the // URL path and query parameters. Use this when returning // a URL for use in GetInteractionMethods. func (d *InteractiveDischarger) HostRelativeURL(path string, r *http.Request) string { r.ParseForm() return path + "?waitid=" + r.Form.Get("waitid") } // URL returns a URL addressed to the given path in the discharger that // contains any discharger context information found in the given // request. Use this to generate intermediate URLs before calling // FinishInteraction. 
func (d *InteractiveDischarger) URL(path string, r *http.Request) string { r.ParseForm() return d.Location() + d.HostRelativeURL(path, r) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakerytest/bakerytest_test.go0000664000175000017500000001522212672604475025434 0ustar marcomarcopackage bakerytest_test import ( "errors" "fmt" "net/http" "net/url" "sync" "github.com/juju/httprequest" gc "gopkg.in/check.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/bakerytest" "gopkg.in/macaroon-bakery.v1/httpbakery" ) type suite struct { client *httpbakery.Client } func (s *suite) SetUpTest(c *gc.C) { s.client = httpbakery.NewClient() } var _ = gc.Suite(&suite{}) func noCaveatChecker(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { return nil, nil } func (s *suite) TestDischargerSimple(c *gc.C) { d := bakerytest.NewDischarger(nil, noCaveatChecker) defer d.Close() svc, err := bakery.NewService(bakery.NewServiceParams{ Location: "here", Locator: d, }) c.Assert(err, gc.IsNil) m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ Location: d.Location(), Condition: "something", }}) c.Assert(err, gc.IsNil) ms, err := s.client.DischargeAll(m) c.Assert(err, gc.IsNil) c.Assert(ms, gc.HasLen, 2) err = svc.Check(ms, failChecker) c.Assert(err, gc.IsNil) } var failChecker = bakery.FirstPartyCheckerFunc(func(s string) error { return fmt.Errorf("fail %s", s) }) func (s *suite) TestDischargerTwoLevels(c *gc.C) { d1checker := func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { if cond != "xtrue" { return nil, fmt.Errorf("caveat refused") } return nil, nil } d1 := bakerytest.NewDischarger(nil, d1checker) defer d1.Close() d2checker := func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { return []checkers.Caveat{{ Location: d1.Location(), Condition: "x" + cond, }}, nil } d2 := bakerytest.NewDischarger(d1, d2checker) defer d2.Close() locator := bakery.PublicKeyLocatorMap{ d1.Location(): 
d1.Service.PublicKey(), d2.Location(): d2.Service.PublicKey(), } c.Logf("map: %s", locator) svc, err := bakery.NewService(bakery.NewServiceParams{ Location: "here", Locator: locator, }) c.Assert(err, gc.IsNil) m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ Location: d2.Location(), Condition: "true", }}) c.Assert(err, gc.IsNil) ms, err := s.client.DischargeAll(m) c.Assert(err, gc.IsNil) c.Assert(ms, gc.HasLen, 3) err = svc.Check(ms, failChecker) c.Assert(err, gc.IsNil) err = svc.AddCaveat(m, checkers.Caveat{ Location: d2.Location(), Condition: "nope", }) c.Assert(err, gc.IsNil) ms, err = s.client.DischargeAll(m) c.Assert(err, gc.ErrorMatches, `cannot get discharge from "https://[^"]*": third party refused discharge: cannot discharge: caveat refused`) c.Assert(ms, gc.HasLen, 0) } func (s *suite) TestInsecureSkipVerifyRestoration(c *gc.C) { d1 := bakerytest.NewDischarger(nil, noCaveatChecker) d2 := bakerytest.NewDischarger(nil, noCaveatChecker) d2.Close() c.Assert(http.DefaultTransport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, gc.Equals, true) d1.Close() c.Assert(http.DefaultTransport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, gc.Equals, false) // When InsecureSkipVerify is already true, it should not // be restored to false. 
http.DefaultTransport.(*http.Transport).TLSClientConfig.InsecureSkipVerify = true d3 := bakerytest.NewDischarger(nil, noCaveatChecker) d3.Close() c.Assert(http.DefaultTransport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, gc.Equals, true) } func (s *suite) TestConcurrentDischargers(c *gc.C) { var wg sync.WaitGroup for i := 0; i < 5; i++ { wg.Add(1) go func() { d := bakerytest.NewDischarger(nil, noCaveatChecker) d.Close() wg.Done() }() } wg.Wait() c.Assert(http.DefaultTransport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, gc.Equals, false) } func (s *suite) TestInteractiveDischarger(c *gc.C) { var d *bakerytest.InteractiveDischarger d = bakerytest.NewInteractiveDischarger(nil, http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { d.FinishInteraction(w, r, []checkers.Caveat{ checkers.Caveat{ Condition: "test pass", }, }, nil) }, )) defer d.Close() svc, err := bakery.NewService(bakery.NewServiceParams{ Location: "here", Locator: d, }) c.Assert(err, gc.IsNil) m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ Location: d.Location(), Condition: "something", }}) c.Assert(err, gc.IsNil) client := httpbakery.NewClient() client.VisitWebPage = func(u *url.URL) error { var c httprequest.Client return c.Get(u.String(), nil) } ms, err := client.DischargeAll(m) c.Assert(err, gc.IsNil) c.Assert(ms, gc.HasLen, 2) var r recordingChecker err = svc.Check(ms, &r) c.Assert(err, gc.IsNil) c.Assert(r.caveats, gc.HasLen, 1) c.Assert(r.caveats[0], gc.Equals, "test pass") } func (s *suite) TestLoginDischargerError(c *gc.C) { var d *bakerytest.InteractiveDischarger d = bakerytest.NewInteractiveDischarger(nil, http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { d.FinishInteraction(w, r, nil, errors.New("test error")) }, )) defer d.Close() svc, err := bakery.NewService(bakery.NewServiceParams{ Location: "here", Locator: d, }) c.Assert(err, gc.IsNil) m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ Location: d.Location(), Condition: 
"something", }}) c.Assert(err, gc.IsNil) client := httpbakery.NewClient() client.VisitWebPage = func(u *url.URL) error { c.Logf("visiting %s", u) var c httprequest.Client return c.Get(u.String(), nil) } _, err = client.DischargeAll(m) c.Assert(err, gc.ErrorMatches, `cannot get discharge from ".*": failed to acquire macaroon after waiting: third party refused discharge: test error`) } func (s *suite) TestInteractiveDischargerURL(c *gc.C) { var d *bakerytest.InteractiveDischarger d = bakerytest.NewInteractiveDischarger(nil, http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, d.URL("/redirect", r), http.StatusFound) }, )) defer d.Close() d.Mux.Handle("/redirect", http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { d.FinishInteraction(w, r, nil, nil) }, )) svc, err := bakery.NewService(bakery.NewServiceParams{ Location: "here", Locator: d, }) c.Assert(err, gc.IsNil) m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ Location: d.Location(), Condition: "something", }}) c.Assert(err, gc.IsNil) client := httpbakery.NewClient() client.VisitWebPage = func(u *url.URL) error { var c httprequest.Client return c.Get(u.String(), nil) } ms, err := client.DischargeAll(m) c.Assert(err, gc.IsNil) c.Assert(ms, gc.HasLen, 2) err = svc.Check(ms, failChecker) c.Assert(err, gc.IsNil) } type recordingChecker struct { caveats []string } func (c *recordingChecker) CheckFirstPartyCaveat(caveat string) error { c.caveats = append(c.caveats, caveat) return nil } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakerytest/package_test.go0000664000175000017500000000017412672604475024652 0ustar marcomarcopackage bakerytest_test import ( "testing" gc "gopkg.in/check.v1" ) func TestPackage(t *testing.T) { gc.TestingT(t) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/dependencies.tsv0000664000175000017500000000273612672604475022706 0ustar marcomarcogithub.com/juju/errors git 4567a5e69fd3130ca0d89f69478e7ac025b67452 2015-03-27T19:24:31Z 
github.com/juju/httprequest git abb29cbb15079888950f7b9d73f77c4e4ac89042 2015-09-16T09:23:22Z github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z github.com/juju/schema git afe1151cb49d1d7ed3c75592dfc6f38703f2e988 2015-08-07T07:58:08Z github.com/juju/testing git 6e944d606b6efca96146fbda3e6a27f73313d867 2015-10-02T11:09:45Z github.com/juju/utils git c9d1b9e09eebb41c3bb2d762ea16f4d8abed6ce5 2015-09-02T22:36:51Z github.com/juju/webbrowser git 54b8c57083b4afb7dc75da7f13e2967b2606a507 2016-03-09T14:36:29Z github.com/julienschmidt/httprouter git 109e267447e95ad1bb48b758e40dd7453eb7b039 2015-09-05T17:25:33Z golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z golang.org/x/net git ea47fc708ee3e20177f3ca3716217c4ab75942cb 2015-08-29T23:03:18Z gopkg.in/check.v1 git b3d3430320d4260e5fea99841af984b3badcea63 2015-06-26T10:50:28Z gopkg.in/errgo.v1 git 15098963088579c1cd9eb1a7da285831e548390b 2015-07-07T18:34:45Z gopkg.in/juju/environschema.v1 git cfc85b8479122af20b9b5b5ac6a69d5248914e56 2015-10-09T10:58:43Z gopkg.in/macaroon.v1 git ab3940c6c16510a850e1c2dd628b919f0f3f1464 2015-01-21T11:42:31Z gopkg.in/mgo.v2 git f4923a569136442e900b8cf5c1a706c0a8b0883c 2015-08-21T15:31:23Z gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z gopkg.in/yaml.v2 git 7ad95dd0798a40da1ccdff6dff35fd177b5edf40 2015-06-24T10:29:02Z charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/LICENSE0000664000175000017500000002113312672604475020517 0ustar marcomarcoCopyright © 2014, Roger Peppe, Canonical Inc. This software is licensed under the LGPLv3, included below. 
As a special exception to the GNU Lesser General Public License version 3 ("LGPL3"), the copyright holders of this Library give you permission to convey to a third party a Combined Work that links statically or dynamically to this Library without providing any Minimal Corresponding Source or Minimal Application Code as set out in 4d or providing the installation information set out in section 4e, provided that you comply with the other provisions of LGPL3 and provided that you meet, for the Application the terms and conditions of the license(s) which apply to the Application. Except as stated in this special exception, the provisions of LGPL3 will continue to comply in full to this Library. If you modify this Library, you may apply this exception to your version of this Library, but you are not obliged to do so. If you do not wish to do so, delete this exception statement from your version. This exception does not (and cannot) modify any license terms which apply to the Application, with which you must still comply. GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. 
Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. 
You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. 
A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. 
If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/0000775000175000017500000000000012677511232021660 5ustar marcomarcocharm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/browser.go0000664000175000017500000000206612672604475023705 0ustar marcomarcopackage httpbakery import ( "fmt" "net/url" "os" "github.com/juju/webbrowser" ) // OpenWebBrowser opens a web browser at the // given URL. If the OS is not recognised, the URL // is just printed to standard output. func OpenWebBrowser(url *url.URL) error { err := webbrowser.Open(url) if err == nil { fmt.Fprintf(os.Stderr, "Opening an authorization web page in your browser.\n") fmt.Fprintf(os.Stderr, "If it does not open, please open this URL:\n%s\n", url) return nil } if err == webbrowser.ErrNoBrowser { fmt.Fprintf(os.Stderr, "Please open this URL in your browser to authorize:\n%s\n", url) return nil } return err } // WebBrowserVisitor holds an interactor that supports the "Interactive" // method by opening a web browser at the required location. 
var WebBrowserVisitor Visitor = webBrowserVisitor{} type webBrowserVisitor struct{} func (webBrowserVisitor) VisitWebPage(client *Client, methodURLs map[string]*url.URL) error { u := methodURLs[UserInteractionMethod] if u == nil { return ErrMethodNotSupported } return OpenWebBrowser(u) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/checkers_test.go0000664000175000017500000001360612672604475025052 0ustar marcomarcopackage httpbakery_test import ( "net" "net/http" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/httpbakery" ) type CheckersSuite struct{} var _ = gc.Suite(&CheckersSuite{}) type checkTest struct { caveat string expectError string expectCause func(err error) bool } var isCaveatNotRecognized = errgo.Is(checkers.ErrCaveatNotRecognized) var checkerTests = []struct { about string checker checkers.Checker checks []checkTest }{{ about: "no host name declared", checker: checkers.New(httpbakery.Checkers(&http.Request{})), checks: []checkTest{{ caveat: checkers.ClientIPAddrCaveat(net.IP{0, 0, 0, 0}).Condition, expectError: `caveat "client-ip-addr 0.0.0.0" not satisfied: client has no remote address`, }, { caveat: checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 1}).Condition, expectError: `caveat "client-ip-addr 127.0.0.1" not satisfied: client has no remote address`, }, { caveat: "client-ip-addr badip", expectError: `caveat "client-ip-addr badip" not satisfied: cannot parse IP address in caveat`, }}, }, { about: "IPv4 host name declared", checker: checkers.New(httpbakery.Checkers(&http.Request{ RemoteAddr: "127.0.0.1:1234", })), checks: []checkTest{{ caveat: checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 1}).Condition, }, { caveat: checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 1}.To16()).Condition, }, { caveat: "client-ip-addr ::ffff:7f00:1", }, { caveat: checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 2}).Condition, expectError: `caveat "client-ip-addr 127.0.0.2" not satisfied: client IP 
address mismatch, got 127.0.0.1`, }, { caveat: checkers.ClientIPAddrCaveat(net.ParseIP("2001:4860:0:2001::68")).Condition, expectError: `caveat "client-ip-addr 2001:4860:0:2001::68" not satisfied: client IP address mismatch, got 127.0.0.1`, }}, }, { about: "IPv6 host name declared", checker: checkers.New(httpbakery.Checkers(&http.Request{ RemoteAddr: "[2001:4860:0:2001::68]:1234", })), checks: []checkTest{{ caveat: checkers.ClientIPAddrCaveat(net.ParseIP("2001:4860:0:2001::68")).Condition, }, { caveat: "client-ip-addr 2001:4860:0:2001:0::68", }, { caveat: checkers.ClientIPAddrCaveat(net.ParseIP("2001:4860:0:2001::69")).Condition, expectError: `caveat "client-ip-addr 2001:4860:0:2001::69" not satisfied: client IP address mismatch, got 2001:4860:0:2001::68`, }, { caveat: checkers.ClientIPAddrCaveat(net.ParseIP("127.0.0.1")).Condition, expectError: `caveat "client-ip-addr 127.0.0.1" not satisfied: client IP address mismatch, got 2001:4860:0:2001::68`, }}, }, { about: "same client address, ipv4 request address", checker: checkers.New(httpbakery.Checkers(&http.Request{ RemoteAddr: "127.0.0.1:1324", })), checks: []checkTest{{ caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ RemoteAddr: "127.0.0.1:1234", }).Condition, }, { caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ RemoteAddr: "[::ffff:7f00:1]:1235", }).Condition, }, { caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ RemoteAddr: "127.0.0.2:1234", }).Condition, expectError: `caveat "client-ip-addr 127.0.0.2" not satisfied: client IP address mismatch, got 127.0.0.1`, }, { caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ RemoteAddr: "[::ffff:7f00:2]:1235", }).Condition, expectError: `caveat "client-ip-addr 127.0.0.2" not satisfied: client IP address mismatch, got 127.0.0.1`, }, { caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{}).Condition, expectError: `caveat "error client has no remote IP address" not satisfied: bad caveat`, }, { caveat: 
httpbakery.SameClientIPAddrCaveat(&http.Request{ RemoteAddr: "bad", }).Condition, expectError: `caveat "error cannot parse host port in remote address: missing port in address bad" not satisfied: bad caveat`, }, { caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ RemoteAddr: "bad:56", }).Condition, expectError: `caveat "error invalid IP address in remote address \\"bad:56\\"" not satisfied: bad caveat`, }}, }, { about: "same client address, ipv6 request address", checker: checkers.New(httpbakery.Checkers(&http.Request{ RemoteAddr: "[2001:4860:0:2001:0::68]:1235", })), checks: []checkTest{{ caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ RemoteAddr: "[2001:4860:0:2001:0::68]:1234", }).Condition, }, { caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ RemoteAddr: "127.0.0.2:1234", }).Condition, expectError: `caveat "client-ip-addr 127.0.0.2" not satisfied: client IP address mismatch, got 2001:4860:0:2001::68`, }}, }, { about: "request with no origin", checker: checkers.New(httpbakery.Checkers(&http.Request{})), checks: []checkTest{{ caveat: checkers.ClientOriginCaveat("").Condition, }, { caveat: checkers.ClientOriginCaveat("somewhere").Condition, expectError: `caveat "origin somewhere" not satisfied: request has invalid Origin header; got ""`, }}, }, { about: "request with origin", checker: checkers.New(httpbakery.Checkers(&http.Request{ Header: http.Header{ "Origin": {"somewhere"}, }, })), checks: []checkTest{{ caveat: checkers.ClientOriginCaveat("").Condition, expectError: `caveat "origin " not satisfied: request has invalid Origin header; got "somewhere"`, }, { caveat: checkers.ClientOriginCaveat("somewhere").Condition, }}, }} func (s *CheckersSuite) TestCheckers(c *gc.C) { for i, test := range checkerTests { c.Logf("test %d: %s", i, test.about) for j, check := range test.checks { c.Logf("\tcheck %d", j) err := checkers.New(test.checker).CheckFirstPartyCaveat(check.caveat) if check.expectError != "" { c.Assert(err, gc.ErrorMatches, 
check.expectError) if check.expectCause == nil { check.expectCause = errgo.Any } c.Assert(check.expectCause(errgo.Cause(err)), gc.Equals, true) } else { c.Assert(err, gc.IsNil) } } } } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/error_test.go0000664000175000017500000000642612672604475024416 0ustar marcomarcopackage httpbakery_test import ( "encoding/json" "errors" "net/http" "net/http/httptest" "github.com/juju/httprequest" jc "github.com/juju/testing/checkers" "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" "gopkg.in/macaroon.v1" "gopkg.in/macaroon-bakery.v1/httpbakery" ) type ErrorSuite struct{} var _ = gc.Suite(&ErrorSuite{}) func (s *ErrorSuite) TestWriteDischargeRequiredError(c *gc.C) { m, err := macaroon.New([]byte("secret"), "id", "a location") c.Assert(err, gc.IsNil) tests := []struct { about string path string err error expectedResponse httpbakery.Error }{{ about: `write discharge required with "an error" but no path`, path: "", err: errors.New("an error"), expectedResponse: httpbakery.Error{ Code: httpbakery.ErrDischargeRequired, Message: "an error", Info: &httpbakery.ErrorInfo{ Macaroon: m, }, }, }, { about: `write discharge required with "an error" but and set a path`, path: "http://foobar:1234", err: errors.New("an error"), expectedResponse: httpbakery.Error{ Code: httpbakery.ErrDischargeRequired, Message: "an error", Info: &httpbakery.ErrorInfo{ Macaroon: m, MacaroonPath: "http://foobar:1234", }, }, }, { about: `write discharge required with nil error but set a path`, path: "http://foobar:1234", err: nil, expectedResponse: httpbakery.Error{ Code: httpbakery.ErrDischargeRequired, Message: httpbakery.ErrDischargeRequired.Error(), Info: &httpbakery.ErrorInfo{ Macaroon: m, MacaroonPath: "http://foobar:1234", }, }, }, } for i, t := range tests { c.Logf("Running test %d %s", i, t.about) response := httptest.NewRecorder() httpbakery.WriteDischargeRequiredError(response, m, t.path, t.err) httptesting.AssertJSONResponse(c, response, 
http.StatusProxyAuthRequired, t.expectedResponse) } } func (s *ErrorSuite) TestNewInteractionRequiredError(c *gc.C) { // With a request with no version header, the response // should be 407. req, err := http.NewRequest("GET", "/", nil) c.Assert(err, gc.IsNil) err = httpbakery.NewInteractionRequiredError("/visit", "/wait", nil, req) code, resp := httpbakery.ErrorToResponse(err) c.Assert(code, gc.Equals, http.StatusProxyAuthRequired) data, err := json.Marshal(resp) c.Assert(err, gc.IsNil) c.Assert(string(data), jc.JSONEquals, &httpbakery.Error{ Code: httpbakery.ErrInteractionRequired, Message: httpbakery.ErrInteractionRequired.Error(), Info: &httpbakery.ErrorInfo{ VisitURL: "/visit", WaitURL: "/wait", }, }) // With a request with a version 1 header, the response // should be 401. req.Header.Set("Bakery-Protocol-Version", "1") err = httpbakery.NewInteractionRequiredError("/visit", "/wait", nil, req) code, resp = httpbakery.ErrorToResponse(err) c.Assert(code, gc.Equals, http.StatusUnauthorized) h := make(http.Header) resp.(httprequest.HeaderSetter).SetHeader(h) c.Assert(h.Get("WWW-Authenticate"), gc.Equals, "Macaroon") data, err = json.Marshal(resp) c.Assert(err, gc.IsNil) c.Assert(string(data), jc.JSONEquals, &httpbakery.Error{ Code: httpbakery.ErrInteractionRequired, Message: httpbakery.ErrInteractionRequired.Error(), Info: &httpbakery.ErrorInfo{ VisitURL: "/visit", WaitURL: "/wait", }, }) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/form/0000775000175000017500000000000012672604475022632 5ustar marcomarcocharm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/form/form.go0000664000175000017500000001055412672604475024131 0ustar marcomarco// Package form enables interactive login without using a web browser. 
package form import ( "net/url" "github.com/juju/httprequest" "github.com/juju/loggo" "golang.org/x/net/publicsuffix" "gopkg.in/errgo.v1" "gopkg.in/juju/environschema.v1" "gopkg.in/juju/environschema.v1/form" "gopkg.in/macaroon-bakery.v1/httpbakery" ) var logger = loggo.GetLogger("httpbakery.form") /* PROTOCOL A form login works as follows: Client Login Service | | | GET visitURL with | | "Accept: application/json" | |----------------------------------->| | | | Login Methods (including "form") | |<-----------------------------------| | | | GET "form" URL | |----------------------------------->| | | | Schema definition | |<-----------------------------------| | | +-------------+ | | Client | | | Interaction | | +-------------+ | | | | POST data to "form" URL | |----------------------------------->| | | | Form login response | |<-----------------------------------| | | The schema is provided as a environschema.Fileds object. It is the client's responsibility to interpret the schema and present it to the user. */ const ( // InteractionMethod is the methodURLs key // used for a URL that can be used for form-based // interaction. InteractionMethod = "form" ) // SchemaRequest is a request for a form schema. type SchemaRequest struct { httprequest.Route `httprequest:"GET"` } // SchemaResponse contains the message expected in response to the schema // request. type SchemaResponse struct { Schema environschema.Fields `json:"schema"` } // LoginRequest is a request to perform a login using the provided form. type LoginRequest struct { httprequest.Route `httprequest:"POST"` Body LoginBody `httprequest:",body"` } // LoginBody holds the body of a form login request. type LoginBody struct { Form map[string]interface{} `json:"form"` } // Visitor implements httpbakery.Visitor // by providing form-based interaction. type Visitor struct { // Filler holds the form filler that will be used when // form-based interaction is required. 
Filler form.Filler } // visitWebPage performs the actual visit request. It attempts to // determine that form login is supported and then download the form // schema. It calls v.handler.Handle using the downloaded schema and then // submits the returned form. Any error produced by v.handler.Handle will // not have it's cause masked. func (v Visitor) VisitWebPage(client *httpbakery.Client, methodURLs map[string]*url.URL) error { return v.visitWebPage(client, methodURLs) } // visitWebPage is the internal version of VisitWebPage that operates // on a Doer rather than an httpbakery.Client, so that we // can remain compatible with the historic // signature of the VisitWebPage function. func (v Visitor) visitWebPage(doer httprequest.Doer, methodURLs map[string]*url.URL) error { schemaURL := methodURLs[InteractionMethod] if schemaURL == nil { return httpbakery.ErrMethodNotSupported } logger.Infof("got schemaURL %v", schemaURL) httpReqClient := &httprequest.Client{ Doer: doer, } var s SchemaResponse if err := httpReqClient.CallURL(schemaURL.String(), &SchemaRequest{}, &s); err != nil { return errgo.Notef(err, "cannot get schema") } if len(s.Schema) == 0 { return errgo.Newf("invalid schema: no fields found") } host, err := publicsuffix.EffectiveTLDPlusOne(schemaURL.Host) if err != nil { host = schemaURL.Host } form, err := v.Filler.Fill(form.Form{ Title: "Log in to " + host, Fields: s.Schema, }) if err != nil { return errgo.NoteMask(err, "cannot handle form", errgo.Any) } lr := LoginRequest{ Body: LoginBody{ Form: form, }, } if err := httpReqClient.CallURL(schemaURL.String(), &lr, nil); err != nil { return errgo.Notef(err, "cannot submit form") } return nil } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/form/form_test.go0000664000175000017500000002247412672604475025174 0ustar marcomarcopackage form_test import ( "fmt" "net/http" "net/url" "github.com/juju/httprequest" jujutesting "github.com/juju/testing" "github.com/juju/testing/httptesting" gc 
"gopkg.in/check.v1" "gopkg.in/juju/environschema.v1" esform "gopkg.in/juju/environschema.v1/form" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/bakerytest" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon-bakery.v1/httpbakery/form" ) type formSuite struct { jujutesting.LoggingSuite } var _ = gc.Suite(&formSuite{}) var formLoginTests = []struct { about string opts dischargeOptions filler fillerFunc fallback httpbakery.Visitor expectError string }{{ about: "complete visit", }, { about: "visit error", opts: dischargeOptions{ visitError: true, }, expectError: `cannot get discharge from ".*": cannot start interactive session: no methods supported`, }, { about: "interaction methods not supported", opts: dischargeOptions{ ignoreAccept: true, }, expectError: `cannot get discharge from ".*": cannot start interactive session: no methods supported`, }, { about: "form visit method not supported", opts: dischargeOptions{ formUnsupported: true, }, expectError: `cannot get discharge from ".*": cannot start interactive session: no methods supported`, }, { about: "error getting schema", opts: dischargeOptions{ getError: true, }, expectError: `cannot get discharge from ".*": cannot start interactive session: cannot get schema: GET .*: httprequest: test error`, }, { about: "error submitting form", opts: dischargeOptions{ postError: true, }, expectError: `cannot get discharge from ".*": cannot start interactive session: cannot submit form: POST .*: httprequest: test error`, }, { about: "no schema", opts: dischargeOptions{ emptySchema: true, }, expectError: `cannot get discharge from ".*": cannot start interactive session: invalid schema: no fields found`, }, { about: "filler error", filler: func(esform.Form) (map[string]interface{}, error) { return nil, testError }, expectError: `cannot get discharge from ".*": cannot start interactive session: cannot handle form: test error`, }, { about: "interaction methods 
fallback success", opts: dischargeOptions{ ignoreAccept: true, }, fallback: visitorFunc(func(c *httpbakery.Client, m map[string]*url.URL) error { req, _ := http.NewRequest("GET", m[httpbakery.UserInteractionMethod].String()+"&fallback=OK", nil) resp, err := c.Do(req) if err == nil { resp.Body.Close() } return err }), }, { about: "interaction methods fallback failure", opts: dischargeOptions{ ignoreAccept: true, }, fallback: visitorFunc(func(c *httpbakery.Client, m map[string]*url.URL) error { return testError }), expectError: `cannot get discharge from ".*": cannot start interactive session: test error`, }, { about: "form not supported fallback success", opts: dischargeOptions{ formUnsupported: true, }, fallback: visitorFunc(func(c *httpbakery.Client, m map[string]*url.URL) error { req, err := http.NewRequest("GET", m["othermethod"].String()+"&fallback=OK", nil) if err != nil { panic(err) } resp, err := c.Do(req) if err == nil { resp.Body.Close() } return err }), }, { about: "form not supported fallback failure", opts: dischargeOptions{ formUnsupported: true, }, fallback: visitorFunc(func(c *httpbakery.Client, m map[string]*url.URL) error { return testError }), expectError: `cannot get discharge from ".*": cannot start interactive session: test error`, }} type visitorFunc func(*httpbakery.Client, map[string]*url.URL) error func (f visitorFunc) VisitWebPage(c *httpbakery.Client, m map[string]*url.URL) error { return f(c, m) } func (s *formSuite) TestFormLogin(c *gc.C) { d := &formDischarger{} d.discharger = bakerytest.NewInteractiveDischarger(nil, http.HandlerFunc(d.visit)) defer d.discharger.Close() d.discharger.Mux.Handle("/form", http.HandlerFunc(d.form)) svc, err := bakery.NewService(bakery.NewServiceParams{ Locator: d.discharger, }) c.Assert(err, gc.IsNil) for i, test := range formLoginTests { c.Logf("test %d: %s", i, test.about) d.dischargeOptions = test.opts m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ Location: d.discharger.Location(), Condition: 
"test condition", }}) c.Assert(err, gc.Equals, nil) client := httpbakery.NewClient() filler := defaultFiller if test.filler != nil { filler = test.filler } handlers := []httpbakery.Visitor{ form.Visitor{ Filler: filler, }, } if test.fallback != nil { handlers = append(handlers, test.fallback) } client.WebPageVisitor = httpbakery.NewMultiVisitor(handlers...) ms, err := client.DischargeAll(m) if test.expectError != "" { c.Assert(err, gc.ErrorMatches, test.expectError) continue } c.Assert(err, gc.IsNil) c.Assert(len(ms), gc.Equals, 2) } } var formTitleTests = []struct { host string expect string }{{ host: "xyz.com", expect: "Log in to xyz.com", }, { host: "abc.xyz.com", expect: "Log in to xyz.com", }, { host: "com", expect: "Log in to com", }} func (s *formSuite) TestFormTitle(c *gc.C) { d := &formDischarger{} d.discharger = bakerytest.NewInteractiveDischarger(nil, http.HandlerFunc(d.visit)) defer d.discharger.Close() d.discharger.Mux.Handle("/form", http.HandlerFunc(d.form)) svc, err := bakery.NewService(bakery.NewServiceParams{ Locator: testLocator{ loc: d.discharger.Location(), locator: d.discharger, }, }) c.Assert(err, gc.IsNil) for i, test := range formTitleTests { c.Logf("test %d: %s", i, test.host) m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ Location: "https://" + test.host, Condition: "test condition", }}) c.Assert(err, gc.Equals, nil) client := httpbakery.NewClient() c.Logf("match %v; replace with %v", test.host, d.discharger.Location()) client.Client.Transport = httptesting.URLRewritingTransport{ MatchPrefix: "https://" + test.host, Replace: d.discharger.Location(), RoundTripper: http.DefaultTransport, } var f titleTestFiller client.WebPageVisitor = httpbakery.NewMultiVisitor( form.Visitor{ Filler: &f, }, ) ms, err := client.DischargeAll(m) c.Assert(err, gc.IsNil) c.Assert(len(ms), gc.Equals, 2) c.Assert(f.title, gc.Equals, test.expect) } } type dischargeOptions struct { ignoreAccept bool visitError bool formUnsupported bool getError bool 
postError bool emptySchema bool } type formDischarger struct { discharger *bakerytest.InteractiveDischarger dischargeOptions } func (d *formDischarger) visit(w http.ResponseWriter, r *http.Request) { r.ParseForm() if r.Form.Get("fallback") != "" { d.discharger.FinishInteraction(w, r, nil, nil) return } if d.ignoreAccept { w.Write([]byte("OK")) return } if r.Header.Get("Accept") != "application/json" { d.errorf(w, r, "bad accept header %q", r.Header.Get("Accept")) } if d.visitError { httprequest.WriteJSON(w, http.StatusInternalServerError, testError) d.discharger.FinishInteraction(w, r, nil, testError) return } methods := map[string]string{ form.InteractionMethod: d.discharger.HostRelativeURL("/form", r), "othermethod": d.discharger.HostRelativeURL("/visit", r), } if d.formUnsupported { delete(methods, form.InteractionMethod) } httprequest.WriteJSON(w, http.StatusOK, methods) } func (d *formDischarger) form(w http.ResponseWriter, r *http.Request) { if r.Method == "GET" { if d.getError { httprequest.WriteJSON(w, http.StatusInternalServerError, testError) d.discharger.FinishInteraction(w, r, nil, testError) return } var sr form.SchemaResponse if !d.emptySchema { sr.Schema = environschema.Fields{ "username": environschema.Attr{ Type: environschema.Tstring, }, "password": environschema.Attr{ Type: environschema.Tstring, Secret: true, }, } } httprequest.WriteJSON(w, http.StatusOK, sr) return } if r.Method != "POST" { d.errorf(w, r, "bad method %q", r.Method) return } if d.postError { httprequest.WriteJSON(w, http.StatusInternalServerError, testError) d.discharger.FinishInteraction(w, r, nil, testError) return } var lr form.LoginRequest err := httprequest.Unmarshal(httprequest.Params{Request: r}, &lr) if err != nil { d.errorf(w, r, "bad visit request: %s", err) return } d.discharger.FinishInteraction(w, r, nil, nil) } func (d *formDischarger) errorf(w http.ResponseWriter, r *http.Request, s string, p ...interface{}) { err := &httpbakery.Error{ Code: 
httpbakery.ErrBadRequest, Message: fmt.Sprintf(s, p...), } d.discharger.FinishInteraction(w, r, nil, err) } var testError = &httpbakery.Error{ Message: "test error", } type fillerFunc func(esform.Form) (map[string]interface{}, error) func (f fillerFunc) Fill(form esform.Form) (map[string]interface{}, error) { return f(form) } var defaultFiller = fillerFunc(func(esform.Form) (map[string]interface{}, error) { return map[string]interface{}{"test": 1}, nil }) type testLocator struct { loc string locator bakery.PublicKeyLocator } func (l testLocator) PublicKeyForLocation(loc string) (*bakery.PublicKey, error) { return l.locator.PublicKeyForLocation(l.loc) } type titleTestFiller struct { title string } func (f *titleTestFiller) Fill(form esform.Form) (map[string]interface{}, error) { f.title = form.Title return map[string]interface{}{"test": 1}, nil } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/form/package_test.go0000664000175000017500000000016612672604475025616 0ustar marcomarcopackage form_test import ( "testing" gc "gopkg.in/check.v1" ) func TestPackage(t *testing.T) { gc.TestingT(t) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/client.go0000664000175000017500000005467212677511232023503 0ustar marcomarcopackage httpbakery import ( "encoding/base64" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/http/cookiejar" "net/url" "strings" "sync" "github.com/juju/loggo" "golang.org/x/net/publicsuffix" "gopkg.in/errgo.v1" "gopkg.in/macaroon.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" ) var logger = loggo.GetLogger("httpbakery") // DischargeError represents the error when a third party discharge // is refused by a server. type DischargeError struct { // Reason holds the underlying remote error that caused the // discharge to fail. 
Reason *Error } func (e *DischargeError) Error() string { return fmt.Sprintf("third party refused discharge: %v", e.Reason) } // IsDischargeError reports whether err is a *DischargeError. func IsDischargeError(err error) bool { _, ok := err.(*DischargeError) return ok } // InteractionError wraps an error returned by a call to visitWebPage. type InteractionError struct { // Reason holds the actual error returned from visitWebPage. Reason error } func (e *InteractionError) Error() string { return fmt.Sprintf("cannot start interactive session: %v", e.Reason) } // IsInteractionError reports whether err is an *InteractionError. func IsInteractionError(err error) bool { _, ok := err.(*InteractionError) return ok } // WaitResponse holds the type that should be returned // by an HTTP response made to a WaitURL // (See the ErrorInfo type). type WaitResponse struct { Macaroon *macaroon.Macaroon } // NewHTTPClient returns an http.Client that ensures // that headers are sent to the server even when the // server redirects a GET request. The returned client // also contains an empty in-memory cookie jar. // // See https://github.com/golang/go/issues/4677 func NewHTTPClient() *http.Client { c := *http.DefaultClient c.CheckRedirect = func(req *http.Request, via []*http.Request) error { if len(via) >= 10 { return fmt.Errorf("too many redirects") } if len(via) == 0 { return nil } for attr, val := range via[0].Header { if _, ok := req.Header[attr]; !ok { req.Header[attr] = val } } return nil } jar, err := cookiejar.New(&cookiejar.Options{ PublicSuffixList: publicsuffix.List, }) if err != nil { panic(err) } c.Jar = &cookieLogger{jar} return &c } // Client holds the context for making HTTP requests // that automatically acquire and discharge macaroons. type Client struct { // Client holds the HTTP client to use. It should have a cookie // jar configured, and when redirecting it should preserve the // headers (see NewHTTPClient). 
*http.Client // WebPageVisitor holds a Visitor that is called when the // discharge process requires further interaction. If this // is nil, VisitWebPage will be called; if that is also nil, no // interaction will be allowed. // // The VisitWebPage method will always be called with a map // containing a single entry with the key UserInteractionMethod, // holding the URL found in the InteractionRequired error's // VisitURL field. WebPageVisitor Visitor // VisitWebPage is called when WebPageVisitor is nil and // the discharge process requires further interaction. // // Note that this field is now deprecated in favour of // WebPageVisitor, which will take priority if set. VisitWebPage func(*url.URL) error // Key holds the client's key. If set, the client will try to // discharge third party caveats with the special location // "local" by using this key. See bakery.DischargeAllWithKey and // bakery.LocalThirdPartyCaveat for more information Key *bakery.KeyPair // DischargeAcquirer holds the object that will be used to obtain // third-party discharges. If nil, the Client itself will be used. DischargeAcquirer DischargeAcquirer } // DischargeAcquirer can be implemented by clients that want to customize the // discharge-acquisition process used by a Client. type DischargeAcquirer interface { // AcquireDischarge should return a discharge macaroon for the given third // party caveat. The firstPartyLocation holds the location of the original // macaroon. AcquireDischarge(firstPartyLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error) } // NewClient returns a new Client containing an HTTP client // created with NewHTTPClient and leaves all other fields zero. func NewClient() *Client { return &Client{ Client: NewHTTPClient(), } } // Do sends the given HTTP request and returns its response. If the // request fails with a discharge-required error, any required discharge // macaroons will be acquired, and the request will be repeated with // those attached. 
Do may add headers to req.Header. // // If the required discharges were refused by a third party, an error // with a *DischargeError cause will be returned. // // Note that because the request may be retried, no body may be provided // in the request, otherwise the contents will be lost when retrying. // For requests with a body (for example PUT or POST methods), use // DoWithBody instead. // // If interaction is required by the user, the visitWebPage function is // called with a URL to be opened in a web browser. If visitWebPage // returns an error, an error with a *InteractionError cause will be // returned. See OpenWebBrowser for a possible implementation of // visitWebPage. func (c *Client) Do(req *http.Request) (*http.Response, error) { if req.Body != nil { return nil, fmt.Errorf("body unexpectedly provided in request - use DoWithBody") } return c.DoWithBody(req, nil) } // DischargeAll attempts to acquire discharge macaroons for all the // third party caveats in m, and returns a slice containing all // of them bound to m. // // If the discharge fails because a third party refuses to discharge a // caveat, the returned error will have a cause of type *DischargeError. // If the discharge fails because visitWebPage returns an error, // the returned error will have a cause of *InteractionError. // // The returned macaroon slice will not be stored in the client // cookie jar (see SetCookie if you need to do that). func (c *Client) DischargeAll(m *macaroon.Macaroon) (macaroon.Slice, error) { return bakery.DischargeAllWithKey(m, c.dischargeAcquirer().AcquireDischarge, c.Key) } func (c *Client) dischargeAcquirer() DischargeAcquirer { if c.DischargeAcquirer != nil { return c.DischargeAcquirer } return c } // relativeURL returns newPath relative to an original URL. 
func relativeURL(base, new string) (*url.URL, error) { if new == "" { return nil, errgo.Newf("empty URL") } baseURL, err := url.Parse(base) if err != nil { return nil, errgo.Notef(err, "cannot parse URL") } newURL, err := url.Parse(new) if err != nil { return nil, errgo.Notef(err, "cannot parse URL") } return baseURL.ResolveReference(newURL), nil } // DoWithBody is like Do except that the given body is used for the body // of the HTTP request, and reset to its start by seeking if the request // is retried. It is an error if req.Body is non-zero. // // Note that, unlike the request body passed to http.NewRequest, // the body will not be closed even if implements io.Closer. // // Do may add headers to req.Header. func (c *Client) DoWithBody(req *http.Request, body io.ReadSeeker) (*http.Response, error) { return c.DoWithBodyAndCustomError(req, body, nil) } // DoWithBodyAndCustomError is like DoWithBody except it allows a client // to specify a custom error function, getError, which is called on the // HTTP response and may return a non-nil error if the response holds an // error. If the cause of the returned error is a *Error value and its // code is ErrDischargeRequired, the macaroon in its Info field will be // discharged and the request will be repeated with the discharged // macaroon. If getError returns nil, it should leave the response body // unchanged. // // If getError is nil, DefaultGetError will be used. // // This method can be useful when dealing with APIs that // return their errors in a format incompatible with Error, but the // need for it should be avoided when creating new APIs, // as it makes the endpoints less amenable to generic tools. 
func (c *Client) DoWithBodyAndCustomError(req *http.Request, body io.ReadSeeker, getError func(resp *http.Response) error) (*http.Response, error) {
	// Log request entry/exit at debug level; the actual
	// discharge-and-retry work happens in doWithBody.
	logger.Debugf("client do %s %s {", req.Method, req.URL)
	resp, err := c.doWithBody(req, body, getError)
	logger.Debugf("} -> error %#v", err)
	return resp, err
}

// doWithBody sends the request once and, if getError reports an error
// that HandleError can resolve by acquiring discharge macaroons, stores
// the resulting macaroon cookie in the client's jar and retries the
// request exactly once.
func (c *Client) doWithBody(req *http.Request, body io.ReadSeeker, getError func(resp *http.Response) error) (*http.Response, error) {
	if getError == nil {
		getError = DefaultGetError
	}
	// The body must be supplied separately as an io.ReadSeeker so it
	// can be rewound before the retry; a pre-populated req.Body would
	// be consumed by the first attempt.
	if req.Body != nil {
		return nil, errgo.New("body unexpectedly supplied in Request struct")
	}
	// A cookie jar is mandatory: it is where the acquired discharge
	// macaroons are stored between the two attempts.
	if c.Client.Jar == nil {
		return nil, errgo.New("no cookie jar supplied in HTTP client")
	}
	if err := c.setRequestBody(req, body); err != nil {
		return nil, errgo.Mask(err)
	}
	// Advertise the latest bakery protocol version we understand.
	req.Header.Set(BakeryProtocolHeader, fmt.Sprint(latestVersion))
	httpResp, err := c.Client.Do(req)
	if err != nil {
		return nil, errgo.Mask(err, errgo.Any)
	}
	err = getError(httpResp)
	if err == nil {
		return httpResp, nil
	}
	// The first response will not be used further; close its body
	// before retrying.
	httpResp.Body.Close()
	if err := c.HandleError(req.URL, err); err != nil {
		return nil, errgo.Mask(err, errgo.Any)
	}
	// Rewind/replace the body so the retried request starts fresh.
	if err := c.setRequestBody(req, body); err != nil {
		return nil, errgo.Mask(err)
	}
	// Try again with our newly acquired discharge macaroons
	hresp, err := c.Client.Do(req)
	if err != nil {
		return nil, errgo.Mask(err, errgo.Any)
	}
	// NOTE(review): the retried response is returned without applying
	// getError again, so a repeated discharge-required error is passed
	// straight back to the caller rather than looping.
	return hresp, nil
}

// HandleError tries to resolve the given error, which should be a
// response to the given URL, by discharging any macaroon contained in
// it. That is, if the error cause is an *Error and its code is
// ErrDischargeRequired, then it will try to discharge
// err.Info.Macaroon. If the discharge succeeds, the discharged macaroon
// will be saved to the client's cookie jar and ResolveError will return
// nil.
//
// For any other kind of error, the original error will be returned.
func (c *Client) HandleError(reqURL *url.URL, err error) error {
	// Only discharge-required bakery errors are handled here;
	// anything else is handed back to the caller unchanged.
	respErr, ok := errgo.Cause(err).(*Error)
	if !ok {
		return err
	}
	if respErr.Code != ErrDischargeRequired {
		return respErr
	}
	if respErr.Info == nil || respErr.Info.Macaroon == nil {
		return errgo.New("no macaroon found in discharge-required response")
	}
	mac := respErr.Info.Macaroon
	// Acquire discharges for all third-party caveats in the macaroon.
	macaroons, err := bakery.DischargeAllWithKey(mac, c.dischargeAcquirer().AcquireDischarge, c.Key)
	if err != nil {
		return errgo.Mask(err, errgo.Any)
	}
	var cookiePath string
	if path := respErr.Info.MacaroonPath; path != "" {
		relURL, err := parseURLPath(path)
		if err != nil {
			// A bad path restricts the cookie scope but is not fatal.
			logger.Warningf("ignoring invalid path in discharge-required response: %v", err)
		} else {
			cookiePath = reqURL.ResolveReference(relURL).Path
		}
	}
	cookie, err := NewCookie(macaroons)
	if err != nil {
		return errgo.Notef(err, "cannot make cookie")
	}
	cookie.Path = cookiePath
	// The server may ask for a specific cookie name suffix so that
	// multiple macaroon cookies can coexist.
	if name := respErr.Info.CookieNameSuffix; name != "" {
		cookie.Name = "macaroon-" + name
	}
	c.Jar.SetCookies(reqURL, []*http.Cookie{cookie})
	return nil
}

// DefaultGetError is the default error unmarshaler used by Client.DoWithBody.
func DefaultGetError(httpResp *http.Response) error {
	// Only 407 (old protocol) and 401 (new protocol) responses can
	// carry a bakery error.
	if httpResp.StatusCode != http.StatusProxyAuthRequired && httpResp.StatusCode != http.StatusUnauthorized {
		return nil
	}
	// Check for the new protocol discharge error.
	if httpResp.StatusCode == http.StatusUnauthorized && httpResp.Header.Get("WWW-Authenticate") != "Macaroon" {
		return nil
	}
	// NOTE(review): this is an exact match, so a Content-Type such as
	// "application/json; charset=utf-8" would not be recognized.
	if httpResp.Header.Get("Content-Type") != "application/json" {
		return nil
	}
	var resp Error
	if err := json.NewDecoder(httpResp.Body).Decode(&resp); err != nil {
		return fmt.Errorf("cannot unmarshal error response: %v", err)
	}
	return &resp
}

// parseURLPath parses path as a URL and verifies that it consists of a
// path component only, with no scheme, host, query or other parts.
func parseURLPath(path string) (*url.URL, error) {
	u, err := url.Parse(path)
	if err != nil {
		return nil, errgo.Mask(err)
	}
	if u.Scheme != "" || u.Opaque != "" || u.User != nil || u.Host != "" || u.RawQuery != "" || u.Fragment != "" {
		return nil, errgo.Newf("URL path %q is not clean", path)
	}
	return u, nil
}

// setRequestBody installs body as req.Body, rewinding it to the start
// if the request already had a body (i.e. this is a retry).
func (c *Client) setRequestBody(req *http.Request, body io.ReadSeeker) error {
	if body == nil {
		return nil
	}
	if req.Body != nil {
		req.Body.Close()
		if _, err := body.Seek(0, 0); err != nil {
			return errgo.Notef(err, "cannot seek to start of request body")
		}
	}
	// Always replace the body with a new readStopper so that
	// the old request cannot interfere with the new request's reader.
	req.Body = &readStopper{
		r: body,
	}
	return nil
}

// errClosed is returned by readStopper.Read after Close has been called.
var errClosed = errgo.New("reader has been closed")

// readStopper works around an issue with the net/http
// package (see http://golang.org/issue/12796).
// Because the first HTTP request might not have finished
// reading from its body when it returns, we need to
// ensure that the second request does not race on Read,
// so this type implements a Reader that prevents all Read
// calls to the underlying Reader after Close has been called.
type readStopper struct {
	mu sync.Mutex
	r  io.ReadSeeker
}

// Read reads from the underlying reader, failing with errClosed once
// Close has been called.
func (r *readStopper) Read(buf []byte) (int, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.r == nil {
		return 0, errClosed
	}
	return r.r.Read(buf)
}

// Close detaches the underlying reader so that any in-flight Read from
// the old request fails rather than racing with the retry.
func (r *readStopper) Close() error {
	r.mu.Lock()
	r.r = nil
	r.mu.Unlock()
	return nil
}

// NewCookie takes a slice of macaroons and returns them
// encoded as a cookie. The slice should contain a single primary
// macaroon in its first element, and any discharges after that.
func NewCookie(ms macaroon.Slice) (*http.Cookie, error) {
	if len(ms) == 0 {
		return nil, errgo.New("no macaroons in cookie")
	}
	data, err := json.Marshal(ms)
	if err != nil {
		return nil, errgo.Notef(err, "cannot marshal macaroons")
	}
	// The cookie name embeds the primary macaroon's signature so that
	// cookies for different macaroons do not collide.
	cookie := &http.Cookie{
		Name:  fmt.Sprintf("macaroon-%x", ms[0].Signature()),
		Value: base64.StdEncoding.EncodeToString(data),
	}
	// Expiry errors are deliberately ignored: a macaroon with no
	// time-before caveat simply yields a session cookie.
	cookie.Expires, _ = checkers.MacaroonsExpiryTime(ms)
	// TODO(rog) other fields.
	return cookie, nil
}

// SetCookie sets a cookie for the given URL on the given cookie jar
// that will hold the given macaroon slice. The macaroon slice should
// contain a single primary macaroon in its first element, and any
// discharges after that.
func SetCookie(jar http.CookieJar, url *url.URL, ms macaroon.Slice) error {
	cookie, err := NewCookie(ms)
	if err != nil {
		return errgo.Mask(err)
	}
	// TODO verify that setting this for the URL makes it available
	// to all paths under that URL.
	jar.SetCookies(url, []*http.Cookie{cookie})
	return nil
}

// MacaroonsForURL returns any macaroons associated with the
// given URL in the given cookie jar.
func MacaroonsForURL(jar http.CookieJar, u *url.URL) []macaroon.Slice {
	return cookiesToMacaroons(jar.Cookies(u))
}

// appendURLElem appends elem to u, inserting a "/" separator unless u
// already ends with one.
func appendURLElem(u, elem string) string {
	if strings.HasSuffix(u, "/") {
		return u + elem
	}
	return u + "/" + elem
}

// AcquireDischarge implements DischargeAcquirer by requesting a discharge
// macaroon from the caveat location as an HTTP URL.
func (c *Client) AcquireDischarge(originalLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error) {
	var resp dischargeResponse
	loc := appendURLElem(cav.Location, "discharge")
	// First try a plain discharge POST to the third party's
	// /discharge endpoint.
	err := postFormJSON(
		loc,
		url.Values{
			"id":       {cav.Id},
			"location": {originalLocation},
		},
		&resp,
		c.postForm,
	)
	if err == nil {
		return resp.Macaroon, nil
	}
	cause, ok := errgo.Cause(err).(*Error)
	if !ok {
		// The error is not a bakery *Error response; pass it on,
		// allowing an InteractionError cause through unmasked so
		// callers can still diagnose it.
		return nil, errgo.NoteMask(err, "cannot acquire discharge", IsInteractionError)
	}
	if cause.Code != ErrInteractionRequired {
		return nil, &DischargeError{
			Reason: cause,
		}
	}
	// An interaction-required error must carry Info with the
	// visit/wait URLs; treat a missing Info as a protocol error.
	if cause.Info == nil {
		return nil, errgo.Notef(err, "interaction-required response with no info")
	}
	m, err := c.interact(loc, cause.Info.VisitURL, cause.Info.WaitURL)
	if err != nil {
		// Preserve discharge/interaction error causes for callers.
		return nil, errgo.Mask(err, IsDischargeError, IsInteractionError)
	}
	return m, nil
}

// interact gathers a macaroon by directing the user to interact
// with a web page. It resolves the visit and wait URLs relative to
// the given discharge location, asks the configured visitor (or
// VisitWebPage function) to open the visit URL, then blocks on a GET
// of the wait URL to retrieve the discharge macaroon.
func (c *Client) interact(location, visitURLStr, waitURLStr string) (*macaroon.Macaroon, error) {
	// The visit/wait URLs in the error response may be relative to
	// the discharge location.
	visitURL, err := relativeURL(location, visitURLStr)
	if err != nil {
		return nil, errgo.Notef(err, "cannot make relative visit URL")
	}
	waitURL, err := relativeURL(location, waitURLStr)
	if err != nil {
		return nil, errgo.Notef(err, "cannot make relative wait URL")
	}
	// Prefer the WebPageVisitor interface if set; fall back to the
	// legacy VisitWebPage function field.
	switch {
	case c.WebPageVisitor != nil:
		err = c.WebPageVisitor.VisitWebPage(c, map[string]*url.URL{
			UserInteractionMethod: visitURL,
		})
	case c.VisitWebPage != nil:
		err = c.VisitWebPage(visitURL)
	default:
		err = errgo.New("interaction required but not possible")
	}
	if err != nil {
		return nil, &InteractionError{
			Reason: err,
		}
	}
	// Wait for the user to complete the interaction; the wait
	// endpoint returns the discharge macaroon when they have.
	waitResp, err := c.Client.Get(waitURL.String())
	if err != nil {
		return nil, errgo.Notef(err, "cannot get %q", waitURL)
	}
	defer waitResp.Body.Close()
	if waitResp.StatusCode != http.StatusOK {
		// A non-OK wait response body should hold a JSON-encoded Error.
		var resp Error
		if err := json.NewDecoder(waitResp.Body).Decode(&resp); err != nil {
			return nil, errgo.Notef(err, "cannot unmarshal wait error response")
		}
		dischargeErr := &DischargeError{
			Reason: &resp,
		}
		return nil, errgo.NoteMask(dischargeErr, "failed to acquire macaroon after waiting", errgo.Any)
	}
	var resp WaitResponse
	if err := json.NewDecoder(waitResp.Body).Decode(&resp); err != nil {
		return nil, errgo.Notef(err, "cannot unmarshal wait response")
	}
	if resp.Macaroon == nil {
		return nil, errgo.New("no macaroon found in wait response")
	}
	return resp.Macaroon, nil
}

// postForm makes a POST request to the given URL with the given
// values encoded as an application/x-www-form-urlencoded body.
func (c *Client) postForm(url string, data url.Values) (*http.Response, error) {
	return c.post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}

// post makes a POST request to the given URL with the given content
// type and body. The body is a ReadSeeker so that DoWithBody can
// rewind and resend it if the request must be retried.
func (c *Client) post(url string, bodyType string, body io.ReadSeeker) (resp *http.Response, err error) {
	req, err := http.NewRequest("POST", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", bodyType)
	// TODO(rog) see http.shouldRedirectPost
	return c.DoWithBody(req, body)
}

// postFormJSON does an HTTP POST request to the given url with the given
// values and unmarshals the response in the value pointed to by resp.
// It uses the given postForm function to actually make the POST request.
func postFormJSON(url string, vals url.Values, resp interface{}, postForm func(url string, vals url.Values) (*http.Response, error)) error { logger.Debugf("postFormJSON to %s; vals: %#v", url, vals) httpResp, err := postForm(url, vals) if err != nil { return errgo.NoteMask(err, fmt.Sprintf("cannot http POST to %q", url), errgo.Any) } defer httpResp.Body.Close() data, err := ioutil.ReadAll(httpResp.Body) if err != nil { return errgo.Notef(err, "failed to read body from %q", url) } if httpResp.StatusCode != http.StatusOK { var errResp Error if err := json.Unmarshal(data, &errResp); err != nil { // TODO better error here return errgo.Notef(err, "POST %q failed with status %q; cannot parse body %q", url, httpResp.Status, data) } return &errResp } if err := json.Unmarshal(data, resp); err != nil { return errgo.Notef(err, "cannot unmarshal response from %q", url) } return nil } // MacaroonsHeader is the key of the HTTP header that can be used to provide a // macaroon for request authorization. const MacaroonsHeader = "Macaroons" // RequestMacaroons returns any collections of macaroons from the header and // cookies found in the request. By convention, each slice will contain a // primary macaroon followed by its discharges. func RequestMacaroons(req *http.Request) []macaroon.Slice { mss := cookiesToMacaroons(req.Cookies()) for _, h := range req.Header[MacaroonsHeader] { ms, err := decodeMacaroonSlice(h) if err != nil { logger.Errorf("cannot retrieve macaroons from header: %v", err) } else { mss = append(mss, ms) } } return mss } // cookiesToMacaroons returns a slice of any macaroons found // in the given slice of cookies. 
func cookiesToMacaroons(cookies []*http.Cookie) []macaroon.Slice { var mss []macaroon.Slice for _, cookie := range cookies { if !strings.HasPrefix(cookie.Name, "macaroon-") { continue } ms, err := decodeMacaroonSlice(cookie.Value) if err != nil { logger.Errorf("cannot retrieve macaroons from cookie: %v", err) continue } mss = append(mss, ms) } return mss } // decodeMacaroonSlice decodes a base64-JSON-encoded slice of macaroons from // the given string. func decodeMacaroonSlice(value string) (macaroon.Slice, error) { data, err := base64.StdEncoding.DecodeString(value) if err != nil { return nil, errgo.NoteMask(err, "cannot base64-decode macaroons") } var ms macaroon.Slice if err := json.Unmarshal(data, &ms); err != nil { return nil, errgo.NoteMask(err, "cannot unmarshal macaroons") } return ms, nil } func isVerificationError(err error) bool { _, ok := err.(*bakery.VerificationError) return ok } // CheckRequest checks that the given http request contains at least one // valid macaroon minted by the given service, using checker to check // any first party caveats. It returns an error with a // *bakery.VerificationError cause if the macaroon verification failed. // // The assert map holds any required attributes of "declared" attributes, // overriding any inferences made from the macaroons themselves. // It has a similar effect to adding a checkers.DeclaredCaveat // for each key and value, but the error message will be more // useful. // // It adds all the standard caveat checkers to the given checker. // // It returns any attributes declared in the successfully validated request. func CheckRequest(svc *bakery.Service, req *http.Request, assert map[string]string, checker checkers.Checker) (map[string]string, error) { attrs, _, err := CheckRequestM(svc, req, assert, checker) return attrs, err } // CheckRequestM is like CheckRequest except that on success it also returns // the set of macaroons that was successfully checked. 
// The "M" suffix is for backward compatibility reasons - in a // later bakery version, the signature of CheckRequest will be // changed to return the macaroon slice and CheckRequestM will be // removed. func CheckRequestM(svc *bakery.Service, req *http.Request, assert map[string]string, checker checkers.Checker) (map[string]string, macaroon.Slice, error) { mss := RequestMacaroons(req) if len(mss) == 0 { return nil, nil, &bakery.VerificationError{ Reason: errgo.Newf("no macaroon cookies in request"), } } checker = checkers.New( checker, Checkers(req), checkers.TimeBefore, ) attrs, ms, err := svc.CheckAnyM(mss, assert, checker) if err != nil { return nil, nil, errgo.Mask(err, isVerificationError) } return attrs, ms, nil } type cookieLogger struct { http.CookieJar } func (j *cookieLogger) SetCookies(u *url.URL, cookies []*http.Cookie) { logger.Debugf("%p setting %d cookies for %s", j.CookieJar, len(cookies), u) for i, c := range cookies { logger.Debugf("\t%d. path %s; name %s", i, c.Path, c.Name) } j.CookieJar.SetCookies(u, cookies) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/error.go0000664000175000017500000002264512672604475023360 0ustar marcomarcopackage httpbakery import ( "net/http" "strconv" "github.com/juju/httprequest" "gopkg.in/errgo.v1" "gopkg.in/macaroon.v1" ) // ErrorCode holds an error code that classifies // an error returned from a bakery HTTP handler. type ErrorCode string func (e ErrorCode) Error() string { return string(e) } func (e ErrorCode) ErrorCode() ErrorCode { return e } const ( ErrBadRequest = ErrorCode("bad request") ErrDischargeRequired = ErrorCode("macaroon discharge required") ErrInteractionRequired = ErrorCode("interaction required") ) var ( errorMapper httprequest.ErrorMapper = ErrorToResponse handleJSON = errorMapper.HandleJSON writeError = errorMapper.WriteError ) // Error holds the type of a response from an httpbakery HTTP request, // marshaled as JSON. 
// // Note: Do not construct Error values with ErrDischargeRequired or // ErrInteractionRequired codes directly - use the // NewDischargeRequiredErrorForRequest or NewInteractionRequiredError // functions instead. type Error struct { Code ErrorCode `json:",omitempty"` Message string `json:",omitempty"` Info *ErrorInfo `json:",omitempty"` // version holds the protocol version that was used // to create the error (see NewDischargeRequiredErrorWithVersion). version version } // version represents a version of the bakery protocol. It is jused // to determine the kind of response to send when there is a // discharge-required error. type version int const ( version0 version = 0 version1 version = 1 latestVersion = version1 ) // ErrorInfo holds additional information provided // by an error. type ErrorInfo struct { // Macaroon may hold a macaroon that, when // discharged, may allow access to a service. // This field is associated with the ErrDischargeRequired // error code. Macaroon *macaroon.Macaroon `json:",omitempty"` // MacaroonPath holds the URL path to be associated // with the macaroon. The macaroon is potentially // valid for all URLs under the given path. // If it is empty, the macaroon will be associated with // the original URL from which the error was returned. MacaroonPath string `json:",omitempty"` // CookieNameSuffix holds the desired cookie name suffix to be // associated with the macaroon. The actual name used will be // ("macaroon-" + CookieName). Clients may ignore this field - // older clients will always use ("macaroon-" + // macaroon.Signature() in hex). CookieNameSuffix string `json:",omitempty"` // VisitURL and WaitURL are associated with the // ErrInteractionRequired error code. // VisitURL holds a URL that the client should visit // in a web browser to authenticate themselves. VisitURL string `json:",omitempty"` // WaitURL holds a URL that the client should visit // to acquire the discharge macaroon. 
A GET on // this URL will block until the client has authenticated, // and then it will return the discharge macaroon. WaitURL string `json:",omitempty"` } func (e *Error) Error() string { return e.Message } func (e *Error) ErrorCode() ErrorCode { return e.Code } // ErrorInfo returns additional information // about the error. // TODO return interface{} here? func (e *Error) ErrorInfo() *ErrorInfo { return e.Info } // ErrorToResponse returns the HTTP status and an error body to be // marshaled as JSON for the given error. This allows a third party // package to integrate bakery errors into their error responses when // they encounter an error with a *bakery.Error cause. func ErrorToResponse(err error) (int, interface{}) { errorBody := errorResponseBody(err) var body interface{} = errorBody status := http.StatusInternalServerError switch errorBody.Code { case ErrBadRequest: status = http.StatusBadRequest case ErrDischargeRequired, ErrInteractionRequired: switch errorBody.version { case version0: status = http.StatusProxyAuthRequired case version1: status = http.StatusUnauthorized body = httprequest.CustomHeader{ Body: body, SetHeaderFunc: setAuthenticateHeader, } default: panic("out of range version number") } } return status, body } func setAuthenticateHeader(h http.Header) { h.Set("WWW-Authenticate", "Macaroon") } type errorInfoer interface { ErrorInfo() *ErrorInfo } type errorCoder interface { ErrorCode() ErrorCode } // errorResponse returns an appropriate error // response for the provided error. func errorResponseBody(err error) *Error { var errResp Error cause := errgo.Cause(err) if cause, ok := cause.(*Error); ok { // It's an Error already. Preserve the wrapped // error message but copy everything else. errResp = *cause errResp.Message = err.Error() return &errResp } // It's not an error. Preserve as much info as // we can find. 
errResp.Message = err.Error() if coder, ok := cause.(errorCoder); ok { errResp.Code = coder.ErrorCode() } if infoer, ok := cause.(errorInfoer); ok { errResp.Info = infoer.ErrorInfo() } return &errResp } func badRequestErrorf(f string, a ...interface{}) error { return errgo.WithCausef(nil, ErrBadRequest, f, a...) } // WriteDischargeRequiredError creates an error using // NewDischargeRequiredError and writes it to the given response writer, // indicating that the client should discharge the macaroon to allow the // original request to be accepted. func WriteDischargeRequiredError(w http.ResponseWriter, m *macaroon.Macaroon, path string, originalErr error) { writeError(w, NewDischargeRequiredError(m, path, originalErr)) } // WriteDischargeRequiredErrorForRequest is like NewDischargeRequiredError // but uses the given request to determine the protocol version appropriate // for the client. // // This function should always be used in preference to // WriteDischargeRequiredError, because it enables // in-browser macaroon discharge. func WriteDischargeRequiredErrorForRequest(w http.ResponseWriter, m *macaroon.Macaroon, path string, originalErr error, req *http.Request) { writeError(w, NewDischargeRequiredErrorForRequest(m, path, originalErr, req)) } // NewDischargeRequiredError returns an error of type *Error that // reports the given original error and includes the given macaroon. // // The returned macaroon will be declared as valid for the given URL // path and may be relative. When the client stores the discharged // macaroon as a cookie this will be the path associated with the // cookie. See ErrorInfo.MacaroonPath for more information. func NewDischargeRequiredError(m *macaroon.Macaroon, path string, originalErr error) error { return newDischargeRequiredErrorWithVersion(m, path, originalErr, version0) } // NewInteractionRequiredError returns an error of type *Error // that requests an interaction from the client in response // to the given request. 
The originalErr value describes the original // error - if it is nil, a default message will be provided. // // See Error.ErrorInfo for more details of visitURL and waitURL. // // This function should be used in preference to creating the Error value // directly, as it sets the bakery protocol version correctly in the error. func NewInteractionRequiredError(visitURL, waitURL string, originalErr error, req *http.Request) error { if originalErr == nil { originalErr = ErrInteractionRequired } return &Error{ Message: originalErr.Error(), version: versionFromRequest(req), Code: ErrInteractionRequired, Info: &ErrorInfo{ VisitURL: visitURL, WaitURL: waitURL, }, } } // NewDischargeRequiredErrorForRequest is like NewDischargeRequiredError // except that it determines the client's bakery protocol version from // the request and returns an error response appropriate for that. // // This function should always be used in preference to // NewDischargeRequiredError, because it enables in-browser macaroon // discharge. // // To request a particular cookie name: // // err := NewDischargeRequiredErrorForRequest(...) // err.(*httpbakery.Error).Info.CookieNameSuffix = cookieName func NewDischargeRequiredErrorForRequest(m *macaroon.Macaroon, path string, originalErr error, req *http.Request) error { v := versionFromRequest(req) return newDischargeRequiredErrorWithVersion(m, path, originalErr, v) } // newDischargeRequiredErrorWithVersion is the internal version of NewDischargeRequiredErrorForRequest. func newDischargeRequiredErrorWithVersion(m *macaroon.Macaroon, path string, originalErr error, v version) error { if originalErr == nil { originalErr = ErrDischargeRequired } return &Error{ Message: originalErr.Error(), version: v, Code: ErrDischargeRequired, Info: &ErrorInfo{ Macaroon: m, MacaroonPath: path, }, } } // BakeryProtocolHeader is the header that HTTP clients should set // to determine the bakery protocol version. 
If it is 0 or missing, // a discharge-required error response will be returned with HTTP status 407; // if it is 1, the response will have status 401 with the WWW-Authenticate // header set to "Macaroon". const BakeryProtocolHeader = "Bakery-Protocol-Version" // versionFromRequest determines the bakery protocol version from a client // request. If the protocol cannot be determined, or is invalid, // the original version of the protocol is used. func versionFromRequest(req *http.Request) version { vs := req.Header.Get(BakeryProtocolHeader) if vs == "" { // No header - use backward compatibility mode. return version0 } v, err := strconv.Atoi(vs) if err != nil || version(v) < 0 || version(v) > latestVersion { // Badly formed header - use backward compatibility mode. return version0 } return version(v) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/client_test.go0000664000175000017500000007612112672604475024542 0ustar marcomarcopackage httpbakery_test import ( "encoding/base64" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/http/cookiejar" "net/http/httptest" "net/url" "sort" "strings" "sync" "time" "github.com/juju/httprequest" jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/macaroon.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/bakerytest" "gopkg.in/macaroon-bakery.v1/httpbakery" ) type ClientSuite struct { jujutesting.LoggingSuite } var _ = gc.Suite(&ClientSuite{}) // TestSingleServiceFirstParty creates a single service // with a macaroon with one first party caveat. // It creates a request with this macaroon and checks that the service // can verify this macaroon as valid. func (s *ClientSuite) TestSingleServiceFirstParty(c *gc.C) { // Create a target service. 
svc := newService("loc", nil) // No discharge required, so pass "unknown" for the third party // caveat discharger location so we know that we don't try // to discharge the location. ts := httptest.NewServer(serverHandler(serverHandlerParams{ service: svc, authLocation: "unknown", })) defer ts.Close() // Mint a macaroon for the target service. serverMacaroon, err := svc.NewMacaroon("", nil, nil) c.Assert(err, gc.IsNil) c.Assert(serverMacaroon.Location(), gc.Equals, "loc") err = svc.AddCaveat(serverMacaroon, checkers.Caveat{ Condition: "is something", }) c.Assert(err, gc.IsNil) // Create a client request. req, err := http.NewRequest("GET", ts.URL, nil) c.Assert(err, gc.IsNil) client := clientRequestWithCookies(c, ts.URL, macaroon.Slice{serverMacaroon}) // Somehow the client has accquired the macaroon. Add it to the cookiejar in our request. // Make the request to the server. resp, err := client.Do(req) c.Assert(err, gc.IsNil) defer resp.Body.Close() assertResponse(c, resp, "done") } func (s *ClientSuite) TestSingleServiceFirstPartyWithHeader(c *gc.C) { // Create a target service. svc := newService("loc", nil) // No discharge required, so pass "unknown" for the third party // caveat discharger location so we know that we don't try // to discharge the location. ts := httptest.NewServer(serverHandler(serverHandlerParams{ service: svc, authLocation: "unknown", })) defer ts.Close() // Mint a macaroon for the target service. serverMacaroon, err := svc.NewMacaroon("", nil, nil) c.Assert(err, gc.IsNil) c.Assert(serverMacaroon.Location(), gc.Equals, "loc") err = svc.AddCaveat(serverMacaroon, checkers.Caveat{ Condition: "is something", }) c.Assert(err, gc.IsNil) // Serialize the macaroon slice. data, err := json.Marshal(macaroon.Slice{serverMacaroon}) c.Assert(err, gc.IsNil) value := base64.StdEncoding.EncodeToString(data) // Create a client request. 
req, err := http.NewRequest("GET", ts.URL, nil) c.Assert(err, gc.IsNil) req.Header.Set(httpbakery.MacaroonsHeader, value) client := httpbakery.NewHTTPClient() // Make the request to the server. resp, err := client.Do(req) c.Assert(err, gc.IsNil) defer resp.Body.Close() assertResponse(c, resp, "done") } func (s *ClientSuite) TestRepeatedRequestWithBody(c *gc.C) { d := bakerytest.NewDischarger(nil, noCaveatChecker) defer d.Close() // Create a target service. svc := newService("loc", d) ts := httptest.NewServer(serverHandler(serverHandlerParams{ service: svc, authLocation: d.Location(), })) defer ts.Close() // Create a client request. req, err := http.NewRequest("POST", ts.URL, nil) c.Assert(err, gc.IsNil) // Make the request to the server. // First try with a body in the request, which should be denied // because we must use DoWithBody. req.Body = ioutil.NopCloser(strings.NewReader("postbody")) resp, err := httpbakery.NewClient().Do(req) c.Assert(err, gc.ErrorMatches, "body unexpectedly provided in request - use DoWithBody") c.Assert(resp, gc.IsNil) // Then try with no authorization, so make sure that httpbakery.Do // really will retry the request. req.Body = nil bodyText := "postbody" bodyReader := &readCounter{ReadSeeker: strings.NewReader(bodyText)} resp, err = httpbakery.NewClient().DoWithBody(req, bodyReader) c.Assert(err, gc.IsNil) defer resp.Body.Close() assertResponse(c, resp, "done postbody") // Sanity check that the body really was read twice and hence // that we are checking the logic we intend to check. c.Assert(bodyReader.byteCount, gc.Equals, len(bodyText)*2) } func (s ClientSuite) TestWithLargeBody(c *gc.C) { // This test is designed to fail when run with the race // checker enabled and when go issue #12796 // is not fixed. d := bakerytest.NewDischarger(nil, noCaveatChecker) defer d.Close() // Create a target service. 
svc := newService("loc", d) ts := httptest.NewServer(serverHandler(serverHandlerParams{ service: svc, authLocation: d.Location(), })) defer ts.Close() // Create a client request. req, err := http.NewRequest("POST", ts.URL+"/no-body", nil) c.Assert(err, gc.IsNil) resp, err := httpbakery.NewClient().DoWithBody(req, &largeReader{total: 3 * 1024 * 1024}) c.Assert(err, gc.IsNil) c.Assert(resp.StatusCode, gc.Equals, http.StatusOK) } // largeReader implements a reader that produces up to total bytes // in 1 byte reads. type largeReader struct { total int n int } func (r *largeReader) Read(buf []byte) (int, error) { if r.n >= r.total { return 0, io.EOF } r.n++ return copy(buf, []byte("a")), nil } func (r *largeReader) Seek(offset int64, whence int) (int64, error) { if offset != 0 || whence != 0 { panic("unexpected seek") } r.n = 0 return 0, nil } func (s *ClientSuite) TestDoWithBodyFailsWithBodyInRequest(c *gc.C) { body := strings.NewReader("foo") // Create a client request. req, err := http.NewRequest("POST", "http://0.1.2.3/", body) c.Assert(err, gc.IsNil) _, err = httpbakery.NewClient().DoWithBody(req, body) c.Assert(err, gc.ErrorMatches, "body unexpectedly supplied in Request struct") } func (s *ClientSuite) TestDischargeServerWithMacaraqOnDischarge(c *gc.C) { locator := bakery.NewPublicKeyRing() var called [3]int // create the services from leaf discharger to primary // service so that each one can know the location // to discharge at. 
key2, h2 := newHTTPDischarger(locator, func(svc *bakery.Service, req *http.Request, cavId, cav string) ([]checkers.Caveat, error) { called[2]++ if cav != "is-ok" { return nil, fmt.Errorf("unrecognized caveat at srv2") } return nil, nil }) srv2 := httptest.NewServer(h2) locator.AddPublicKeyForLocation(srv2.URL, true, key2) key1, h1 := newHTTPDischarger(locator, func(svc *bakery.Service, req *http.Request, cavId, cav string) ([]checkers.Caveat, error) { called[1]++ if _, err := httpbakery.CheckRequest(svc, req, nil, checkers.New()); err != nil { return nil, newDischargeRequiredError(serverHandlerParams{ service: svc, authLocation: srv2.URL, }, err, req) } if cav != "is-ok" { return nil, fmt.Errorf("unrecognized caveat at srv1") } return nil, nil }) srv1 := httptest.NewServer(h1) locator.AddPublicKeyForLocation(srv1.URL, true, key1) svc0 := newService("loc", locator) srv0 := httptest.NewServer(serverHandler(serverHandlerParams{ service: svc0, authLocation: srv1.URL, })) // Make a client request. 
client := httpbakery.NewClient() req, err := http.NewRequest("GET", srv0.URL, nil) c.Assert(err, gc.IsNil) resp, err := client.Do(req) c.Assert(err, gc.IsNil) defer resp.Body.Close() assertResponse(c, resp, "done") c.Assert(called, gc.DeepEquals, [3]int{0, 2, 1}) } func (s *ClientSuite) TestVersion0Generates407Status(c *gc.C) { m, err := macaroon.New([]byte("root key"), "id", "location") c.Assert(err, gc.IsNil) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { httpbakery.WriteDischargeRequiredErrorForRequest(w, m, "", errgo.New("foo"), req) })) defer srv.Close() resp, err := http.Get(srv.URL) c.Assert(err, gc.IsNil) c.Assert(resp.StatusCode, gc.Equals, http.StatusProxyAuthRequired) } func (s *ClientSuite) TestVersion1Generates401Status(c *gc.C) { m, err := macaroon.New([]byte("root key"), "id", "location") c.Assert(err, gc.IsNil) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { httpbakery.WriteDischargeRequiredErrorForRequest(w, m, "", errgo.New("foo"), req) })) defer srv.Close() req, err := http.NewRequest("GET", srv.URL, nil) c.Assert(err, gc.IsNil) req.Header.Set(httpbakery.BakeryProtocolHeader, "1") resp, err := http.DefaultClient.Do(req) c.Assert(err, gc.IsNil) c.Assert(resp.StatusCode, gc.Equals, http.StatusUnauthorized) c.Assert(resp.Header.Get("WWW-Authenticate"), gc.Equals, "Macaroon") } func newHTTPDischarger(locator bakery.PublicKeyLocator, checker func(svc *bakery.Service, req *http.Request, cavId, cav string) ([]checkers.Caveat, error)) (*bakery.PublicKey, http.Handler) { svc := newService("loc", locator) mux := http.NewServeMux() httpbakery.AddDischargeHandler(mux, "/", svc, func(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) { return checker(svc, req, cavId, cav) }) return svc.PublicKey(), mux } func (s *ClientSuite) TestDischargeAcquirer(c *gc.C) { rootKey := []byte("secret") m, err := macaroon.New(rootKey, "", "here") c.Assert(err, gc.IsNil) 
dischargeRootKey := []byte("shared root key") thirdPartyCaveatId := "3rd party caveat" err = m.AddThirdPartyCaveat(dischargeRootKey, thirdPartyCaveatId, "there") c.Assert(err, gc.IsNil) dm, err := macaroon.New(dischargeRootKey, thirdPartyCaveatId, "there") c.Assert(err, gc.IsNil) ta := &testAcquirer{dischargeMacaroon: dm} cl := httpbakery.NewClient() cl.DischargeAcquirer = ta ms, err := cl.DischargeAll(m) c.Assert(err, gc.IsNil) c.Assert(ms, gc.HasLen, 2) c.Assert(ta.acquireLocation, gc.Equals, "here") // should be first-party location c.Assert(ta.acquireCaveat.Id, gc.Equals, thirdPartyCaveatId) expectCaveat := "must foo" var lastCaveat string err = ms[0].Verify(rootKey, func(s string) error { if s != expectCaveat { return errgo.Newf(`expected %q, got %q`, expectCaveat, s) } lastCaveat = s return nil }, ms[1:]) c.Assert(err, gc.IsNil) c.Assert(lastCaveat, gc.Equals, expectCaveat) } type testAcquirer struct { dischargeMacaroon *macaroon.Macaroon acquireLocation string acquireCaveat macaroon.Caveat } // AcquireDischarge implements httpbakery.DischargeAcquirer. func (ta *testAcquirer) AcquireDischarge(loc string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { ta.acquireLocation = loc ta.acquireCaveat = cav err := ta.dischargeMacaroon.AddFirstPartyCaveat("must foo") if err != nil { return nil, err } return ta.dischargeMacaroon, nil } // onceOnlyChecker returns a third-party checker that accepts any given // caveat id once only. 
func onceOnlyChecker() func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { checked := make(map[string]bool) var mu sync.Mutex return func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { mu.Lock() defer mu.Unlock() id := cond + " " + arg if checked[id] { return nil, errgo.Newf("caveat %q fails second time", id) } checked[id] = true return nil, nil } } func (s *ClientSuite) TestMacaroonCookieName(c *gc.C) { d := bakerytest.NewDischarger(nil, noCaveatChecker) defer d.Close() svc := newService("loc", nil) // We arrange things so that although we use the same client // (with the same cookie jar), the macaroon verification only // succeeds once, so the client always fetches a new macaroon. caveatSeq := 0 checked := make(map[string]bool) cookieName := "" ts := httptest.NewServer(serverHandler(serverHandlerParams{ service: svc, mutateError: func(e *httpbakery.Error) { e.Info.CookieNameSuffix = cookieName e.Info.MacaroonPath = "/" }, checker: checkers.CheckerFunc{ Condition_: "once", Check_: func(_, arg string) error { if checked[arg] { return errgo.Newf("caveat %q has already been checked once", arg) } checked[arg] = true return nil }, }, caveats: func() []checkers.Caveat { caveatSeq++ return []checkers.Caveat{{ Condition: fmt.Sprintf("once %d", caveatSeq), }} }, })) defer ts.Close() client := httpbakery.NewClient() doRequest := func() { req, err := http.NewRequest("GET", ts.URL+"/foo/bar/", nil) c.Assert(err, gc.IsNil) resp, err := client.Do(req) c.Assert(err, gc.IsNil) defer resp.Body.Close() assertResponse(c, resp, "done") } assertCookieNames := func(names ...string) { u, err := url.Parse(ts.URL) c.Assert(err, gc.IsNil) sort.Strings(names) var gotNames []string for _, c := range client.Jar.Cookies(u) { gotNames = append(gotNames, c.Name) } sort.Strings(gotNames) c.Assert(gotNames, jc.DeepEquals, names) } cookieName = "foo" doRequest() assertCookieNames("macaroon-foo") // Another request with the same cookie name should // overwrite the 
old cookie. doRequest() assertCookieNames("macaroon-foo") // A subsequent request with a different cookie name // should create a new cookie, but the old one will still // be around. cookieName = "bar" doRequest() assertCookieNames("macaroon-foo", "macaroon-bar") } func (s *ClientSuite) TestMacaroonCookiePath(c *gc.C) { svc := newService("loc", nil) cookiePath := "" ts := httptest.NewServer(serverHandler(serverHandlerParams{ service: svc, mutateError: func(e *httpbakery.Error) { e.Info.MacaroonPath = cookiePath }, })) defer ts.Close() var client *httpbakery.Client doRequest := func() { req, err := http.NewRequest("GET", ts.URL+"/foo/bar/", nil) c.Assert(err, gc.IsNil) client = httpbakery.NewClient() resp, err := client.Do(req) c.Assert(err, gc.IsNil) defer resp.Body.Close() assertResponse(c, resp, "done") } assertCookieCount := func(path string, n int) { u, err := url.Parse(ts.URL + path) c.Assert(err, gc.IsNil) c.Assert(client.Jar.Cookies(u), gc.HasLen, n) } cookiePath = "" c.Logf("- cookie path %q", cookiePath) doRequest() assertCookieCount("", 0) assertCookieCount("/foo", 0) assertCookieCount("/foo", 0) assertCookieCount("/foo/", 0) assertCookieCount("/foo/bar/", 1) assertCookieCount("/foo/bar/baz", 1) cookiePath = "/foo/" c.Logf("- cookie path %q", cookiePath) doRequest() assertCookieCount("", 0) assertCookieCount("/foo", 0) assertCookieCount("/foo/", 1) assertCookieCount("/foo/bar/", 1) assertCookieCount("/foo/bar/baz", 1) cookiePath = "/foo" c.Logf("- cookie path %q", cookiePath) doRequest() assertCookieCount("", 0) assertCookieCount("/bar", 0) assertCookieCount("/foo", 1) assertCookieCount("/foo/", 1) assertCookieCount("/foo/bar/", 1) assertCookieCount("/foo/bar/baz", 1) cookiePath = "../" c.Logf("- cookie path %q", cookiePath) doRequest() assertCookieCount("", 0) assertCookieCount("/bar", 0) assertCookieCount("/foo", 0) assertCookieCount("/foo/", 1) assertCookieCount("/foo/bar/", 1) assertCookieCount("/foo/bar/baz", 1) cookiePath = "../bar" c.Logf("- cookie 
path %q", cookiePath) doRequest() assertCookieCount("", 0) assertCookieCount("/bar", 0) assertCookieCount("/foo", 0) assertCookieCount("/foo/", 0) assertCookieCount("/foo/bar/", 1) assertCookieCount("/foo/bar/baz", 1) assertCookieCount("/foo/baz", 0) assertCookieCount("/foo/baz/", 0) assertCookieCount("/foo/baz/bar", 0) cookiePath = "/" c.Logf("- cookie path %q", cookiePath) doRequest() assertCookieCount("", 1) assertCookieCount("/bar", 1) assertCookieCount("/foo", 1) assertCookieCount("/foo/", 1) assertCookieCount("/foo/bar/", 1) assertCookieCount("/foo/bar/baz", 1) } func (s *ClientSuite) TestThirdPartyDischargeRefused(c *gc.C) { d := bakerytest.NewDischarger(nil, func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { return nil, errgo.New("boo! cond " + cond) }) defer d.Close() // Create a target service. svc := newService("loc", d) ts := httptest.NewServer(serverHandler(serverHandlerParams{ service: svc, authLocation: d.Location(), })) defer ts.Close() // Create a client request. req, err := http.NewRequest("GET", ts.URL, nil) c.Assert(err, gc.IsNil) client := httpbakery.NewClient() // Make the request to the server. resp, err := client.Do(req) c.Assert(errgo.Cause(err), gc.FitsTypeOf, (*httpbakery.DischargeError)(nil)) c.Assert(err, gc.ErrorMatches, `cannot get discharge from ".*": third party refused discharge: cannot discharge: boo! cond is-ok`) c.Assert(resp, gc.IsNil) } func (s *ClientSuite) TestDischargeWithInteractionRequiredError(c *gc.C) { d := bakerytest.NewDischarger(nil, func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { return nil, &httpbakery.Error{ Code: httpbakery.ErrInteractionRequired, Message: "interaction required", Info: &httpbakery.ErrorInfo{ VisitURL: "http://0.1.2.3/", WaitURL: "http://0.1.2.3/", }, } }) defer d.Close() // Create a target service. 
svc := newService("loc", d) ts := httptest.NewServer(serverHandler(serverHandlerParams{ service: svc, authLocation: d.Location(), })) defer ts.Close() // Create a client request. req, err := http.NewRequest("GET", ts.URL, nil) c.Assert(err, gc.IsNil) errCannotVisit := errgo.New("cannot visit") client := httpbakery.NewClient() client.VisitWebPage = func(*url.URL) error { return errCannotVisit } // Make the request to the server. resp, err := client.Do(req) c.Assert(err, gc.ErrorMatches, `cannot get discharge from "https://.*": cannot start interactive session: cannot visit`) c.Assert(httpbakery.IsInteractionError(errgo.Cause(err)), gc.Equals, true) ierr, ok := errgo.Cause(err).(*httpbakery.InteractionError) c.Assert(ok, gc.Equals, true) c.Assert(ierr.Reason, gc.Equals, errCannotVisit) c.Assert(resp, gc.IsNil) } func (s *ClientSuite) TestDischargeWithInteractionRequiredErrorAndWebPageVisitor(c *gc.C) { d := bakerytest.NewDischarger(nil, func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { return nil, &httpbakery.Error{ Code: httpbakery.ErrInteractionRequired, Message: "interaction required", Info: &httpbakery.ErrorInfo{ VisitURL: "http://0.1.2.3/", WaitURL: "http://0.1.2.3/", }, } }) defer d.Close() // Create a target service. svc := newService("loc", d) ts := httptest.NewServer(serverHandler(serverHandlerParams{ service: svc, authLocation: d.Location(), })) defer ts.Close() // Create a client request. req, err := http.NewRequest("GET", ts.URL, nil) c.Assert(err, gc.IsNil) errCannotVisit := errgo.New("cannot visit") client := httpbakery.NewClient() client.WebPageVisitor = visitorFunc(func(_ *httpbakery.Client, m map[string]*url.URL) error { return errCannotVisit }) // Make the request to the server. 
resp, err := client.Do(req) c.Assert(err, gc.ErrorMatches, `cannot get discharge from "https://.*": cannot start interactive session: cannot visit`) c.Assert(httpbakery.IsInteractionError(errgo.Cause(err)), gc.Equals, true) ierr, ok := errgo.Cause(err).(*httpbakery.InteractionError) c.Assert(ok, gc.Equals, true) c.Assert(ierr.Reason, gc.Equals, errCannotVisit) c.Assert(resp, gc.IsNil) } var dischargeWithVisitURLErrorTests = []struct { about string respond func(http.ResponseWriter) expectError string }{{ about: "error message", respond: func(w http.ResponseWriter) { httprequest.ErrorMapper(httpbakery.ErrorToResponse).WriteError(w, fmt.Errorf("an error")) }, expectError: `cannot get discharge from ".*": failed to acquire macaroon after waiting: third party refused discharge: an error`, }, { about: "non-JSON error", respond: func(w http.ResponseWriter) { w.Write([]byte("bad response")) }, // TODO fix this unhelpful error message expectError: `cannot get discharge from ".*": cannot unmarshal wait response: invalid character 'b' looking for beginning of value`, }} func (s *ClientSuite) TestDischargeWithVisitURLError(c *gc.C) { visitor := newVisitHandler(nil) visitSrv := httptest.NewServer(visitor) defer visitSrv.Close() d := bakerytest.NewDischarger(nil, func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { return nil, &httpbakery.Error{ Code: httpbakery.ErrInteractionRequired, Message: "interaction required", Info: &httpbakery.ErrorInfo{ VisitURL: visitSrv.URL + "/visit", WaitURL: visitSrv.URL + "/wait", }, } }) defer d.Close() // Create a target service. 
svc := newService("loc", d) ts := httptest.NewServer(serverHandler(serverHandlerParams{ service: svc, authLocation: d.Location(), })) defer ts.Close() for i, test := range dischargeWithVisitURLErrorTests { c.Logf("test %d: %s", i, test.about) visitor.respond = test.respond client := httpbakery.NewClient() client.VisitWebPage = func(u *url.URL) error { resp, err := http.Get(u.String()) if err != nil { return err } resp.Body.Close() return nil } // Create a client request. req, err := http.NewRequest("GET", ts.URL, nil) c.Assert(err, gc.IsNil) // Make the request to the server. _, err = client.Do(req) c.Assert(err, gc.ErrorMatches, test.expectError) } } func (s *ClientSuite) TestMacaroonsForURL(c *gc.C) { // Create a target service. svc := newService("loc", nil) m1, err := svc.NewMacaroon("id1", []byte("key1"), nil) c.Assert(err, gc.IsNil) m2, err := svc.NewMacaroon("id2", []byte("key2"), nil) c.Assert(err, gc.IsNil) u1 := mustParseURL("http://0.1.2.3/") u2 := mustParseURL("http://0.1.2.3/x/") // Create some cookies with different cookie paths. jar, err := cookiejar.New(nil) c.Assert(err, gc.IsNil) httpbakery.SetCookie(jar, u1, macaroon.Slice{m1}) httpbakery.SetCookie(jar, u2, macaroon.Slice{m2}) jar.SetCookies(u1, []*http.Cookie{{ Name: "foo", Path: "/", Value: "ignored", }, { Name: "bar", Path: "/x/", Value: "ignored", }}) // Check that MacaroonsForURL behaves correctly // with both single and multiple cookies. mss := httpbakery.MacaroonsForURL(jar, u1) c.Assert(mss, gc.HasLen, 1) c.Assert(mss[0], gc.HasLen, 1) c.Assert(mss[0][0].Id(), gc.Equals, "id1") mss = httpbakery.MacaroonsForURL(jar, u2) checked := make(map[string]int) for _, ms := range mss { checked[ms[0].Id()]++ err := svc.Check(ms, checkers.New()) c.Assert(err, gc.IsNil) } c.Assert(checked, jc.DeepEquals, map[string]int{ "id1": 1, "id2": 1, }) } func (s *ClientSuite) TestDoWithBodyAndCustomError(c *gc.C) { d := bakerytest.NewDischarger(nil, noCaveatChecker) defer d.Close() // Create a target service. 
svc := newService("loc", d) type customError struct { CustomError *httpbakery.Error } callCount := 0 handler := func(w http.ResponseWriter, req *http.Request) { callCount++ if _, checkErr := httpbakery.CheckRequest(svc, req, nil, checkers.New()); checkErr != nil { httprequest.WriteJSON(w, http.StatusTeapot, customError{ CustomError: newDischargeRequiredError(serverHandlerParams{ service: svc, authLocation: d.Location(), }, checkErr, req).(*httpbakery.Error), }) return } fmt.Fprintf(w, "hello there") } srv := httptest.NewServer(http.HandlerFunc(handler)) defer srv.Close() req, err := http.NewRequest("GET", srv.URL, nil) c.Assert(err, gc.IsNil) // First check that a normal request fails. resp, err := httpbakery.NewClient().Do(req) c.Assert(err, gc.IsNil) defer resp.Body.Close() c.Assert(resp.StatusCode, gc.Equals, http.StatusTeapot) c.Assert(callCount, gc.Equals, 1) callCount = 0 // Then check that a request with a custom error getter succeeds. errorGetter := func(resp *http.Response) error { if resp.StatusCode != http.StatusTeapot { return nil } data, err := ioutil.ReadAll(resp.Body) if err != nil { panic(err) } var respErr customError if err := json.Unmarshal(data, &respErr); err != nil { panic(err) } return respErr.CustomError } resp, err = httpbakery.NewClient().DoWithBodyAndCustomError(req, nil, errorGetter) c.Assert(err, gc.IsNil) data, err := ioutil.ReadAll(resp.Body) c.Assert(err, gc.IsNil) c.Assert(string(data), gc.Equals, "hello there") c.Assert(callCount, gc.Equals, 2) } func (s *ClientSuite) TestHandleError(c *gc.C) { d := bakerytest.NewDischarger(nil, noCaveatChecker) defer d.Close() // Create a target service. 
svc := newService("loc", d) srv := httptest.NewServer(serverHandler(serverHandlerParams{ service: svc, authLocation: "unknown", mutateError: nil, })) defer srv.Close() m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ Location: d.Location(), Condition: "something", }}) c.Assert(err, gc.IsNil) u, err := url.Parse(srv.URL + "/bar") c.Assert(err, gc.IsNil) respErr := &httpbakery.Error{ Message: "an error", Code: httpbakery.ErrDischargeRequired, Info: &httpbakery.ErrorInfo{ Macaroon: m, MacaroonPath: "/foo", }, } client := httpbakery.NewClient() err = client.HandleError(u, respErr) c.Assert(err, gc.Equals, nil) // No cookies at the original location. c.Assert(client.Client.Jar.Cookies(u), gc.HasLen, 0) u.Path = "/foo" cookies := client.Client.Jar.Cookies(u) c.Assert(cookies, gc.HasLen, 1) // Check that we can actually make a request // with the newly acquired macaroon cookies. req, err := http.NewRequest("GET", srv.URL+"/foo", nil) c.Assert(err, gc.IsNil) resp, err := client.Do(req) c.Assert(err, gc.IsNil) resp.Body.Close() c.Assert(resp.StatusCode, gc.Equals, http.StatusOK) } func (s *ClientSuite) TestHandleErrorDifferentError(c *gc.C) { berr := &httpbakery.Error{ Message: "an error", Code: "another code", } client := httpbakery.NewClient() err := client.HandleError(&url.URL{}, berr) c.Assert(err, gc.Equals, berr) } func (s *ClientSuite) TestNewCookieExpires(c *gc.C) { t := time.Now().Add(24 * time.Hour) svc := newService("loc", nil) m, err := svc.NewMacaroon("", nil, []checkers.Caveat{ checkers.TimeBeforeCaveat(t), }) c.Assert(err, gc.IsNil) cookie, err := httpbakery.NewCookie(macaroon.Slice{m}) c.Assert(err, gc.IsNil) c.Assert(cookie.Expires.Equal(t), gc.Equals, true, gc.Commentf("obtained: %s, expected: %s", cookie.Expires, t)) } func mustParseURL(s string) *url.URL { u, err := url.Parse(s) if err != nil { panic(err) } return u } type visitHandler struct { mux *http.ServeMux rendez chan struct{} respond func(w http.ResponseWriter) } func 
newVisitHandler(respond func(http.ResponseWriter)) *visitHandler { h := &visitHandler{ rendez: make(chan struct{}, 1), respond: respond, mux: http.NewServeMux(), } h.mux.HandleFunc("/visit", h.serveVisit) h.mux.HandleFunc("/wait", h.serveWait) return h } func (h *visitHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { h.mux.ServeHTTP(w, req) } func (h *visitHandler) serveVisit(w http.ResponseWriter, req *http.Request) { h.rendez <- struct{}{} } func (h *visitHandler) serveWait(w http.ResponseWriter, req *http.Request) { <-h.rendez h.respond(w) } // assertResponse asserts that the given response is OK and contains // the expected body text. func assertResponse(c *gc.C, resp *http.Response, expectBody string) { c.Assert(resp.StatusCode, gc.Equals, http.StatusOK) body, err := ioutil.ReadAll(resp.Body) c.Assert(err, gc.IsNil) c.Assert(string(body), gc.DeepEquals, expectBody) } func noVisit(*url.URL) error { return fmt.Errorf("should not be visiting") } type readCounter struct { io.ReadSeeker byteCount int } func (r *readCounter) Read(buf []byte) (int, error) { n, err := r.ReadSeeker.Read(buf) r.byteCount += n return n, err } func newService(location string, locator bakery.PublicKeyLocator) *bakery.Service { svc, err := bakery.NewService(bakery.NewServiceParams{ Location: location, Locator: locator, }) if err != nil { panic(err) } return svc } func clientRequestWithCookies(c *gc.C, u string, macaroons macaroon.Slice) *http.Client { client := httpbakery.NewHTTPClient() url, err := url.Parse(u) c.Assert(err, gc.IsNil) err = httpbakery.SetCookie(client.Jar, url, macaroons) c.Assert(err, gc.IsNil) return client } var handleErrors = httprequest.ErrorMapper(httpbakery.ErrorToResponse).HandleErrors type serverHandlerParams struct { // service holds the service that will be used to check incoming // requests. service *bakery.Service // checker is used to check first party caveats in macaroons. // If it is nil, isChecker("something") will be used. 
checker checkers.Checker // authLocation holds the location of any 3rd party authorizer. // If this is non-empty, a 3rd party caveat will be added // addressed to this location. authLocation string // When authLocation is non-empty and thirdPartyCondition // is non-zero, it will be called to determine the condition // to address to he third party. thirdPartyCondition func() string // mutateError, if non-zero, will be called with any // discharge-required error before responding // to the client. mutateError func(*httpbakery.Error) // If caveats is non-nil, it is called to get caveats to // add to the returned macaroon. caveats func() []checkers.Caveat } // serverHandler returns an HTTP handler that checks macaroon authorization // and, if that succeeds, writes the string "done" and echos anything in the // request body. // It recognises the single first party caveat "is something". func serverHandler(hp serverHandlerParams) http.Handler { if hp.checker == nil { hp.checker = isChecker("something") } h := handleErrors(func(p httprequest.Params) error { if _, checkErr := httpbakery.CheckRequest(hp.service, p.Request, nil, hp.checker); checkErr != nil { return newDischargeRequiredError(hp, checkErr, p.Request) } fmt.Fprintf(p.Response, "done") // Special case: the no-body path doesn't return the body. if p.Request.URL.Path == "/no-body" { return nil } data, err := ioutil.ReadAll(p.Request.Body) if err != nil { panic(fmt.Errorf("cannot read body: %v", err)) } if len(data) > 0 { fmt.Fprintf(p.Response, " %s", data) } return nil }) return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { h(w, req, nil) }) } // newDischargeRequiredError returns a discharge-required error holding // a newly minted macaroon referencing the original check error // checkErr. If hp.authLocation is non-empty, the issued macaroon will // contain an "is-ok" third party caveat addressed to that location. 
// // If req is non-nil, it will be used to pass to NewDischargeRequiredErrorForRequest, // otherwise the old protocol (triggered by NewDischargeRequiredError) will be used. func newDischargeRequiredError(hp serverHandlerParams, checkErr error, req *http.Request) error { var caveats []checkers.Caveat if hp.authLocation != "" { caveats = []checkers.Caveat{{ Location: hp.authLocation, Condition: "is-ok", }} } if hp.caveats != nil { caveats = append(caveats, hp.caveats()...) } m, err := hp.service.NewMacaroon("", nil, caveats) if err != nil { panic(fmt.Errorf("cannot make new macaroon: %v", err)) } if req != nil { err = httpbakery.NewDischargeRequiredErrorForRequest(m, "", checkErr, req) } else { err = httpbakery.NewDischargeRequiredError(m, "", checkErr) } if hp.mutateError != nil { hp.mutateError(err.(*httpbakery.Error)) } return err } type isChecker string func (isChecker) Condition() string { return "is" } func (c isChecker) Check(_, arg string) error { if arg != string(c) { return fmt.Errorf("%v doesn't match %s", arg, c) } return nil } func noCaveatChecker(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { return nil, nil } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/keyring_test.go0000664000175000017500000001057312672604475024733 0ustar marcomarcopackage httpbakery_test import ( "fmt" "net/http" "net/http/httptest" "net/http/httputil" "net/url" jujutesting "github.com/juju/testing" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakerytest" "gopkg.in/macaroon-bakery.v1/httpbakery" ) type KeyringSuite struct { jujutesting.LoggingSuite } var _ = gc.Suite(&KeyringSuite{}) func (s *KeyringSuite) TestCachePrepopulated(c *gc.C) { cache := bakery.NewPublicKeyRing() key, err := bakery.GenerateKey() c.Assert(err, gc.IsNil) cache.AddPublicKeyForLocation("https://0.1.2.3/", true, &key.Public) kr := httpbakery.NewPublicKeyRing(nil, cache) pk, err := 
kr.PublicKeyForLocation("https://0.1.2.3/") c.Assert(err, gc.IsNil) c.Assert(*pk, gc.Equals, key.Public) } func (s *KeyringSuite) TestCacheMiss(c *gc.C) { d := bakerytest.NewDischarger(nil, nil) defer d.Close() kr := httpbakery.NewPublicKeyRing(nil, nil) expectPublicKey := d.Service.PublicKey() pk, err := kr.PublicKeyForLocation(d.Location()) c.Assert(err, gc.IsNil) c.Assert(*pk, gc.Equals, *expectPublicKey) // Close down the service and make sure that // the key is cached. d.Close() pk, err = kr.PublicKeyForLocation(d.Location()) c.Assert(err, gc.IsNil) c.Assert(*pk, gc.Equals, *expectPublicKey) } func (s *KeyringSuite) TestInsecureURL(c *gc.C) { // Set up a discharger with an non-HTTPS access point. d := bakerytest.NewDischarger(nil, nil) defer d.Close() httpsDischargeURL, err := url.Parse(d.Location()) c.Assert(err, gc.IsNil) srv := httptest.NewServer(httputil.NewSingleHostReverseProxy(httpsDischargeURL)) defer srv.Close() // Check that we are refused because it's an insecure URL. kr := httpbakery.NewPublicKeyRing(nil, nil) pk, err := kr.PublicKeyForLocation(srv.URL) c.Assert(err, gc.ErrorMatches, `untrusted discharge URL "http://.*"`) c.Assert(pk, gc.IsNil) // Check that it does work when we've enabled AllowInsecure. 
kr.AllowInsecure() pk, err = kr.PublicKeyForLocation(srv.URL) c.Assert(err, gc.IsNil) c.Assert(*pk, gc.Equals, *d.Service.PublicKey()) } func (s *KeyringSuite) TestCustomHTTPClient(c *gc.C) { client := &http.Client{ Transport: errorTransport{}, } kr := httpbakery.NewPublicKeyRing(client, nil) pk, err := kr.PublicKeyForLocation("https://0.1.2.3/") c.Assert(err, gc.ErrorMatches, `cannot get public key from "https://0.1.2.3/publickey": Get https://0.1.2.3/publickey: custom round trip error`) c.Assert(pk, gc.IsNil) } func (s *KeyringSuite) TestPublicKey(c *gc.C) { d := bakerytest.NewDischarger(nil, noCaveatChecker) defer d.Close() client := httpbakery.NewHTTPClient() publicKey, err := httpbakery.PublicKeyForLocation(client, d.Location()) c.Assert(err, gc.IsNil) expectedKey := d.Service.PublicKey() c.Assert(publicKey, gc.DeepEquals, expectedKey) // Check that it works with client==nil. publicKey, err = httpbakery.PublicKeyForLocation(nil, d.Location()) c.Assert(err, gc.IsNil) c.Assert(publicKey, gc.DeepEquals, expectedKey) } func (s *KeyringSuite) TestPublicKeyWrongURL(c *gc.C) { client := httpbakery.NewHTTPClient() _, err := httpbakery.PublicKeyForLocation(client, "http://localhost:0") c.Assert(err, gc.ErrorMatches, `cannot get public key from "http://localhost:0/publickey": Get http://localhost:0/publickey: dial tcp 127.0.0.1:0: .*connection refused`) } func (s *KeyringSuite) TestPublicKeyReturnsInvalidJSON(c *gc.C) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "BADJSON") })) defer ts.Close() client := httpbakery.NewHTTPClient() _, err := httpbakery.PublicKeyForLocation(client, ts.URL) c.Assert(err, gc.ErrorMatches, fmt.Sprintf(`failed to decode response from "%s/publickey": invalid character 'B' looking for beginning of value`, ts.URL)) } func (s *KeyringSuite) TestPublicKeyReturnsStatusInternalServerError(c *gc.C) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 
w.WriteHeader(http.StatusInternalServerError) })) defer ts.Close() client := httpbakery.NewHTTPClient() _, err := httpbakery.PublicKeyForLocation(client, ts.URL) c.Assert(err, gc.ErrorMatches, fmt.Sprintf(`cannot get public key from "%s/publickey": got status 500 Internal Server Error`, ts.URL)) } type errorTransport struct{} func (errorTransport) RoundTrip(req *http.Request) (*http.Response, error) { return nil, errgo.New("custom round trip error") } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/keyring.go0000664000175000017500000000606112672604475023671 0ustar marcomarcopackage httpbakery import ( "encoding/json" "io/ioutil" "net/http" "net/url" "strings" "gopkg.in/errgo.v1" "gopkg.in/macaroon-bakery.v1/bakery" ) // NewPublicKeyRing returns a new public keyring that uses // the given client to find public keys and uses the // given cache as a backing. If cache is nil, a new // cache will be created. If client is nil, http.DefaultClient will // be used. func NewPublicKeyRing(client *http.Client, cache *bakery.PublicKeyRing) *PublicKeyRing { if cache == nil { cache = bakery.NewPublicKeyRing() } if client == nil { client = http.DefaultClient } return &PublicKeyRing{ client: client, cache: cache, } } // PublicKeyRing represents a public keyring that can interrogate // remote services for their public keys. By default it refuses // to use insecure URLs. type PublicKeyRing struct { client *http.Client allowInsecure bool cache *bakery.PublicKeyRing } // AllowInsecure allows insecure URLs. This can be useful // for testing purposes. func (kr *PublicKeyRing) AllowInsecure() { kr.allowInsecure = true } // PublicKeyForLocation implements bakery.PublicKeyLocator // by first looking in the backing cache and, if that fails, // making an HTTP request to the public key associated // with the given discharge location. 
func (kr *PublicKeyRing) PublicKeyForLocation(loc string) (*bakery.PublicKey, error) { u, err := url.Parse(loc) if err != nil { return nil, errgo.Notef(err, "invalid discharge URL %q", loc) } if u.Scheme != "https" && !kr.allowInsecure { return nil, errgo.Newf("untrusted discharge URL %q", loc) } k, err := kr.cache.PublicKeyForLocation(loc) if err == nil { return k, nil } k, err = PublicKeyForLocation(kr.client, loc) if err != nil { return nil, errgo.Mask(err) } if err := kr.cache.AddPublicKeyForLocation(loc, true, k); err != nil { // Cannot happen in practice as it will only fail if // loc is an invalid URL which we have already checked. return nil, errgo.Notef(err, "cannot cache discharger URL %q", loc) } return k, nil } // PublicKeyForLocation returns the public key from a macaroon // discharge server running at the given location URL. // Note that this is insecure if an http: URL scheme is used. // If client is nil, http.DefaultClient will be used. func PublicKeyForLocation(client *http.Client, url string) (*bakery.PublicKey, error) { if client == nil { client = http.DefaultClient } url = strings.TrimSuffix(url, "/") + "/publickey" resp, err := client.Get(url) if err != nil { return nil, errgo.Notef(err, "cannot get public key from %q", url) } if resp.StatusCode != http.StatusOK { return nil, errgo.Newf("cannot get public key from %q: got status %s", url, resp.Status) } defer resp.Body.Close() data, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, errgo.Notef(err, "failed to read response body from %q", url) } var pubkey struct { PublicKey *bakery.PublicKey } err = json.Unmarshal(data, &pubkey) if err != nil { return nil, errgo.Notef(err, "failed to decode response from %q", url) } return pubkey.PublicKey, nil } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/discharge.go0000664000175000017500000000737312672604475024161 0ustar marcomarcopackage httpbakery import ( "crypto/rand" "fmt" "net/http" "path" "github.com/juju/httprequest" 
"github.com/julienschmidt/httprouter" "gopkg.in/errgo.v1" "gopkg.in/macaroon.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" ) type dischargeHandler struct { svc *bakery.Service checker func(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) } // AddDischargeHandler adds handlers to the given // ServeMux to serve third party caveat discharges // using the given service. // // The handlers are added under the given rootPath, // which must be non-empty. // // The check function is used to check whether a client making the given // request should be allowed a discharge for the given caveat. If it // does not return an error, the caveat will be discharged, with any // returned caveats also added to the discharge macaroon. // If it returns an error with a *Error cause, the error will be marshaled // and sent back to the client. // // The name space served by DischargeHandler is as follows. // All parameters can be provided either as URL attributes // or form attributes. The result is always formatted as a JSON // object. // // On failure, all endpoints return an error described by // the Error type. // // POST /discharge // params: // id: id of macaroon to discharge // location: location of original macaroon (optional (?)) // ?? flow=redirect|newwindow // result on success (http.StatusOK): // { // Macaroon *macaroon.Macaroon // } // // GET /publickey // result: // public key of service // expiry time of key func AddDischargeHandler(mux *http.ServeMux, rootPath string, svc *bakery.Service, checker func(req *http.Request, cavId, cav string) ([]checkers.Caveat, error)) { d := &dischargeHandler{ svc: svc, checker: checker, } mux.Handle(path.Join(rootPath, "discharge"), mkHandler(handleJSON(d.serveDischarge))) // TODO(rog) is there a case for making public key caveat signing // optional? 
mux.Handle(path.Join(rootPath, "publickey"), mkHandler(handleJSON(d.servePublicKey))) } type dischargeResponse struct { Macaroon *macaroon.Macaroon `json:",omitempty"` } func (d *dischargeHandler) serveDischarge(p httprequest.Params) (interface{}, error) { r, err := d.serveDischarge1(p) if err != nil { logger.Debugf("serveDischarge -> error %#v", err) } else { logger.Debugf("serveDischarge -> %#v", r) } return r, err } func (d *dischargeHandler) serveDischarge1(p httprequest.Params) (interface{}, error) { logger.Debugf("dischargeHandler.serveDischarge {") defer logger.Debugf("}") if p.Request.Method != "POST" { // TODO http.StatusMethodNotAllowed) return nil, badRequestErrorf("method not allowed") } p.Request.ParseForm() id := p.Request.Form.Get("id") if id == "" { return nil, badRequestErrorf("id attribute is empty") } checker := func(cavId, cav string) ([]checkers.Caveat, error) { return d.checker(p.Request, cavId, cav) } // TODO(rog) pass location into discharge // location := p.Request.Form.Get("location") var resp dischargeResponse m, err := d.svc.Discharge(bakery.ThirdPartyCheckerFunc(checker), id) if err != nil { return nil, errgo.NoteMask(err, "cannot discharge", errgo.Any) } resp.Macaroon = m return &resp, nil } type publicKeyResponse struct { PublicKey *bakery.PublicKey } func (d *dischargeHandler) servePublicKey(httprequest.Params) (interface{}, error) { return publicKeyResponse{d.svc.PublicKey()}, nil } func randomBytes(n int) ([]byte, error) { b := make([]byte, n) _, err := rand.Read(b) if err != nil { return nil, fmt.Errorf("cannot generate %d random bytes: %v", n, err) } return b, nil } func mkHandler(h httprouter.Handle) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { h(w, req, nil) }) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/package_test.go0000664000175000017500000000017412672604475024652 0ustar marcomarcopackage httpbakery_test import ( "testing" gc "gopkg.in/check.v1" ) func TestPackage(t 
*testing.T) { gc.TestingT(t) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/visitor.go0000664000175000017500000001076612677511232023720 0ustar marcomarcopackage httpbakery import ( "net/http" "net/url" "github.com/juju/httprequest" "gopkg.in/errgo.v1" ) // ErrMethodNotSupported is the error that a Visitor implementation // should return if it does not support any of the interaction methods. var ErrMethodNotSupported = errgo.New("interaction method not supported") // Visitor represents a handler that can handle ErrInteractionRequired // errors from a client's discharge request. The methodURLs parameter to // VisitWebPage holds a set of possible ways to complete the discharge // request. When called directly from Client, this will contain only a // single entry with the UserInteractionMethod key, specifying that the // associated URL should be opened in a web browser for the user to // interact with. // // See FallbackVisitor for a way to gain access to alternative methods. // // A Visitor implementation should return ErrMethodNotSupported if it // cannot handle any of the supplied methods. type Visitor interface { VisitWebPage(client *Client, methodURLs map[string]*url.URL) error } const ( // UserInteractionMethod is the methodURLs key used for a URL // that should be visited in a user's web browser. This is also // the URL that can be used to fetch the available login methods // (with an appropriate Accept header). UserInteractionMethod = "interactive" ) type multiVisitor struct { supportedMethods []Visitor } // NewMultiVisitor returns a Visitor that queries the discharger for // available methods and then tries each of the given visitors in turn // until one succeeds or fails with an error cause other than // ErrMethodNotSupported. 
func NewMultiVisitor(methods ...Visitor) Visitor { return &multiVisitor{ supportedMethods: methods, } } // VisitWebPage implements Visitor.VisitWebPage by obtaining all the // available interaction methods and calling v.supportedMethods until it // finds one that recognizes the method. If a Visitor returns an error // other than ErrMethodNotSupported the error will be immediately // returned to the caller; its cause will not be masked. func (v multiVisitor) VisitWebPage(client *Client, methodURLs map[string]*url.URL) error { // The Client implementation will always include a UserInteractionMethod // entry taken from the VisitURL field in the error, so use that // to find the set of supported interaction methods. u := methodURLs[UserInteractionMethod] if u == nil { return errgo.Newf("cannot get interaction methods because no %q URL found", UserInteractionMethod) } if urls, err := GetInteractionMethods(client, u); err == nil { // We succeeded in getting the set of interaction methods from // the discharger. Use them. methodURLs = urls if methodURLs[UserInteractionMethod] == nil { // There's no "interactive" method returned, but we know // the server does actually support it, because all dischargers // are required to, so fill it in with the original URL. methodURLs[UserInteractionMethod] = u } } else { logger.Debugf("ignoring error: cannot get interaction methods: %v", err) } // Go through all the Visitors, looking for one that supports one // of the methods we have found. for _, m := range v.supportedMethods { err := m.VisitWebPage(client, methodURLs) if err == nil { return nil } if errgo.Cause(err) != ErrMethodNotSupported { return errgo.Mask(err, errgo.Any) } } return errgo.Newf("no methods supported") } // GetInteractionMethods queries a URL as found in an // ErrInteractionRequired VisitURL field to find available interaction // methods. 
// // It does this by sending a GET request to the URL with the Accept // header set to "application/json" and parsing the resulting // response as a map[string]string. // // It uses the given Doer to execute the HTTP GET request. func GetInteractionMethods(client httprequest.Doer, u *url.URL) (map[string]*url.URL, error) { httpReqClient := &httprequest.Client{ Doer: client, } req, err := http.NewRequest("GET", u.String(), nil) if err != nil { return nil, errgo.Notef(err, "cannot create request") } req.Header.Set("Accept", "application/json") var methodURLStrs map[string]string if err := httpReqClient.Do(req, nil, &methodURLStrs); err != nil { return nil, errgo.Mask(err) } // Make all the URLs relative to the request URL. methodURLs := make(map[string]*url.URL) for m, urlStr := range methodURLStrs { relURL, err := url.Parse(urlStr) if err != nil { return nil, errgo.Notef(err, "invalid URL for interaction method %q", m) } methodURLs[m] = u.ResolveReference(relURL) } return methodURLs, nil } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/visitor_test.go0000664000175000017500000001302312677511232024744 0ustar marcomarcopackage httpbakery_test import ( "fmt" "net/http" "net/http/httptest" "net/url" jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/macaroon-bakery.v1/httpbakery" ) type VisitorSuite struct { jujutesting.LoggingSuite } var _ = gc.Suite(&VisitorSuite{}) func (*VisitorSuite) TestGetInteractionMethodsGetFailure(c *gc.C) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { w.WriteHeader(http.StatusTeapot) w.Write([]byte("failure")) })) defer srv.Close() methods, err := httpbakery.GetInteractionMethods(http.DefaultClient, mustParseURL(srv.URL)) c.Assert(methods, gc.IsNil) c.Assert(err, gc.ErrorMatches, `GET .*: cannot unmarshal error response \(status 418 I'm a teapot\): unexpected content type text/plain; want application/json; 
content: failure`) } func (*VisitorSuite) TestGetInteractionMethodsSuccess(c *gc.C) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { w.Header().Set("Content-Type", "application/json") fmt.Fprint(w, `{"method": "http://somewhere/something"}`) })) defer srv.Close() methods, err := httpbakery.GetInteractionMethods(http.DefaultClient, mustParseURL(srv.URL)) c.Assert(err, gc.IsNil) c.Assert(methods, jc.DeepEquals, map[string]*url.URL{ "method": { Scheme: "http", Host: "somewhere", Path: "/something", }, }) } func (*VisitorSuite) TestGetInteractionMethodsInvalidURL(c *gc.C) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { w.Header().Set("Content-Type", "application/json") fmt.Fprint(w, `{"method": ":::"}`) })) defer srv.Close() methods, err := httpbakery.GetInteractionMethods(http.DefaultClient, mustParseURL(srv.URL)) c.Assert(methods, gc.IsNil) c.Assert(err, gc.ErrorMatches, `invalid URL for interaction method "method": parse :::: missing protocol scheme`) } func (*VisitorSuite) TestMultiVisitorNoUserInteractionMethod(c *gc.C) { v := httpbakery.NewMultiVisitor() err := v.VisitWebPage(httpbakery.NewClient(), nil) c.Assert(err, gc.ErrorMatches, `cannot get interaction methods because no "interactive" URL found`) } func (*VisitorSuite) TestMultiVisitorNoInteractionMethods(c *gc.C) { initialPage := 0 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { w.Header().Set("Content-Type", "text/html") initialPage++ fmt.Fprint(w, `oh yes`) })) defer srv.Close() methods := map[string]*url.URL{ httpbakery.UserInteractionMethod: mustParseURL(srv.URL), } visited := 0 v := httpbakery.NewMultiVisitor( visitorFunc(func(_ *httpbakery.Client, m map[string]*url.URL) error { c.Check(m, jc.DeepEquals, methods) visited++ return nil }), ) err := v.VisitWebPage(httpbakery.NewClient(), methods) c.Assert(err, gc.IsNil) c.Assert(initialPage, gc.Equals, 1) 
c.Assert(visited, gc.Equals, 1) } func (*VisitorSuite) TestMultiVisitorSequence(c *gc.C) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { w.Header().Set("Content-Type", "application/json") fmt.Fprint(w, `{"method": "http://somewhere/something"}`) })) defer srv.Close() firstCalled, secondCalled := 0, 0 v := httpbakery.NewMultiVisitor( visitorFunc(func(_ *httpbakery.Client, m map[string]*url.URL) error { c.Check(m["method"], gc.NotNil) firstCalled++ return httpbakery.ErrMethodNotSupported }), visitorFunc(func(_ *httpbakery.Client, m map[string]*url.URL) error { c.Check(m["method"], gc.NotNil) secondCalled++ return nil }), ) err := v.VisitWebPage(httpbakery.NewClient(), map[string]*url.URL{ httpbakery.UserInteractionMethod: mustParseURL(srv.URL), }) c.Assert(err, gc.IsNil) c.Assert(firstCalled, gc.Equals, 1) c.Assert(secondCalled, gc.Equals, 1) } func (*VisitorSuite) TestUserInteractionFallback(c *gc.C) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { w.Header().Set("Content-Type", "application/json") fmt.Fprint(w, `{"method": "http://somewhere/something"}`) })) defer srv.Close() called := 0 // Check that even though the methods didn't explicitly // include the "interactive" method, it is still supplied. 
v := httpbakery.NewMultiVisitor( visitorFunc(func(_ *httpbakery.Client, m map[string]*url.URL) error { c.Check(m, jc.DeepEquals, map[string]*url.URL{ "method": mustParseURL("http://somewhere/something"), httpbakery.UserInteractionMethod: mustParseURL(srv.URL), }) called++ return nil }), ) err := v.VisitWebPage(httpbakery.NewClient(), map[string]*url.URL{ httpbakery.UserInteractionMethod: mustParseURL(srv.URL), }) c.Assert(err, gc.IsNil) c.Assert(called, gc.Equals, 1) } func (*VisitorSuite) TestMultiVisitorVisitorError(c *gc.C) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { w.Header().Set("Content-Type", "application/json") fmt.Fprint(w, `{"method": "http://somewhere/something"}`) })) defer srv.Close() testError := errgo.New("test error") v := httpbakery.NewMultiVisitor( visitorFunc(func(*httpbakery.Client, map[string]*url.URL) error { return testError }), ) err := v.VisitWebPage(httpbakery.NewClient(), map[string]*url.URL{ httpbakery.UserInteractionMethod: mustParseURL(srv.URL), }) c.Assert(errgo.Cause(err), gc.Equals, testError) } type visitorFunc func(*httpbakery.Client, map[string]*url.URL) error func (f visitorFunc) VisitWebPage(c *httpbakery.Client, m map[string]*url.URL) error { return f(c, m) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/checkers.go0000664000175000017500000000414612672604475024012 0ustar marcomarcopackage httpbakery import ( "net" "net/http" "gopkg.in/errgo.v1" "gopkg.in/macaroon-bakery.v1/bakery/checkers" ) type httpContext struct { req *http.Request } // Checkers implements the standard HTTP-request checkers. // It does not include the "declared" checker, as that // must be added for each individual set of macaroons // that are checked. 
func Checkers(req *http.Request) checkers.Checker { c := httpContext{req} return checkers.Map{ checkers.CondClientIPAddr: c.clientIPAddr, checkers.CondClientOrigin: c.clientOrigin, } } // clientIPAddr implements the IP client address checker // for an HTTP request. func (c httpContext) clientIPAddr(_, addr string) error { ip := net.ParseIP(addr) if ip == nil { return errgo.Newf("cannot parse IP address in caveat") } if c.req.RemoteAddr == "" { return errgo.Newf("client has no remote address") } reqIP, err := requestIPAddr(c.req) if err != nil { return errgo.Mask(err) } if !reqIP.Equal(ip) { return errgo.Newf("client IP address mismatch, got %s", reqIP) } return nil } // clientOrigin implements the Origin header checker // for an HTTP request. func (c httpContext) clientOrigin(_, origin string) error { if reqOrigin := c.req.Header.Get("Origin"); reqOrigin != origin { return errgo.Newf("request has invalid Origin header; got %q", reqOrigin) } return nil } // SameClientIPAddrCaveat returns a caveat that will check that // the remote IP address is the same as that in the given HTTP request. 
func SameClientIPAddrCaveat(req *http.Request) checkers.Caveat { if req.RemoteAddr == "" { return checkers.ErrorCaveatf("client has no remote IP address") } ip, err := requestIPAddr(req) if err != nil { return checkers.ErrorCaveatf("%v", err) } return checkers.ClientIPAddrCaveat(ip) } func requestIPAddr(req *http.Request) (net.IP, error) { reqHost, _, err := net.SplitHostPort(req.RemoteAddr) if err != nil { return nil, errgo.Newf("cannot parse host port in remote address: %v", err) } ip := net.ParseIP(reqHost) if ip == nil { return nil, errgo.Newf("invalid IP address in remote address %q", req.RemoteAddr) } return ip, nil } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/0000775000175000017500000000000012672604475022765 5ustar marcomarcocharm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/export_test.go0000664000175000017500000000011312672604475025667 0ustar marcomarcopackage agent type AgentLogin agentLogin type AgentResponse agentResponse charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/agent.go0000664000175000017500000001570312672604475024420 0ustar marcomarco// Package agent enables non-interactive (agent) login using macaroons. 
// To enable agent authorization with a given httpbakery.Client c against // a given third party discharge server URL u: // // SetUpAuth(c, u, agentUsername) // package agent import ( "bytes" "encoding/base64" "encoding/json" "fmt" "io/ioutil" "mime" "net/http" "net/url" "github.com/juju/loggo" "gopkg.in/errgo.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/httpbakery" ) var logger = loggo.GetLogger("httpbakery.agent") /* PROTOCOL An agent login works as follows: Agent Login Service | | | GET visitURL with agent cookie | |----------------------------------->| | | | Macaroon with local third-party | | caveat | |<-----------------------------------| | | | GET visitURL with agent cookie & | | discharged macaroon | |----------------------------------->| | | | Agent login response | |<-----------------------------------| | | The agent cookie is a cookie named "agent-login" holding a base64 encoded JSON object described by the agentLogin struct. A local third-party caveat is a third party caveat with the location set to "local" and the caveat encrypted with the public key declared in the agent cookie. The httpbakery.Client automatically discharges the local third-party caveat. On success the response is a JSON object described by agentResponse with the AgentLogin field set to true. If an error occurs then the response should be a JSON object that unmarshals to an httpbakery.Error. */ const cookieName = "agent-login" // agentLogin defines the structure of an agent login cookie. It is also // returned in a successful agent login attempt to help indicate that an // agent login has occurred. type agentLogin struct { Username string `json:"username"` PublicKey *bakery.PublicKey `json:"public_key"` } // agentResponse contains the response to an agent login attempt. type agentResponse struct { AgentLogin bool `json:"agent_login"` } // ErrNoAgentLoginCookie is the error returned when the expected // agent login cookie has not been found. 
var ErrNoAgentLoginCookie = errgo.New("no agent-login cookie found") // LoginCookie returns details of the agent login cookie // from the given request. If no agent-login cookie is found, // it returns an ErrNoAgentLoginCookie error. func LoginCookie(req *http.Request) (username string, key *bakery.PublicKey, err error) { c, err := req.Cookie(cookieName) if err != nil { return "", nil, ErrNoAgentLoginCookie } b, err := base64.StdEncoding.DecodeString(c.Value) if err != nil { return "", nil, errgo.Notef(err, "cannot decode cookie value") } var al agentLogin if err := json.Unmarshal(b, &al); err != nil { return "", nil, errgo.Notef(err, "cannot unmarshal agent login") } if al.Username == "" { return "", nil, errgo.Newf("agent login has no user name") } if al.PublicKey == nil { return "", nil, errgo.Newf("agent login has no public key") } return al.Username, al.PublicKey, nil } // SetUpAuth configures agent authentication on c. A cookie is created in // c's cookie jar containing credentials derived from the username and // c.Key. c.VisitWebPage is set to VisitWebPage(c). The return is // non-nil only if c.Key is nil. func SetUpAuth(c *httpbakery.Client, u *url.URL, username string) error { if c.Key == nil { return errgo.New("cannot set-up authentication: client key not configured") } SetCookie(c.Jar, u, username, &c.Key.Public) c.VisitWebPage = VisitWebPage(c) return nil } // SetCookie creates a cookie in jar which is suitable for performing agent // logins to u. // // If using SetUpAuth, it should not be necessary to use // this function. func SetCookie(jar http.CookieJar, u *url.URL, username string, pk *bakery.PublicKey) { al := agentLogin{ Username: username, PublicKey: pk, } b, err := json.Marshal(al) if err != nil { // This shouldn't happen as the agentLogin type has to be marshalable. 
panic(errgo.Notef(err, "cannot marshal cookie")) } v := base64.StdEncoding.EncodeToString(b) jar.SetCookies(u, []*http.Cookie{{ Name: cookieName, Value: v, }}) } // VisitWebPage creates a function that can be used with // httpbakery.Client.VisitWebPage. The function uses c to access the // visit URL. If no agent-login cookie has been configured for u an error // with the cause of ErrNoAgentLoginCookie will be returned. If the login // fails the returned error will be of type *httpbakery.Error. If the // response from the visitURL cannot be interpreted the error will be of // type *UnexpectedResponseError. // // If using SetUpAuth, it should not be necessary to use // this function. func VisitWebPage(c *httpbakery.Client) func(u *url.URL) error { return func(u *url.URL) error { err := ErrNoAgentLoginCookie for _, c := range c.Jar.Cookies(u) { if c.Name == cookieName { err = nil break } } if err != nil { return errgo.WithCausef(err, http.ErrNoCookie, "cannot perform agent login") } req, err := http.NewRequest("GET", u.String(), nil) if err != nil { return errgo.Notef(err, "cannot create request") } resp, err := c.Do(req) if err != nil { return errgo.Notef(err, "cannot perform request") } defer resp.Body.Close() b, err := ioutil.ReadAll(resp.Body) if err != nil { logger.Errorf("cannot read response body: %s", err) b = []byte{} } mt, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) if err != nil { logger.Warningf("cannot parse response content type: %s", err) mt = "" } if mt != "application/json" { uerr := (*UnexpectedResponseError)(resp) uerr.Body = ioutil.NopCloser(bytes.NewReader(b)) return uerr } if resp.StatusCode != http.StatusOK { var herr httpbakery.Error err := json.Unmarshal(b, &herr) if err == nil && herr.Message != "" { return &herr } if err != nil { logger.Warningf("cannot unmarshal error response: %s", err) } uerr := (*UnexpectedResponseError)(resp) uerr.Body = ioutil.NopCloser(bytes.NewReader(b)) return uerr } var ar agentResponse err = 
json.Unmarshal(b, &ar) if err == nil && ar.AgentLogin { return nil } if err != nil { logger.Warningf("cannot unmarshal response: %s", err) } uerr := (*UnexpectedResponseError)(resp) uerr.Body = ioutil.NopCloser(bytes.NewReader(b)) return uerr } } // UnexpectedResponseError is the error returned when a response is // received that cannot be interpreted. type UnexpectedResponseError http.Response func (u *UnexpectedResponseError) Error() string { return fmt.Sprintf( "unexpected response to non-interactive web page visit %s (content type %s)", u.Request.URL.String(), u.Header.Get("Content-Type")) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/agent_test.go0000664000175000017500000001602112672604475025451 0ustar marcomarcopackage agent_test import ( "encoding/base64" "net/http" "net/http/httptest" "net/url" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon-bakery.v1/httpbakery/agent" ) type agentSuite struct { bakery *bakery.Service dischargeKey *bakery.PublicKey discharger *Discharger server *httptest.Server } var _ = gc.Suite(&agentSuite{}) func (s *agentSuite) SetUpSuite(c *gc.C) { key, err := bakery.GenerateKey() c.Assert(err, gc.IsNil) s.dischargeKey = &key.Public c.Assert(err, gc.IsNil) bak, err := bakery.NewService(bakery.NewServiceParams{ Key: key, }) c.Assert(err, gc.IsNil) s.discharger = &Discharger{ Bakery: bak, } s.server = s.discharger.Serve() s.bakery, err = bakery.NewService(bakery.NewServiceParams{ Locator: bakery.PublicKeyLocatorMap{ s.discharger.URL: &key.Public, }, }) } func (s *agentSuite) TearDownSuite(c *gc.C) { s.server.Close() } var agentLoginTests = []struct { about string loginHandler func(*Discharger, http.ResponseWriter, *http.Request) expectError string }{{ about: "success", }, { about: "error response", loginHandler: func(d *Discharger, w http.ResponseWriter, _ *http.Request) { 
d.WriteJSON(w, http.StatusBadRequest, httpbakery.Error{ Code: "bad request", Message: "test error", }) }, expectError: `cannot get discharge from ".*": cannot start interactive session: test error`, }, { about: "unexpected response", loginHandler: func(d *Discharger, w http.ResponseWriter, _ *http.Request) { w.Write([]byte("OK")) }, expectError: `cannot get discharge from ".*": cannot start interactive session: unexpected response to non-interactive web page visit .* \(content type text/plain; charset=utf-8\)`, }, { about: "unexpected error response", loginHandler: func(d *Discharger, w http.ResponseWriter, _ *http.Request) { d.WriteJSON(w, http.StatusBadRequest, httpbakery.Error{}) }, expectError: `cannot get discharge from ".*": cannot start interactive session: unexpected response to non-interactive web page visit .* \(content type application/json\)`, }, { about: "incorrect JSON", loginHandler: func(d *Discharger, w http.ResponseWriter, _ *http.Request) { d.WriteJSON(w, http.StatusOK, httpbakery.Error{ Code: "bad request", Message: "test error", }) }, expectError: `cannot get discharge from ".*": cannot start interactive session: unexpected response to non-interactive web page visit .* \(content type application/json\)`, }} func (s *agentSuite) TestAgentLogin(c *gc.C) { u, err := url.Parse(s.discharger.URL) c.Assert(err, gc.IsNil) for i, test := range agentLoginTests { c.Logf("%d. 
%s", i, test.about) s.discharger.LoginHandler = test.loginHandler client := httpbakery.NewClient() client.Key, err = bakery.GenerateKey() c.Assert(err, gc.IsNil) err = agent.SetUpAuth(client, u, "test-user") c.Assert(err, gc.IsNil) m, err := s.bakery.NewMacaroon("", nil, []checkers.Caveat{{ Location: s.discharger.URL, Condition: "test condition", }}) c.Assert(err, gc.IsNil) ms, err := client.DischargeAll(m) if test.expectError != "" { c.Assert(err, gc.ErrorMatches, test.expectError) continue } c.Assert(err, gc.IsNil) err = s.bakery.Check(ms, bakery.FirstPartyCheckerFunc( func(caveat string) error { return nil }, )) c.Assert(err, gc.IsNil) } } func (s *agentSuite) TestSetUpAuthError(c *gc.C) { client := httpbakery.NewClient() err := agent.SetUpAuth(client, nil, "test-user") c.Assert(err, gc.ErrorMatches, "cannot set-up authentication: client key not configured") } func (s *agentSuite) TestNoCookieError(c *gc.C) { client := httpbakery.NewClient() client.VisitWebPage = agent.VisitWebPage(client) m, err := s.bakery.NewMacaroon("", nil, []checkers.Caveat{{ Location: s.discharger.URL, Condition: "test condition", }}) c.Assert(err, gc.IsNil) _, err = client.DischargeAll(m) c.Assert(err, gc.ErrorMatches, "cannot get discharge from .*: cannot start interactive session: cannot perform agent login: no agent-login cookie found") ierr := errgo.Cause(err).(*httpbakery.InteractionError) c.Assert(errgo.Cause(ierr.Reason), gc.Equals, http.ErrNoCookie) } func (s *agentSuite) TestLoginCookie(c *gc.C) { key, err := bakery.GenerateKey() c.Assert(err, gc.IsNil) tests := []struct { about string setCookie func(*httpbakery.Client, *url.URL) expectUser string expectKey *bakery.PublicKey expectError string expectCause error }{{ about: "success", setCookie: func(client *httpbakery.Client, u *url.URL) { agent.SetUpAuth(client, u, "bob") }, expectUser: "bob", expectKey: &key.Public, }, { about: "no cookie", setCookie: func(client *httpbakery.Client, u *url.URL) {}, expectError: "no agent-login 
cookie found", expectCause: agent.ErrNoAgentLoginCookie, }, { about: "invalid base64 encoding", setCookie: func(client *httpbakery.Client, u *url.URL) { client.Jar.SetCookies(u, []*http.Cookie{{ Name: "agent-login", Value: "x", }}) }, expectError: "cannot decode cookie value: illegal base64 data at input byte 0", }, { about: "invalid JSON", setCookie: func(client *httpbakery.Client, u *url.URL) { client.Jar.SetCookies(u, []*http.Cookie{{ Name: "agent-login", Value: base64.StdEncoding.EncodeToString([]byte("}")), }}) }, expectError: "cannot unmarshal agent login: invalid character '}' looking for beginning of value", }, { about: "no username", setCookie: func(client *httpbakery.Client, u *url.URL) { agent.SetCookie(client.Jar, u, "", &key.Public) }, expectError: "agent login has no user name", }, { about: "no public key", setCookie: func(client *httpbakery.Client, u *url.URL) { agent.SetCookie(client.Jar, u, "hello", nil) }, expectError: "agent login has no public key", }} var ( foundUser string foundKey *bakery.PublicKey foundErr error ) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { foundUser, foundKey, foundErr = agent.LoginCookie(req) })) defer srv.Close() srvURL, err := url.Parse(srv.URL) c.Assert(err, gc.IsNil) for i, test := range tests { c.Logf("test %d: %s", i, test.about) client := httpbakery.NewClient() client.Key = key test.setCookie(client, srvURL) req, err := http.NewRequest("GET", srv.URL, nil) c.Assert(err, gc.IsNil) resp, err := client.Do(req) c.Assert(err, gc.IsNil) c.Assert(resp.StatusCode, gc.Equals, http.StatusOK) if test.expectError != "" { c.Assert(foundErr, gc.ErrorMatches, test.expectError) if test.expectCause != nil { c.Assert(errgo.Cause(foundErr), gc.Equals, test.expectCause) } continue } c.Assert(foundUser, gc.Equals, test.expectUser) c.Assert(foundKey, gc.DeepEquals, test.expectKey) } } func ExampleVisitWebPage() { var key *bakery.KeyPair var u *url.URL client := httpbakery.NewClient() 
client.Key = key agent.SetCookie(client.Jar, u, "agent-username", &client.Key.Public) client.VisitWebPage = agent.VisitWebPage(client) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/discharge_test.go0000664000175000017500000001053712672604475026312 0ustar marcomarcopackage agent_test import ( "encoding/base64" "encoding/json" "fmt" "net/http" "net/http/httptest" "strconv" "sync" "gopkg.in/errgo.v1" "gopkg.in/macaroon.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon-bakery.v1/httpbakery/agent" ) type discharge struct { cavId string c chan error } type Discharger struct { Bakery *bakery.Service URL string LoginHandler func(*Discharger, http.ResponseWriter, *http.Request) mu sync.Mutex waiting []discharge } func (d *Discharger) ServeMux() *http.ServeMux { mux := http.NewServeMux() httpbakery.AddDischargeHandler(mux, "/", d.Bakery, d.checker) mux.Handle("/login", http.HandlerFunc(d.login)) mux.Handle("/wait", http.HandlerFunc(d.wait)) mux.Handle("/", http.HandlerFunc(d.notfound)) return mux } func (d *Discharger) Serve() *httptest.Server { s := httptest.NewServer(d.ServeMux()) d.URL = s.URL return s } func (d *Discharger) WriteJSON(w http.ResponseWriter, status int, v interface{}) error { body, err := json.Marshal(v) if err != nil { return errgo.Notef(err, "cannot marshal v") } w.Header().Set("Content-Type", "application/json") w.WriteHeader(status) if _, err := w.Write(body); err != nil { return errgo.Notef(err, "cannot write response") } return nil } func (d *Discharger) GetAgentLogin(r *http.Request) (*agent.AgentLogin, error) { c, err := r.Cookie("agent-login") if err != nil { return nil, errgo.Notef(err, "cannot find cookie") } b, err := base64.StdEncoding.DecodeString(c.Value) if err != nil { return nil, errgo.Notef(err, "cannot decode cookie") } var al agent.AgentLogin if err := json.Unmarshal(b, &al); err != nil { return nil, errgo.Notef(err, 
"cannot unmarshal cookie") } return &al, nil } func (d *Discharger) FinishWait(w http.ResponseWriter, r *http.Request, err error) { r.ParseForm() id, err := strconv.Atoi(r.Form.Get("waitid")) if err != nil { d.WriteJSON(w, http.StatusBadRequest, httpbakery.Error{ Message: fmt.Sprintf("cannot read waitid: %s", err), }) return } d.waiting[id].c <- err return } func (d *Discharger) checker(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) { d.mu.Lock() id := len(d.waiting) d.waiting = append(d.waiting, discharge{cavId, make(chan error, 1)}) d.mu.Unlock() return nil, &httpbakery.Error{ Code: httpbakery.ErrInteractionRequired, Message: "test interaction", Info: &httpbakery.ErrorInfo{ VisitURL: fmt.Sprintf("%s/login?waitid=%d", d.URL, id), WaitURL: fmt.Sprintf("%s/wait?waitid=%d", d.URL, id), }, } } func (d *Discharger) login(w http.ResponseWriter, r *http.Request) { r.ParseForm() if d.LoginHandler != nil { d.LoginHandler(d, w, r) return } al, err := d.GetAgentLogin(r) if err != nil { d.WriteJSON(w, http.StatusBadRequest, httpbakery.Error{ Message: fmt.Sprintf("cannot read agent login: %s", err), }) return } _, err = httpbakery.CheckRequest(d.Bakery, r, nil, nil) if err == nil { d.FinishWait(w, r, nil) d.WriteJSON(w, http.StatusOK, agent.AgentResponse{ AgentLogin: true, }) return } m, err := d.Bakery.NewMacaroon("", nil, []checkers.Caveat{ bakery.LocalThirdPartyCaveat(al.PublicKey), }) if err != nil { d.WriteJSON(w, http.StatusInternalServerError, httpbakery.Error{ Message: fmt.Sprintf("cannot create macaroon: %s", err), }) return } httpbakery.WriteDischargeRequiredError(w, m, "", nil) } func (d *Discharger) wait(w http.ResponseWriter, r *http.Request) { r.ParseForm() id, err := strconv.Atoi(r.Form.Get("waitid")) if err != nil { d.WriteJSON(w, http.StatusBadRequest, httpbakery.Error{ Message: fmt.Sprintf("cannot read waitid: %s", err), }) return } err = <-d.waiting[id].c if err != nil { d.WriteJSON(w, http.StatusForbidden, err) return } m, err := 
d.Bakery.Discharge( bakery.ThirdPartyCheckerFunc( func(cavId, caveat string) ([]checkers.Caveat, error) { return nil, nil }, ), d.waiting[id].cavId, ) if err != nil { d.WriteJSON(w, http.StatusForbidden, err) return } d.WriteJSON( w, http.StatusOK, struct { Macaroon *macaroon.Macaroon }{ Macaroon: m, }, ) } func (d *Discharger) notfound(w http.ResponseWriter, r *http.Request) { d.WriteJSON(w, http.StatusNotFound, httpbakery.Error{ Message: fmt.Sprintf("cannot find %s", r.URL.String()), }) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/package_test.go0000664000175000017500000000016712672604475025752 0ustar marcomarcopackage agent_test import ( "testing" gc "gopkg.in/check.v1" ) func TestPackage(t *testing.T) { gc.TestingT(t) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/0000775000175000017500000000000012677511232020760 5ustar marcomarcocharm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/keys_test.go0000664000175000017500000001270612672604475023336 0ustar marcomarcopackage bakery_test import ( "encoding/base64" "encoding/json" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/macaroon-bakery.v1/bakery" ) type KeysSuite struct{} var _ = gc.Suite(&KeysSuite{}) var testKey = newTestKey(0) func (*KeysSuite) TestMarshalBinary(c *gc.C) { data, err := testKey.MarshalBinary() c.Assert(err, gc.IsNil) c.Assert(data, jc.DeepEquals, []byte(testKey[:])) var key1 bakery.Key err = key1.UnmarshalBinary(data) c.Assert(err, gc.IsNil) c.Assert(key1, gc.DeepEquals, testKey) } func (*KeysSuite) TestMarshalText(c *gc.C) { data, err := testKey.MarshalText() c.Assert(err, gc.IsNil) c.Assert(string(data), gc.Equals, base64.StdEncoding.EncodeToString([]byte(testKey[:]))) var key1 bakery.Key err = key1.UnmarshalText(data) c.Assert(err, gc.IsNil) c.Assert(key1, gc.Equals, testKey) } func (*KeysSuite) TestKeyPairMarshalJSON(c *gc.C) { kp := bakery.KeyPair{ Public: bakery.PublicKey{testKey}, Private: bakery.PrivateKey{testKey}, } kp.Private.Key[0] = 99 
data, err := json.Marshal(kp) c.Assert(err, gc.IsNil) var x interface{} err = json.Unmarshal(data, &x) c.Assert(err, gc.IsNil) // Check that the fields have marshaled as strings. c.Assert(x.(map[string]interface{})["private"], gc.FitsTypeOf, "") c.Assert(x.(map[string]interface{})["public"], gc.FitsTypeOf, "") var kp1 bakery.KeyPair err = json.Unmarshal(data, &kp1) c.Assert(err, gc.IsNil) c.Assert(kp1, jc.DeepEquals, kp) } func newTestKey(n byte) bakery.Key { var k bakery.Key for i := range k { k[i] = n + byte(i) } return k } type addPublicKeyArgs struct { loc string prefix bool key bakery.Key } var publicKeyRingTests = []struct { about string add []addPublicKeyArgs loc string expectKey bakery.Key expectNotFound bool }{{ about: "empty keyring", add: []addPublicKeyArgs{}, loc: "something", expectNotFound: true, }, { about: "single non-prefix key", add: []addPublicKeyArgs{{ loc: "http://foo.com/x", key: testKey, }}, loc: "http://foo.com/x", expectKey: testKey, }, { about: "single prefix key", add: []addPublicKeyArgs{{ loc: "http://foo.com/x", key: testKey, prefix: true, }}, loc: "http://foo.com/x", expectKey: testKey, }, { about: "pattern longer than url", add: []addPublicKeyArgs{{ loc: "http://foo.com/x", key: testKey, prefix: true, }}, loc: "http://foo.com/", expectNotFound: true, }, { about: "pattern not ending in /", add: []addPublicKeyArgs{{ loc: "http://foo.com/x", key: testKey, prefix: true, }}, loc: "http://foo.com/x/y", expectNotFound: true, }, { about: "mismatched host", add: []addPublicKeyArgs{{ loc: "http://foo.com/x", key: testKey, prefix: true, }}, loc: "http://bar.com/x/y", expectNotFound: true, }, { about: "http vs https", add: []addPublicKeyArgs{{ loc: "http://foo.com/x", key: testKey, }}, loc: "https://foo.com/x", expectKey: testKey, }, { about: "naked pattern url with prefix", add: []addPublicKeyArgs{{ loc: "http://foo.com", key: testKey, prefix: true, }}, loc: "http://foo.com/arble", expectKey: testKey, }, { about: "naked pattern url with prefix 
with naked match url", add: []addPublicKeyArgs{{ loc: "http://foo.com", key: testKey, prefix: true, }}, loc: "http://foo.com", expectKey: testKey, }, { about: "naked pattern url, no prefix", add: []addPublicKeyArgs{{ loc: "http://foo.com", key: testKey, }}, loc: "http://foo.com", expectKey: testKey, }, { about: "naked pattern url, no prefix, match with no slash", add: []addPublicKeyArgs{{ loc: "http://foo.com", key: testKey, }}, loc: "http://foo.com/", expectKey: testKey, }, { about: "port mismatch", add: []addPublicKeyArgs{{ loc: "http://foo.com:8080/x", key: testKey, }}, loc: "https://foo.com/x", expectNotFound: true, }, { about: "url longer than pattern", add: []addPublicKeyArgs{{ loc: "http://foo.com/x/", key: testKey, prefix: true, }}, loc: "https://foo.com/x/y/z", expectKey: testKey, }, { about: "longer match preferred", add: []addPublicKeyArgs{{ loc: "http://foo.com/x/", key: newTestKey(0), prefix: true, }, { loc: "http://foo.com/x/y/", key: newTestKey(1), prefix: true, }}, loc: "https://foo.com/x/y/z", expectKey: newTestKey(1), }, { about: "longer match preferred, with other matches", add: []addPublicKeyArgs{{ loc: "http://foo.com/foo/arble", key: newTestKey(0), prefix: true, }, { loc: "http://foo.com/foo/arble/blah/", key: newTestKey(1), prefix: true, }, { loc: "http://foo.com/foo/", key: newTestKey(2), prefix: true, }, { loc: "http://foo.com/foobieblahbletcharbl", key: newTestKey(3), prefix: true, }}, loc: "https://foo.com/foo/arble/blah/x", expectKey: newTestKey(1), }} func (*KeysSuite) TestPublicKeyRing(c *gc.C) { for i, test := range publicKeyRingTests { c.Logf("test %d: %s", i, test.about) kr := bakery.NewPublicKeyRing() for _, add := range test.add { err := kr.AddPublicKeyForLocation(add.loc, add.prefix, &bakery.PublicKey{add.key}) c.Assert(err, gc.IsNil) } key, err := kr.PublicKeyForLocation(test.loc) if test.expectNotFound { c.Assert(err, gc.Equals, bakery.ErrNotFound) c.Assert(key, gc.IsNil) continue } c.Assert(err, gc.IsNil) c.Assert(*key, 
gc.Equals, bakery.PublicKey{test.expectKey}) } } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/codec.go0000664000175000017500000000730012672604475022373 0ustar marcomarcopackage bakery import ( "bytes" "crypto/rand" "encoding/base64" "encoding/json" "fmt" "golang.org/x/crypto/nacl/box" ) type caveatIdRecord struct { RootKey []byte Condition string } // caveatId defines the format of a third party caveat id. type caveatId struct { ThirdPartyPublicKey *PublicKey FirstPartyPublicKey *PublicKey Nonce []byte Id string } // boxEncoder encodes caveat ids confidentially to a third-party service using // authenticated public key encryption compatible with NaCl box. type boxEncoder struct { key *KeyPair } // newBoxEncoder creates a new boxEncoder that uses the given public key pair. func newBoxEncoder(key *KeyPair) *boxEncoder { return &boxEncoder{ key: key, } } func (enc *boxEncoder) encodeCaveatId(condition string, rootKey []byte, thirdPartyPub *PublicKey) (string, error) { id, err := enc.newCaveatId(condition, rootKey, thirdPartyPub) if err != nil { return "", err } data, err := json.Marshal(id) if err != nil { return "", fmt.Errorf("cannot marshal %#v: %v", id, err) } return base64.StdEncoding.EncodeToString(data), nil } func (enc *boxEncoder) newCaveatId(condition string, rootKey []byte, thirdPartyPub *PublicKey) (*caveatId, error) { var nonce [NonceLen]byte if _, err := rand.Read(nonce[:]); err != nil { return nil, fmt.Errorf("cannot generate random number for nonce: %v", err) } plain := caveatIdRecord{ RootKey: rootKey, Condition: condition, } plainData, err := json.Marshal(&plain) if err != nil { return nil, fmt.Errorf("cannot marshal %#v: %v", &plain, err) } sealed := box.Seal(nil, plainData, &nonce, thirdPartyPub.boxKey(), enc.key.Private.boxKey()) return &caveatId{ ThirdPartyPublicKey: thirdPartyPub, FirstPartyPublicKey: &enc.key.Public, Nonce: nonce[:], Id: base64.StdEncoding.EncodeToString(sealed), }, nil } // boxDecoder decodes caveat ids for third-party 
service that were encoded to // the third-party with authenticated public key encryption compatible with // NaCl box. type boxDecoder struct { key *KeyPair } // newBoxDecoder creates a new BoxDecoder using the given key pair. func newBoxDecoder(key *KeyPair) *boxDecoder { return &boxDecoder{ key: key, } } func (d *boxDecoder) decodeCaveatId(id string) (rootKey []byte, condition string, err error) { data, err := base64.StdEncoding.DecodeString(id) if err != nil { return nil, "", fmt.Errorf("cannot base64-decode caveat id: %v", err) } var tpid caveatId if err := json.Unmarshal(data, &tpid); err != nil { return nil, "", fmt.Errorf("cannot unmarshal caveat id %q: %v", data, err) } var recordData []byte recordData, err = d.encryptedCaveatId(tpid) if err != nil { return nil, "", err } var record caveatIdRecord if err := json.Unmarshal(recordData, &record); err != nil { return nil, "", fmt.Errorf("cannot decode third party caveat record: %v", err) } return record.RootKey, record.Condition, nil } func (d *boxDecoder) encryptedCaveatId(id caveatId) ([]byte, error) { if d.key == nil { return nil, fmt.Errorf("no public key for caveat id decryption") } if !bytes.Equal(d.key.Public.Key[:], id.ThirdPartyPublicKey.Key[:]) { return nil, fmt.Errorf("public key mismatch") } var nonce [NonceLen]byte if len(id.Nonce) != len(nonce) { return nil, fmt.Errorf("bad nonce length") } copy(nonce[:], id.Nonce) sealed, err := base64.StdEncoding.DecodeString(id.Id) if err != nil { return nil, fmt.Errorf("cannot base64-decode encrypted caveat id: %v", err) } out, ok := box.Open(nil, sealed, &nonce, id.FirstPartyPublicKey.boxKey(), d.key.Private.boxKey()) if !ok { return nil, fmt.Errorf("decryption of public-key encrypted caveat id %#v failed", id) } return out, nil } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/keys.go0000664000175000017500000001423512672604475022276 0ustar marcomarcopackage bakery import ( "crypto/rand" "encoding/base64" "net/url" "sync" "golang.org/x/crypto/nacl/box" 
"gopkg.in/errgo.v1" ) // KeyLen is the byte length of the Ed25519 public and private keys used for // caveat id encryption. const KeyLen = 32 // NonceLen is the byte length of the nonce values used for caveat id // encryption. const NonceLen = 24 // PublicKey is a 256-bit Ed25519 public key. type PublicKey struct { Key } // PrivateKey is a 256-bit Ed25519 private key. type PrivateKey struct { Key } // Key is a 256-bit Ed25519 key. type Key [KeyLen]byte // String returns the base64 representation of the key. func (k Key) String() string { return base64.StdEncoding.EncodeToString(k[:]) } // MarshalBinary implements encoding.BinaryMarshaler.MarshalBinary. func (k Key) MarshalBinary() ([]byte, error) { return k[:], nil } // UnmarshalBinary implements encoding.BinaryUnmarshaler.UnmarshalBinary. func (k *Key) UnmarshalBinary(data []byte) error { if len(data) != len(k) { return errgo.Newf("wrong length for key, got %d want %d", len(data), len(k)) } copy(k[:], data) return nil } // MarshalText implements encoding.TextMarshaler.MarshalText. func (k Key) MarshalText() ([]byte, error) { data := make([]byte, base64.StdEncoding.EncodedLen(len(k))) base64.StdEncoding.Encode(data, k[:]) return data, nil } // boxKey returns the box package's type for a key. func (k Key) boxKey() *[KeyLen]byte { return (*[KeyLen]byte)(&k) } // UnmarshalText implements encoding.TextUnmarshaler.UnmarshalText. func (k *Key) UnmarshalText(text []byte) error { // Note: we cannot decode directly into key because // DecodedLen can return more than the actual number // of bytes that will be required. data := make([]byte, base64.StdEncoding.DecodedLen(len(text))) n, err := base64.StdEncoding.Decode(data, text) if err != nil { return errgo.Notef(err, "cannot decode base64 key") } if n != len(k) { return errgo.Newf("wrong length for base64 key, got %d want %d", n, len(k)) } copy(k[:], data[0:n]) return nil } // PublicKeyLocator is used to find the public key for a given // caveat or macaroon location. 
type PublicKeyLocator interface { // PublicKeyForLocation returns the public key matching the caveat or // macaroon location. It returns ErrNotFound if no match is found. PublicKeyForLocation(loc string) (*PublicKey, error) } // PublicKeyLocatorMap implements PublicKeyLocator for a map. // Each entry in the map holds a public key value for // a location named by the map key. type PublicKeyLocatorMap map[string]*PublicKey // PublicKeyForLocation implements the PublicKeyLocator interface. func (m PublicKeyLocatorMap) PublicKeyForLocation(loc string) (*PublicKey, error) { if pk, ok := m[loc]; ok { return pk, nil } return nil, ErrNotFound } // KeyPair holds a public/private pair of keys. type KeyPair struct { Public PublicKey `json:"public"` Private PrivateKey `json:"private"` } // GenerateKey generates a new key pair. func GenerateKey() (*KeyPair, error) { var key KeyPair pub, priv, err := box.GenerateKey(rand.Reader) if err != nil { return nil, err } key.Public = PublicKey{*pub} key.Private = PrivateKey{*priv} return &key, nil } // String implements the fmt.Stringer interface // by returning the base64 representation of the // public key part of key. func (key *KeyPair) String() string { return key.Public.String() } type publicKeyRecord struct { url *url.URL prefix bool key PublicKey } // PublicKeyRing stores public keys for third-party services, accessible by // location string. type PublicKeyRing struct { // mu guards the fields following it. mu sync.Mutex // TODO(rog) use a more efficient data structure publicKeys []publicKeyRecord } // NewPublicKeyRing returns a new PublicKeyRing instance. func NewPublicKeyRing() *PublicKeyRing { return &PublicKeyRing{} } // AddPublicKeyForLocation adds a public key to the keyring for the given // location. If prefix is true, then inexact locations will be allowed // (see PublicKeyForLocation). 
The matching is similar to that // of http.ServeMux, For example, http://foo.com/x/ matches http://foo.com/x/y // but http://foo.com/x does not. // // As a special case, http://foo.com is always treated the same as http://foo.com/. // // The scheme is not significant. // // It is safe to call methods concurrently on this type. // The loc argument should be a valid URL. func (kr *PublicKeyRing) AddPublicKeyForLocation(loc string, prefix bool, key *PublicKey) error { url, err := url.Parse(loc) if err != nil { return errgo.Notef(err, "invalid location URL") } if url.Path == "" { url.Path = "/" } kr.mu.Lock() defer kr.mu.Unlock() newr := publicKeyRecord{ url: url, prefix: prefix, key: *key, } for i := range kr.publicKeys { k := &kr.publicKeys[i] if k.url.Path == url.Path && k.url.Host == url.Host { *k = newr return nil } } kr.publicKeys = append(kr.publicKeys, newr) return nil } // PublicKeyForLocation implements the PublicKeyLocator interface, // by returning the public key most closely associated with loc. // If loc is not a valid URL, it returns ErrNotFound; otherwise // the host part of the URL must match a registered location. // // Of those registered locations with matching host parts, // longer paths take precedence over short ones. // The matching is similar to that of http.ServeMux, except there // must be a host part. 
func (kr *PublicKeyRing) PublicKeyForLocation(loc string) (*PublicKey, error) { url, err := url.Parse(loc) if err != nil { return nil, ErrNotFound } if url.Path == "" { url.Path = "/" } kr.mu.Lock() defer kr.mu.Unlock() n := 0 var found *PublicKey for i := range kr.publicKeys { k := &kr.publicKeys[i] if !k.match(url) { continue } if found == nil || len(k.url.Path) > n { found = &k.key n = len(k.url.Path) } } if found == nil { return nil, ErrNotFound } return found, nil } func (r *publicKeyRecord) match(url *url.URL) bool { if url == nil { return false } if url.Host != r.url.Host { return false } if !r.prefix { return url.Path == r.url.Path } pattern := r.url.Path n := len(pattern) if pattern[n-1] != '/' { return pattern == url.Path } return len(url.Path) >= n && url.Path[0:n] == pattern } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/storage.go0000664000175000017500000000421512672604475022764 0ustar marcomarcopackage bakery import ( "encoding/json" "errors" "fmt" "sync" ) // Storage defines storage for macaroons. // Calling its methods concurrently is allowed. type Storage interface { // Put stores the item at the given location, overwriting // any item that might already be there. // TODO(rog) would it be better to lose the overwrite // semantics? Put(location string, item string) error // Get retrieves an item from the given location. // If the item is not there, it returns ErrNotFound. Get(location string) (item string, err error) // Del deletes the item from the given location. Del(location string) error } var ErrNotFound = errors.New("item not found") // NewMemStorage returns an implementation of Storage // that stores all items in memory. 
func NewMemStorage() Storage { return &memStorage{ values: make(map[string]string), } } type memStorage struct { mu sync.Mutex values map[string]string } func (s *memStorage) Put(location, item string) error { s.mu.Lock() defer s.mu.Unlock() s.values[location] = item return nil } func (s *memStorage) Get(location string) (string, error) { s.mu.Lock() defer s.mu.Unlock() item, ok := s.values[location] if !ok { return "", ErrNotFound } return item, nil } func (s *memStorage) Del(location string) error { s.mu.Lock() defer s.mu.Unlock() delete(s.values, location) return nil } // storageItem is the format used to store items in // the store. type storageItem struct { RootKey []byte } // storage is a thin wrapper around Storage that // converts to and from StorageItems in its // Put and Get methods. type storage struct { store Storage } func (s storage) Get(location string) (*storageItem, error) { itemStr, err := s.store.Get(location) if err != nil { return nil, err } var item storageItem if err := json.Unmarshal([]byte(itemStr), &item); err != nil { return nil, fmt.Errorf("badly formatted item in store: %v", err) } return &item, nil } func (s storage) Put(location string, item *storageItem) error { data, err := json.Marshal(item) if err != nil { panic(fmt.Errorf("cannot marshal storage item: %v", err)) } return s.store.Put(location, string(data)) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/storage_test.go0000664000175000017500000000246512672604475024030 0ustar marcomarcopackage bakery_test import ( "fmt" gc "gopkg.in/check.v1" "gopkg.in/macaroon-bakery.v1/bakery" ) type StorageSuite struct{} var _ = gc.Suite(&StorageSuite{}) func (*StorageSuite) TestMemStorage(c *gc.C) { store := bakery.NewMemStorage() err := store.Put("foo", "bar") c.Assert(err, gc.IsNil) item, err := store.Get("foo") c.Assert(err, gc.IsNil) c.Assert(item, gc.Equals, "bar") err = store.Put("bletch", "blat") c.Assert(err, gc.IsNil) item, err = store.Get("bletch") c.Assert(err, gc.IsNil) 
c.Assert(item, gc.Equals, "blat") item, err = store.Get("nothing") c.Assert(err, gc.Equals, bakery.ErrNotFound) c.Assert(item, gc.Equals, "") err = store.Del("bletch") c.Assert(err, gc.IsNil) item, err = store.Get("bletch") c.Assert(err, gc.Equals, bakery.ErrNotFound) c.Assert(item, gc.Equals, "") } func (*StorageSuite) TestConcurrentMemStorage(c *gc.C) { // If locking is not done right, this test will // definitely trigger the race detector. done := make(chan struct{}) store := bakery.NewMemStorage() for i := 0; i < 3; i++ { i := i go func() { k := fmt.Sprint(i) err := store.Put(k, k) c.Check(err, gc.IsNil) v, err := store.Get(k) c.Check(v, gc.Equals, k) err = store.Del(k) c.Check(err, gc.IsNil) done <- struct{}{} }() } for i := 0; i < 3; i++ { <-done } } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/service_test.go0000664000175000017500000003575012672604475024027 0ustar marcomarcopackage bakery_test import ( "encoding/json" "fmt" gc "gopkg.in/check.v1" "gopkg.in/macaroon.v1" jc "github.com/juju/testing/checkers" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" ) type ServiceSuite struct{} var _ = gc.Suite(&ServiceSuite{}) // TestSingleServiceFirstParty creates a single service // with a macaroon with one first party caveat. // It creates a request with this macaroon and checks that the service // can verify this macaroon as valid. 
func (s *ServiceSuite) TestSingleServiceFirstParty(c *gc.C) { p := bakery.NewServiceParams{ Location: "loc", Store: nil, Key: nil, Locator: nil, } service, err := bakery.NewService(p) c.Assert(err, gc.IsNil) primary, err := service.NewMacaroon("", nil, nil) c.Assert(err, gc.IsNil) c.Assert(primary.Location(), gc.Equals, "loc") cav := checkers.Caveat{ Location: "", Condition: "something", } err = service.AddCaveat(primary, cav) c.Assert(err, gc.IsNil) err = service.Check(macaroon.Slice{primary}, strcmpChecker("something")) c.Assert(err, gc.IsNil) } // TestMacaroonPaperFig6 implements an example flow as described in the macaroons paper: // http://theory.stanford.edu/~ataly/Papers/macaroons.pdf // There are three services, ts, fs, as: // ts is a storage service which has deligated authority to a forum service fs. // The forum service wants to require its users to be logged into to an authentication service as. // // The client obtains a macaroon from fs (minted by ts, with a third party caveat addressed to as). // The client obtains a discharge macaroon from as to satisfy this caveat. // The target service verifies the original macaroon it delegated to fs // No direct contact between as and ts is required func (s *ServiceSuite) TestMacaroonPaperFig6(c *gc.C) { locator := make(bakery.PublicKeyLocatorMap) as := newService(c, "as-loc", locator) ts := newService(c, "ts-loc", locator) fs := newService(c, "fs-loc", locator) // ts creates a macaroon. tsMacaroon, err := ts.NewMacaroon("", nil, nil) c.Assert(err, gc.IsNil) // ts somehow sends the macaroon to fs which adds a third party caveat to be discharged by as. 
err = fs.AddCaveat(tsMacaroon, checkers.Caveat{Location: "as-loc", Condition: "user==bob"}) c.Assert(err, gc.IsNil) // client asks for a discharge macaroon for each third party caveat d, err := bakery.DischargeAll(tsMacaroon, func(firstPartyLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { c.Assert(firstPartyLocation, gc.Equals, "ts-loc") c.Assert(cav.Location, gc.Equals, "as-loc") mac, err := as.Discharge(strcmpChecker("user==bob"), cav.Id) c.Assert(err, gc.IsNil) return mac, nil }) c.Assert(err, gc.IsNil) err = ts.Check(d, strcmpChecker("")) c.Assert(err, gc.IsNil) } func macStr(m *macaroon.Macaroon) string { data, err := json.MarshalIndent(m, "\t", "\t") if err != nil { panic(err) } return string(data) } // TestMacaroonPaperFig6FailsWithoutDischarges runs a similar test as TestMacaroonPaperFig6 // without the client discharging the third party caveats. func (s *ServiceSuite) TestMacaroonPaperFig6FailsWithoutDischarges(c *gc.C) { locator := make(bakery.PublicKeyLocatorMap) ts := newService(c, "ts-loc", locator) fs := newService(c, "fs-loc", locator) _ = newService(c, "as-loc", locator) // ts creates a macaroon. tsMacaroon, err := ts.NewMacaroon("", nil, nil) c.Assert(err, gc.IsNil) // ts somehow sends the macaroon to fs which adds a third party caveat to be discharged by as. err = fs.AddCaveat(tsMacaroon, checkers.Caveat{Location: "as-loc", Condition: "user==bob"}) c.Assert(err, gc.IsNil) // client makes request to ts err = ts.Check(macaroon.Slice{tsMacaroon}, strcmpChecker("")) c.Assert(err, gc.ErrorMatches, `verification failed: cannot find discharge macaroon for caveat ".*"`) } // TestMacaroonPaperFig6FailsWithBindingOnTamperedSignature runs a similar test as TestMacaroonPaperFig6 // with the discharge macaroon binding being done on a tampered signature. 
func (s *ServiceSuite) TestMacaroonPaperFig6FailsWithBindingOnTamperedSignature(c *gc.C) { locator := make(bakery.PublicKeyLocatorMap) as := newService(c, "as-loc", locator) ts := newService(c, "ts-loc", locator) fs := newService(c, "fs-loc", locator) // ts creates a macaroon. tsMacaroon, err := ts.NewMacaroon("", nil, nil) c.Assert(err, gc.IsNil) // ts somehow sends the macaroon to fs which adds a third party caveat to be discharged by as. err = fs.AddCaveat(tsMacaroon, checkers.Caveat{Location: "as-loc", Condition: "user==bob"}) c.Assert(err, gc.IsNil) // client asks for a discharge macaroon for each third party caveat d, err := bakery.DischargeAll(tsMacaroon, func(firstPartyLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { c.Assert(firstPartyLocation, gc.Equals, "ts-loc") c.Assert(cav.Location, gc.Equals, "as-loc") mac, err := as.Discharge(strcmpChecker("user==bob"), cav.Id) c.Assert(err, gc.IsNil) return mac, nil }) c.Assert(err, gc.IsNil) // client has all the discharge macaroons. For each discharge macaroon bind it to our tsMacaroon // and add it to our request. for _, dm := range d[1:] { dm.Bind([]byte("tampered-signature")) // Bind against an incorrect signature. } // client makes request to ts. err = ts.Check(d, strcmpChecker("")) c.Assert(err, gc.ErrorMatches, "verification failed: signature mismatch after caveat verification") } func (s *ServiceSuite) TestNeedDeclared(c *gc.C) { locator := make(bakery.PublicKeyLocatorMap) firstParty := newService(c, "first", locator) thirdParty := newService(c, "third", locator) // firstParty mints a macaroon with a third-party caveat addressed // to thirdParty with a need-declared caveat. m, err := firstParty.NewMacaroon("", nil, []checkers.Caveat{ checkers.NeedDeclaredCaveat(checkers.Caveat{ Location: "third", Condition: "something", }, "foo", "bar"), }) c.Assert(err, gc.IsNil) // The client asks for a discharge macaroon for each third party caveat. 
d, err := bakery.DischargeAll(m, func(_ string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { return thirdParty.Discharge(strcmpChecker("something"), cav.Id) }) c.Assert(err, gc.IsNil) // The required declared attributes should have been added // to the discharge macaroons. declared := checkers.InferDeclared(d) c.Assert(declared, gc.DeepEquals, checkers.Declared{ "foo": "", "bar": "", }) // Make sure the macaroons actually check out correctly // when provided with the declared checker. err = firstParty.Check(d, checkers.New(declared)) c.Assert(err, gc.IsNil) // Try again when the third party does add a required declaration. // The client asks for a discharge macaroon for each third party caveat. d, err = bakery.DischargeAll(m, func(_ string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { checker := thirdPartyCheckerWithCaveats{ checkers.DeclaredCaveat("foo", "a"), checkers.DeclaredCaveat("arble", "b"), } return thirdParty.Discharge(checker, cav.Id) }) c.Assert(err, gc.IsNil) // One attribute should have been added, the other was already there. declared = checkers.InferDeclared(d) c.Assert(declared, gc.DeepEquals, checkers.Declared{ "foo": "a", "bar": "", "arble": "b", }) err = firstParty.Check(d, checkers.New(declared)) c.Assert(err, gc.IsNil) // Try again, but this time pretend a client is sneakily trying // to add another "declared" attribute to alter the declarations. d, err = bakery.DischargeAll(m, func(_ string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { checker := thirdPartyCheckerWithCaveats{ checkers.DeclaredCaveat("foo", "a"), checkers.DeclaredCaveat("arble", "b"), } m, err := thirdParty.Discharge(checker, cav.Id) c.Assert(err, gc.IsNil) // Sneaky client adds a first party caveat. 
m.AddFirstPartyCaveat(checkers.DeclaredCaveat("foo", "c").Condition) return m, nil }) c.Assert(err, gc.IsNil) declared = checkers.InferDeclared(d) c.Assert(declared, gc.DeepEquals, checkers.Declared{ "bar": "", "arble": "b", }) err = firstParty.Check(d, checkers.New(declared)) c.Assert(err, gc.ErrorMatches, `verification failed: caveat "declared foo a" not satisfied: got foo=null, expected "a"`) } func (s *ServiceSuite) TestDischargeTwoNeedDeclared(c *gc.C) { locator := make(bakery.PublicKeyLocatorMap) firstParty := newService(c, "first", locator) thirdParty := newService(c, "third", locator) // firstParty mints a macaroon with two third party caveats // with overlapping attributes. m, err := firstParty.NewMacaroon("", nil, []checkers.Caveat{ checkers.NeedDeclaredCaveat(checkers.Caveat{ Location: "third", Condition: "x", }, "foo", "bar"), checkers.NeedDeclaredCaveat(checkers.Caveat{ Location: "third", Condition: "y", }, "bar", "baz"), }) c.Assert(err, gc.IsNil) // The client asks for a discharge macaroon for each third party caveat. // Since no declarations are added by the discharger, d, err := bakery.DischargeAll(m, func(_ string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { return thirdParty.Discharge(bakery.ThirdPartyCheckerFunc(func(_, caveat string) ([]checkers.Caveat, error) { return nil, nil }), cav.Id) }) c.Assert(err, gc.IsNil) declared := checkers.InferDeclared(d) c.Assert(declared, gc.DeepEquals, checkers.Declared{ "foo": "", "bar": "", "baz": "", }) err = firstParty.Check(d, checkers.New(declared)) c.Assert(err, gc.IsNil) // If they return conflicting values, the discharge fails. // The client asks for a discharge macaroon for each third party caveat. 
// Since no declarations are added by the discharger, d, err = bakery.DischargeAll(m, func(_ string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { return thirdParty.Discharge(bakery.ThirdPartyCheckerFunc(func(_, caveat string) ([]checkers.Caveat, error) { switch caveat { case "x": return []checkers.Caveat{ checkers.DeclaredCaveat("foo", "fooval1"), }, nil case "y": return []checkers.Caveat{ checkers.DeclaredCaveat("foo", "fooval2"), checkers.DeclaredCaveat("baz", "bazval"), }, nil } return nil, fmt.Errorf("not matched") }), cav.Id) }) c.Assert(err, gc.IsNil) declared = checkers.InferDeclared(d) c.Assert(declared, gc.DeepEquals, checkers.Declared{ "bar": "", "baz": "bazval", }) err = firstParty.Check(d, checkers.New(declared)) c.Assert(err, gc.ErrorMatches, `verification failed: caveat "declared foo fooval1" not satisfied: got foo=null, expected "fooval1"`) } func (s *ServiceSuite) TestDischargeMacaroonCannotBeUsedAsNormalMacaroon(c *gc.C) { locator := make(bakery.PublicKeyLocatorMap) firstParty := newService(c, "first", locator) thirdParty := newService(c, "third", locator) // First party mints a macaroon with a 3rd party caveat. m, err := firstParty.NewMacaroon("", nil, []checkers.Caveat{{ Location: "third", Condition: "true", }}) c.Assert(err, gc.IsNil) // Acquire the discharge macaroon, but don't bind it to the original. d, err := thirdParty.Discharge(bakery.ThirdPartyCheckerFunc(func(_, caveat string) ([]checkers.Caveat, error) { return nil, nil }), m.Caveats()[0].Id) c.Assert(err, gc.IsNil) // Make sure it cannot be used as a normal macaroon in the third party. 
err = thirdParty.Check(macaroon.Slice{d}, checkers.New()) c.Assert(err, gc.ErrorMatches, `verification failed: macaroon not found in storage`) } func (*ServiceSuite) TestCheckAny(c *gc.C) { svc := newService(c, "somewhere", nil) newMacaroons := func(id string, caveats ...checkers.Caveat) macaroon.Slice { m, err := svc.NewMacaroon(id, nil, caveats) c.Assert(err, gc.IsNil) return macaroon.Slice{m} } tests := []struct { about string macaroons []macaroon.Slice assert map[string]string checker checkers.Checker expectDeclared map[string]string expectId string expectError string }{{ about: "no macaroons", expectError: "verification failed: no macaroons", }, { about: "one macaroon, no caveats", macaroons: []macaroon.Slice{ newMacaroons("x1"), }, expectId: "x1", }, { about: "one macaroon, one unrecognized caveat", macaroons: []macaroon.Slice{ newMacaroons("x2", checkers.Caveat{ Condition: "bad", }), }, expectError: `verification failed: caveat "bad" not satisfied: caveat not recognized`, }, { about: "two macaroons, only one ok", macaroons: []macaroon.Slice{ newMacaroons("x3", checkers.Caveat{ Condition: "bad", }), newMacaroons("y3"), }, expectId: "y3", }, { about: "macaroon with declared caveats", macaroons: []macaroon.Slice{ newMacaroons("x4", checkers.DeclaredCaveat("key1", "value1"), checkers.DeclaredCaveat("key2", "value2"), ), }, expectDeclared: map[string]string{ "key1": "value1", "key2": "value2", }, expectId: "x4", }, { about: "macaroon with declared values and asserted keys with wrong value", macaroons: []macaroon.Slice{ newMacaroons("x5", checkers.DeclaredCaveat("key1", "value1"), checkers.DeclaredCaveat("key2", "value2"), ), }, assert: map[string]string{ "key1": "valuex", }, expectId: "x5", expectError: `verification failed: caveat "declared key1 value1" not satisfied: got key1="valuex", expected "value1"`, }, { about: "macaroon with declared values and asserted keys with correct value", macaroons: []macaroon.Slice{ newMacaroons("x6", 
checkers.DeclaredCaveat("key1", "value1"), checkers.DeclaredCaveat("key2", "value2"), ), }, assert: map[string]string{ "key1": "value1", }, expectDeclared: map[string]string{ "key1": "value1", "key2": "value2", }, expectId: "x6", }} for i, test := range tests { c.Logf("test %d: %s", i, test.about) if test.expectDeclared == nil { test.expectDeclared = make(map[string]string) } if test.checker == nil { test.checker = checkers.New() } decl, ms, err := svc.CheckAnyM(test.macaroons, test.assert, test.checker) if test.expectError != "" { c.Assert(err, gc.ErrorMatches, test.expectError) c.Assert(decl, gc.HasLen, 0) c.Assert(ms, gc.IsNil) continue } c.Assert(err, gc.IsNil) c.Assert(decl, jc.DeepEquals, test.expectDeclared) c.Assert(ms[0].Id(), gc.Equals, test.expectId) } } func newService(c *gc.C, location string, locator bakery.PublicKeyLocatorMap) *bakery.Service { keyPair, err := bakery.GenerateKey() c.Assert(err, gc.IsNil) svc, err := bakery.NewService(bakery.NewServiceParams{ Location: location, Key: keyPair, Locator: locator, }) c.Assert(err, gc.IsNil) if locator != nil { locator[location] = &keyPair.Public } return svc } type strcmpChecker string func (c strcmpChecker) CheckFirstPartyCaveat(caveat string) error { if caveat != string(c) { return fmt.Errorf("%v doesn't match %s", caveat, c) } return nil } func (c strcmpChecker) CheckThirdPartyCaveat(caveatId string, caveat string) ([]checkers.Caveat, error) { if caveat != string(c) { return nil, fmt.Errorf("%v doesn't match %s", caveat, c) } return nil, nil } type thirdPartyCheckerWithCaveats []checkers.Caveat func (c thirdPartyCheckerWithCaveats) CheckThirdPartyCaveat(caveatId string, caveat string) ([]checkers.Caveat, error) { return c, nil } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/discharge_test.go0000664000175000017500000000444712672604475024317 0ustar marcomarcopackage bakery_test import ( "fmt" gc "gopkg.in/check.v1" "gopkg.in/macaroon.v1" "gopkg.in/macaroon-bakery.v1/bakery" 
"gopkg.in/macaroon-bakery.v1/bakery/checkers" ) type DischargeSuite struct{} var _ = gc.Suite(&DischargeSuite{}) func alwaysOK(string) error { return nil } func (*DischargeSuite) TestDischargeAllNoDischarges(c *gc.C) { rootKey := []byte("root key") m, err := macaroon.New(rootKey, "id0", "loc0") c.Assert(err, gc.IsNil) ms, err := bakery.DischargeAll(m, noDischarge(c)) c.Assert(err, gc.IsNil) c.Assert(ms, gc.HasLen, 1) c.Assert(ms[0], gc.Equals, m) err = m.Verify(rootKey, alwaysOK, nil) c.Assert(err, gc.IsNil) } func (*DischargeSuite) TestDischargeAllManyDischarges(c *gc.C) { rootKey := []byte("root key") m0, err := macaroon.New(rootKey, "id0", "location0") c.Assert(err, gc.IsNil) totalRequired := 40 id := 1 addCaveats := func(m *macaroon.Macaroon) { for i := 0; i < 2; i++ { if totalRequired == 0 { break } cid := fmt.Sprint("id", id) err := m.AddThirdPartyCaveat([]byte("root key "+cid), cid, "somewhere") c.Assert(err, gc.IsNil) id++ totalRequired-- } } addCaveats(m0) getDischarge := func(loc string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { c.Assert(loc, gc.Equals, "location0") m, err := macaroon.New([]byte("root key "+cav.Id), cav.Id, "") c.Assert(err, gc.IsNil) addCaveats(m) return m, nil } ms, err := bakery.DischargeAll(m0, getDischarge) c.Assert(err, gc.IsNil) c.Assert(ms, gc.HasLen, 41) err = ms[0].Verify(rootKey, alwaysOK, ms[1:]) c.Assert(err, gc.IsNil) } func (*DischargeSuite) TestDischargeAllLocalDischarge(c *gc.C) { svc, err := bakery.NewService(bakery.NewServiceParams{}) c.Assert(err, gc.IsNil) clientKey, err := bakery.GenerateKey() c.Assert(err, gc.IsNil) m, err := svc.NewMacaroon("", nil, []checkers.Caveat{ bakery.LocalThirdPartyCaveat(&clientKey.Public), }) c.Assert(err, gc.IsNil) ms, err := bakery.DischargeAllWithKey(m, noDischarge(c), clientKey) c.Assert(err, gc.IsNil) err = svc.Check(ms, checkers.New()) c.Assert(err, gc.IsNil) } func noDischarge(c *gc.C) func(string, macaroon.Caveat) (*macaroon.Macaroon, error) { return func(string, 
// targetService implements a "target service", representing
// an arbitrary web service that wants to delegate authorization
// to third parties.
//
// endpoint is this service's own location (used as the macaroon
// location). authEndpoint and authPK identify the third-party
// authorization service; its public key is registered in the
// service's key ring so that caveats addressed to it can be
// verified.
func targetService(endpoint, authEndpoint string, authPK *bakery.PublicKey) (http.Handler, error) {
	key, err := bakery.GenerateKey()
	if err != nil {
		return nil, err
	}
	pkLocator := bakery.NewPublicKeyRing()
	svc, err := bakery.NewService(bakery.NewServiceParams{
		Key:      key,
		Location: endpoint,
		Locator:  pkLocator,
	})
	if err != nil {
		return nil, err
	}
	log.Printf("adding public key for location %s: %v", authEndpoint, authPK)
	// Trust authPK as the prime (third) argument indicates; the
	// boolean marks the key as covering all URLs under authEndpoint.
	// NOTE(review): exact semantics of the bool come from
	// AddPublicKeyForLocation — confirm against the bakery package docs.
	pkLocator.AddPublicKeyForLocation(authEndpoint, true, authPK)
	mux := http.NewServeMux()
	srv := &targetServiceHandler{
		svc:          svc,
		authEndpoint: authEndpoint,
	}
	// Two protected endpoints, each guarded by its own operation caveat.
	mux.HandleFunc("/gold/", srv.serveGold)
	mux.HandleFunc("/silver/", srv.serveSilver)
	return mux, nil
}
req, nil, checker); err != nil { srv.writeError(w, req, "silver", err) return } fmt.Fprintf(w, "every cloud has a silver lining") } // checkers implements the caveat checking for the service. func (svc *targetServiceHandler) checkers(req *http.Request, operation string) checkers.Checker { return checkers.CheckerFunc{ Condition_: "operation", Check_: func(_, op string) error { if op != operation { return fmt.Errorf("macaroon not valid for operation") } return nil }, } } // writeError writes an error to w in response to req. If the error was // generated because of a required macaroon that the client does not // have, we mint a macaroon that, when discharged, will grant the client // the right to execute the given operation. // // The logic in this function is crucial to the security of the service // - it must determine for a given operation what caveats to attach. func (srv *targetServiceHandler) writeError(w http.ResponseWriter, req *http.Request, operation string, verr error) { log.Printf("writing error with operation %q", operation) fail := func(code int, msg string, args ...interface{}) { if code == http.StatusInternalServerError { msg = "internal error: " + msg } http.Error(w, fmt.Sprintf(msg, args...), code) } if _, ok := errgo.Cause(verr).(*bakery.VerificationError); !ok { fail(http.StatusForbidden, "%v", verr) return } // Work out what caveats we need to apply for the given operation. // Could special-case the operation here if desired. caveats := []checkers.Caveat{ checkers.TimeBeforeCaveat(time.Now().Add(5 * time.Minute)), { Location: srv.authEndpoint, Condition: "access-allowed", }, { Condition: "operation " + operation, }} // Mint an appropriate macaroon and send it back to the client. 
// authService implements an authorization service,
// that can discharge third-party caveats added
// to other macaroons.
//
// endpoint is the service's own location; key is its long-term
// key pair, whose public half must be known (via a key locator)
// to any target service that addresses caveats here.
func authService(endpoint string, key *bakery.KeyPair) (http.Handler, error) {
	svc, err := bakery.NewService(bakery.NewServiceParams{
		Location: endpoint,
		Key:      key,
		Locator:  bakery.NewPublicKeyRing(),
	})
	if err != nil {
		return nil, err
	}
	mux := http.NewServeMux()
	// Mount the standard httpbakery discharge endpoints at the root,
	// delegating caveat checking to thirdPartyChecker.
	httpbakery.AddDischargeHandler(mux, "/", svc, thirdPartyChecker)
	return mux, nil
}
return []checkers.Caveat{ httpbakery.SameClientIPAddrCaveat(req), }, nil } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/example/meeting/0000775000175000017500000000000012672604475024052 5ustar marcomarcocharm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/example/meeting/meeting_test.go0000664000175000017500000000326212672604475027073 0ustar marcomarcopackage meeting_test import ( "time" gc "gopkg.in/check.v1" "gopkg.in/macaroon-bakery.v1/bakery/example/meeting" ) type suite struct{} var _ = gc.Suite(&suite{}) func (*suite) TestRendezvousWaitBeforeDone(c *gc.C) { m := meeting.New() id, err := m.NewRendezvous([]byte("first data")) c.Assert(err, gc.IsNil) c.Assert(id, gc.Not(gc.Equals), "") waitDone := make(chan struct{}) go func() { data0, data1, err := m.Wait(id) c.Check(err, gc.IsNil) c.Check(string(data0), gc.Equals, "first data") c.Check(string(data1), gc.Equals, "second data") close(waitDone) }() time.Sleep(10 * time.Millisecond) err = m.Done(id, []byte("second data")) c.Assert(err, gc.IsNil) select { case <-waitDone: case <-time.After(2 * time.Second): c.Errorf("timed out waiting for rendezvous") } // Check that item has now been deleted. data0, data1, err := m.Wait(id) c.Assert(data0, gc.IsNil) c.Assert(data1, gc.IsNil) c.Assert(err, gc.ErrorMatches, `rendezvous ".*" not found`) } func (*suite) TestRendezvousDoneBeforeWait(c *gc.C) { m := meeting.New() id, err := m.NewRendezvous([]byte("first data")) c.Assert(err, gc.IsNil) c.Assert(id, gc.Not(gc.Equals), "") err = m.Done(id, []byte("second data")) c.Assert(err, gc.IsNil) err = m.Done(id, []byte("other second data")) c.Assert(err, gc.ErrorMatches, `rendezvous ".*" done twice`) data0, data1, err := m.Wait(id) c.Assert(err, gc.IsNil) c.Assert(string(data0), gc.Equals, "first data") c.Assert(string(data1), gc.Equals, "second data") // Check that item has now been deleted. 
// newId returns a fresh random rendezvous identifier: 12 bytes
// of cryptographic randomness rendered as 24 hex characters.
func newId() (string, error) {
	var raw [12]byte
	_, err := rand.Read(raw[:])
	if err != nil {
		return "", fmt.Errorf("cannot read random id: %v", err)
	}
	return fmt.Sprintf("%x", raw[:]), nil
}
"gopkg.in/check.v1" "gopkg.in/macaroon-bakery.v1/bakery" ) func TestPackage(t *testing.T) { gc.TestingT(t) } type exampleSuite struct { authEndpoint string authPublicKey *bakery.PublicKey } var _ = gc.Suite(&exampleSuite{}) func (s *exampleSuite) SetUpSuite(c *gc.C) { key, err := bakery.GenerateKey() c.Assert(err, gc.IsNil) s.authPublicKey = &key.Public s.authEndpoint, err = serve(func(endpoint string) (http.Handler, error) { return authService(endpoint, key) }) c.Assert(err, gc.IsNil) } func (s *exampleSuite) TestExample(c *gc.C) { client := newClient() serverEndpoint, err := serve(func(endpoint string) (http.Handler, error) { return targetService(endpoint, s.authEndpoint, s.authPublicKey) }) c.Assert(err, gc.IsNil) c.Logf("gold request") resp, err := clientRequest(client, serverEndpoint+"/gold") c.Assert(err, gc.IsNil) c.Assert(resp, gc.Equals, "all is golden") c.Logf("silver request") resp, err = clientRequest(client, serverEndpoint+"/silver") c.Assert(err, gc.IsNil) c.Assert(resp, gc.Equals, "every cloud has a silver lining") } func (s *exampleSuite) BenchmarkExample(c *gc.C) { client := newClient() serverEndpoint, err := serve(func(endpoint string) (http.Handler, error) { return targetService(endpoint, s.authEndpoint, s.authPublicKey) }) c.Assert(err, gc.IsNil) c.ResetTimer() for i := 0; i < c.N; i++ { resp, err := clientRequest(client, serverEndpoint) c.Assert(err, gc.IsNil) c.Assert(resp, gc.Equals, "hello, world\n") } } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/example/client.go0000664000175000017500000000174012672604475024231 0ustar marcomarcopackage main import ( "fmt" "io/ioutil" "net/http" "gopkg.in/errgo.v1" "gopkg.in/macaroon-bakery.v1/httpbakery" ) // client represents a client of the target service. // In this simple example, it just tries a GET // request, which will fail unless the client // has the required authorization. 
func clientRequest(client *httpbakery.Client, serverEndpoint string) (string, error) { // The Do function implements the mechanics // of actually gathering discharge macaroons // when required, and retrying the request // when necessary. req, err := http.NewRequest("GET", serverEndpoint, nil) if err != nil { return "", errgo.Notef(err, "cannot make new HTTP request") } resp, err := client.Do(req) if err != nil { return "", errgo.NoteMask(err, "GET failed", errgo.Any) } defer resp.Body.Close() // TODO(rog) unmarshal error data, err := ioutil.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("cannot read response: %v", err) } return string(data), nil } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/example/main.go0000664000175000017500000000433012672604475023675 0ustar marcomarco// This example demonstrates three components: // // - A target service, representing a web server that // wishes to use macaroons for authorization. // It delegates authorization to a third-party // authorization server by adding third-party // caveats to macaroons that it sends to the user. // // - A client, representing a client wanting to make // requests to the server. // // - An authorization server. // // In a real system, these three components would // live on different machines; the client component // could also be a web browser. 
// (TODO: write javascript discharge gatherer) package main import ( "fmt" "log" "net" "net/http" "net/url" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/httpbakery" ) var defaultHTTPClient = httpbakery.NewHTTPClient() func main() { key, err := bakery.GenerateKey() if err != nil { log.Fatalf("cannot generate auth service key pair: %v", err) } authPublicKey := &key.Public authEndpoint := mustServe(func(endpoint string) (http.Handler, error) { return authService(endpoint, key) }) serverEndpoint := mustServe(func(endpoint string) (http.Handler, error) { return targetService(endpoint, authEndpoint, authPublicKey) }) resp, err := clientRequest(newClient(), serverEndpoint) if err != nil { log.Fatalf("client failed: %v", err) } fmt.Printf("client success: %q\n", resp) } func mustServe(newHandler func(string) (http.Handler, error)) (endpointURL string) { endpoint, err := serve(newHandler) if err != nil { log.Fatalf("cannot serve: %v", err) } return endpoint } func serve(newHandler func(string) (http.Handler, error)) (endpointURL string, err error) { listener, err := net.Listen("tcp", "localhost:0") if err != nil { return "", fmt.Errorf("cannot listen: %v", err) } endpointURL = "http://" + listener.Addr().String() handler, err := newHandler(endpointURL) if err != nil { return "", fmt.Errorf("cannot start handler: %v", err) } go http.Serve(listener, handler) return endpointURL, nil } func newClient() *httpbakery.Client { return &httpbakery.Client{ Client: httpbakery.NewHTTPClient(), VisitWebPage: func(url *url.URL) error { fmt.Printf("please visit this web page:\n") fmt.Printf("\t%s\n", url) return nil }, } } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/example/idservice/0000775000175000017500000000000012672604475024377 5ustar marcomarcocharm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/example/idservice/idservice.go0000664000175000017500000003307312672604475026711 0ustar marcomarcopackage idservice import ( "fmt" "html/template" "log" "net/http" 
// New returns a new handler that services an identity-providing
// service. This acts as a login service and can discharge third-party caveats
// for users.
//
// The returned handler serves:
//   - the httpbakery discharge endpoints (mounted at "/"),
//   - /user/          user administration (JSON),
//   - /login          the interactive login page,
//   - /loginattempt   form submission from the login page,
//   - /question       (unimplemented) attribute queries,
//   - /wait           long-poll for a pending discharge.
func New(p Params) (http.Handler, error) {
	svc, err := bakery.NewService(p.Service)
	if err != nil {
		return nil, err
	}
	h := &handler{
		svc:   svc,
		users: p.Users,
		place: &place{meeting.New()},
	}
	mux := http.NewServeMux()
	httpbakery.AddDischargeHandler(mux, "/", svc, h.checkThirdPartyCaveat)
	mux.Handle("/user/", mkHandler(handleJSON(h.userHandler)))
	mux.HandleFunc("/login", h.loginHandler)
	mux.Handle("/question", mkHandler(handleJSON(h.questionHandler)))
	mux.Handle("/wait", mkHandler(handleJSON(h.waitHandler)))
	mux.HandleFunc("/loginattempt", h.loginAttemptHandler)
	return mux, nil
}
func (h *handler) userHandler(p httprequest.Params) (interface{}, error) { ctxt := h.newContext(p.Request, "change-user") if _, err := httpbakery.CheckRequest(h.svc, p.Request, nil, ctxt); err != nil { // TODO do this only if the error cause is *bakery.VerificationError // We issue a macaroon with a third-party caveat targetting // the id service itself. This means that the flow for self-created // macaroons is just the same as for any other service. // Theoretically, we could just redirect the user to the // login page, but that would p.Requestuire a different flow // and it's not clear that it would be an advantage. m, err := h.svc.NewMacaroon("", nil, []checkers.Caveat{{ Location: h.svc.Location(), Condition: "member-of-group admin", }, { Condition: "operation change-user", }}) if err != nil { return nil, errgo.Notef(err, "cannot mint new macaroon") } return nil, &httpbakery.Error{ Message: err.Error(), Code: httpbakery.ErrDischargeRequired, Info: &httpbakery.ErrorInfo{ Macaroon: m, }, } } // PUT /user/$user - create new user // PUT /user/$user/group-membership - change group membership of user return nil, errgo.New("not implemented yet") } type loginPageParams struct { WaitId string } var loginPage = template.Must(template.New("").Parse(`
User name:

Password: Log in

`)) // loginHandler serves up a login page for the user to interact with, // having been redirected there as part of a macaroon discharge requirement. // This is a proxy for any third-party authorization service. func (h *handler) loginHandler(w http.ResponseWriter, req *http.Request) { req.ParseForm() waitId := req.Form.Get("waitid") if waitId == "" { http.Error(w, "wait id not found in form", http.StatusBadRequest) return } err := loginPage.Execute(w, loginPageParams{ WaitId: waitId, }) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } } // loginAttemptHandler is invoked when a user clicks on the "Log in" // button on the login page. It checks the credentials and then // completes the rendezvous, allowing the original wait // request to complete. func (h *handler) loginAttemptHandler(w http.ResponseWriter, req *http.Request) { log.Printf("login attempt %s", req.URL) req.ParseForm() waitId := req.Form.Get("waitid") if waitId == "" { http.Error(w, "wait id not found in form", http.StatusBadRequest) return } user := req.Form.Get("user") info, ok := h.users[user] if !ok { http.Error(w, fmt.Sprintf("user %q not found", user), http.StatusUnauthorized) return } if req.Form.Get("password") != info.Password { http.Error(w, "bad password", http.StatusUnauthorized) return } // User and password match; we can allow the user // to have a macaroon that they can use later to prove // to us that they have logged in. We also add a cookie // to hold the logged in user name. m, err := h.svc.NewMacaroon("", nil, []checkers.Caveat{{ Condition: "user-is " + user, }}) // TODO(rog) when this fails, we should complete the rendezvous // to cause the wait request to complete with an appropriate error. 
// newContext returns a new caveat-checking context
// for the client making the given request.
//
// The declared user is taken from the username cookie if present.
// That cookie grants no authority by itself — it only names which
// user the context's caveats should be checked against; proof of
// identity still comes from the client's macaroons.
func (h *handler) newContext(req *http.Request, operation string) *context {
	// Determine the current logged-in user, if any.
	var username string
	for _, c := range req.Cookies() {
		if c.Name == cookieUser {
			// TODO could potentially allow several concurrent
			// logins - caveats asking about current user privilege
			// could be satisfied if any of the user names had that
			// privilege.
			username = c.Value
			break
		}
	}
	if username == "" {
		log.Printf("not logged in")
	} else {
		log.Printf("logged in as %q", username)
	}
	return &context{
		handler:      h,
		req:          req,
		svc:          h.svc,
		declaredUser: username,
		operation:    operation,
	}
}
// waitHandler serves an HTTP endpoint that waits until a macaroon
// has been discharged, and returns the discharge macaroon.
//
// It blocks in place.Wait until the corresponding /loginattempt
// request completes the rendezvous identified by the waitid form
// parameter, then discharges the original third-party caveat on
// behalf of the now-verified user.
func (h *handler) waitHandler(p httprequest.Params) (interface{}, error) {
	// Errors from ParseForm are deliberately ignored; a missing or
	// malformed waitid is caught by the empty-string check below.
	p.Request.ParseForm()
	waitId := p.Request.Form.Get("waitid")
	if waitId == "" {
		return nil, fmt.Errorf("wait id parameter not found")
	}
	caveat, login, err := h.place.Wait(waitId)
	if err != nil {
		return nil, fmt.Errorf("cannot wait: %v", err)
	}
	if login.User == "" {
		return nil, fmt.Errorf("login failed")
	}
	// Create a context to verify the third party caveat.
	// Note that because the information in login has been
	// supplied directly by our own code, we can assume
	// that it can be trusted, so we set verifiedUser to true.
	ctxt := &context{
		handler:      h,
		req:          p.Request,
		svc:          h.svc,
		declaredUser: login.User,
		verifiedUser: true,
	}
	// Now that we've verified the user, we can check again to see
	// if we can discharge the original caveat.
	macaroon, err := h.svc.Discharge(ctxt, caveat.CaveatId)
	if err != nil {
		return nil, errgo.Mask(err)
	}
	return WaitResponse{
		Macaroon: macaroon,
	}, nil
}
req *http.Request } func (ctxt *context) Condition() string { return "" } func (ctxt *context) Check(cond, arg string) error { switch cond { case "user-is": if arg != ctxt.declaredUser { return fmt.Errorf("not logged in as %q", arg) } return nil case "operation": if ctxt.operation != "" && arg == ctxt.operation { return nil } return errgo.Newf("operation mismatch") default: return checkers.ErrCaveatNotRecognized } } func (ctxt *context) CheckThirdPartyCaveat(cavId, cav string) ([]checkers.Caveat, error) { h := ctxt.handler log.Printf("checking third party caveat %q", cav) op, rest, err := checkers.ParseCaveat(cav) if err != nil { return nil, fmt.Errorf("cannot parse caveat %q: %v", cav, err) } switch op { case "can-speak-for": // TODO(rog) We ignore the currently logged in user here, // but perhaps it would be better to let the user be in control // of which user they're currently "declared" as, rather than // getting privileges of users we currently have macaroons for. checkErr := ctxt.canSpeakFor(rest) if checkErr == nil { return ctxt.firstPartyCaveats(), nil } return nil, h.needLogin(cavId, cav, checkErr, ctxt.req) case "member-of-group": // The third-party caveat is asking if the currently logged in // user is a member of a particular group. 
// We can find the currently logged in user by checking // the username cookie (which doesn't provide any power, but // indicates which user name to check) if ctxt.declaredUser == "" { return nil, h.needLogin(cavId, cav, errgo.New("not logged in"), ctxt.req) } if err := ctxt.canSpeakFor(ctxt.declaredUser); err != nil { return nil, errgo.Notef(err, "cannot speak for declared user %q", ctxt.declaredUser) } info, ok := h.users[ctxt.declaredUser] if !ok { return nil, errgo.Newf("user %q not found", ctxt.declaredUser) } group := rest if !info.Groups[group] { return nil, errgo.Newf("not privileged enough") } return ctxt.firstPartyCaveats(), nil default: return nil, checkers.ErrCaveatNotRecognized } } // canSpeakFor checks whether the client sending // the given request can speak for the given user. // We do that by declaring that user and checking // whether the supplied macaroons in the request // verify OK. func (ctxt *context) canSpeakFor(user string) error { if user == ctxt.declaredUser && ctxt.verifiedUser { // The context is a direct result of logging in. // No need to check macaroons. return nil } ctxt1 := *ctxt ctxt1.declaredUser = user _, err := httpbakery.CheckRequest(ctxt.svc, ctxt.req, nil, &ctxt1) if err != nil { log.Printf("client cannot speak for %q: %v", user, err) } else { log.Printf("client can speak for %q", user) } return err } // firstPartyCaveats returns first-party caveats suitable // for adding to a third-party caveat discharge macaroon // within the receiving context. func (ctxt *context) firstPartyCaveats() []checkers.Caveat { // TODO return caveat specifying that ip-addr is // the same as that given in ctxt.req.RemoteAddr // and other 1st party caveats, potentially. 
return nil } func errorToResponse(err error) (int, interface{}) { cause := errgo.Cause(err) if cause, ok := cause.(*httpbakery.Error); ok { err1 := *cause err1.Message = err.Error() return http.StatusInternalServerError, &err1 } return http.StatusInternalServerError, &httpbakery.Error{ Message: err.Error(), } } func mkHandler(h httprouter.Handle) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { h(w, req, nil) }) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/example/idservice/idservice_test.go0000664000175000017500000001230212672604475027740 0ustar marcomarcopackage idservice_test import ( "fmt" "io/ioutil" "log" "net" "net/http" "net/url" "regexp" "time" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/example/idservice" "gopkg.in/macaroon-bakery.v1/httpbakery" ) type suite struct { authEndpoint string authPublicKey *bakery.PublicKey client *httpbakery.Client } var _ = gc.Suite(&suite{}) func (s *suite) SetUpSuite(c *gc.C) { key, err := bakery.GenerateKey() c.Assert(err, gc.IsNil) s.authPublicKey = &key.Public s.authEndpoint = serve(c, func(endpoint string) (http.Handler, error) { return idservice.New(idservice.Params{ Users: map[string]*idservice.UserInfo{ "rog": { Password: "password", }, "root": { Password: "superman", Groups: map[string]bool{ "target-service-users": true, }, }, }, Service: bakery.NewServiceParams{ Location: endpoint, Store: bakery.NewMemStorage(), Key: key, Locator: bakery.NewPublicKeyRing(), }, }) }) c.Logf("auth endpoint at %s", s.authEndpoint) } func (s *suite) SetUpTest(c *gc.C) { s.client = httpbakery.NewClient() } func (s *suite) TestIdService(c *gc.C) { serverEndpoint := serve(c, func(endpoint string) (http.Handler, error) { return targetService(endpoint, s.authEndpoint, s.authPublicKey) }) c.Logf("target service endpoint at %s", serverEndpoint) visitDone := make(chan struct{}) s.client.VisitWebPage = func(u *url.URL) error { 
go func() { err := s.scrapeLoginPage(u) c.Logf("scrape returned %v", err) c.Check(err, gc.IsNil) visitDone <- struct{}{} }() return nil } resp, err := s.clientRequest(serverEndpoint + "/gold") c.Assert(err, gc.IsNil) c.Assert(resp, gc.Equals, "all is golden") select { case <-visitDone: case <-time.After(5 * time.Second): c.Fatalf("visit never done") } // Try again. We shouldn't need to interact this time. s.client.VisitWebPage = nil resp, err = s.clientRequest(serverEndpoint + "/silver") c.Assert(err, gc.IsNil) c.Assert(resp, gc.Equals, "every cloud has a silver lining") } func serve(c *gc.C, newHandler func(string) (http.Handler, error)) (endpointURL string) { listener, err := net.Listen("tcp", "localhost:0") c.Assert(err, gc.IsNil) endpointURL = "http://" + listener.Addr().String() handler, err := newHandler(endpointURL) c.Assert(err, gc.IsNil) go http.Serve(listener, handler) return endpointURL } // client represents a client of the target service. In this simple // example, it just tries a GET request, which will fail unless the // client has the required authorization. func (s *suite) clientRequest(serverEndpoint string) (string, error) { req, err := http.NewRequest("GET", serverEndpoint, nil) if err != nil { return "", errgo.Notef(err, "cannot make new HTTP request") } // The Do function implements the mechanics // of actually gathering discharge macaroons // when required, and retrying the request // when necessary. resp, err := s.client.Do(req) if err != nil { return "", errgo.NoteMask(err, "GET failed", errgo.Any) } defer resp.Body.Close() // TODO(rog) unmarshal error data, err := ioutil.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("cannot read response: %v", err) } return string(data), nil } // Patterns to search for the relevant information in the login page. // Alternatives to this might be (in likely ascending order of complexity): // - use the template itself as the pattern. 
// - parse the html with encoding/xml // - parse the html with code.google.com/p/go.net/html var ( actionPat = regexp.MustCompile(`
0 { cav := need[0] need = need[1:] var dm *macaroon.Macaroon var err error if localKey != nil && cav.Location == "local" { dm, _, err = Discharge(localKey, localDischargeChecker, cav.Id) } else { dm, err = getDischarge(firstPartyLocation, cav) } if err != nil { return nil, errgo.NoteMask(err, fmt.Sprintf("cannot get discharge from %q", cav.Location), errgo.Any) } dm.Bind(sig) discharges = append(discharges, dm) addCaveats(dm) } return discharges, nil } var localDischargeChecker = ThirdPartyCheckerFunc(func(caveatId, caveat string) ([]checkers.Caveat, error) { if caveat != "true" { return nil, checkers.ErrCaveatNotRecognized } return nil, nil }) charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/package_test.go0000664000175000017500000000017012672604475023746 0ustar marcomarcopackage bakery_test import ( "testing" gc "gopkg.in/check.v1" ) func TestPackage(t *testing.T) { gc.TestingT(t) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/0000775000175000017500000000000012672604475023136 5ustar marcomarcocharm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/storage.go0000664000175000017500000000361512672604475025136 0ustar marcomarco// Package mgostorage provides an implementation of the // bakery Storage interface that uses MongoDB to store // items. package mgostorage import ( "gopkg.in/errgo.v1" "gopkg.in/mgo.v2" "gopkg.in/macaroon-bakery.v1/bakery" ) // New returns an implementation of Storage // that stores all items in MongoDB. // It never returns an error (the error return // is for backward compatibility with a previous // version that could return an error). // // Note that the caller is responsible for closing // the mgo session associated with the collection. 
func New(c *mgo.Collection) (bakery.Storage, error) { return mgoStorage{ col: c, }, nil } type mgoStorage struct { col *mgo.Collection } type storageDoc struct { Location string `bson:"_id"` Item string `bson:"item"` // OldLocation is set for backward compatibility reasons - the // original version of the code used "loc" as a unique index // so we need to maintain the uniqueness otherwise // inserts will fail. // TODO remove this when moving to bakery.v2. OldLocation string `bson:"loc"` } // Put implements bakery.Storage.Put. func (s mgoStorage) Put(location, item string) error { i := storageDoc{ Location: location, OldLocation: location, Item: item, } _, err := s.col.UpsertId(location, i) if err != nil { return errgo.Notef(err, "cannot store item for location %q", location) } return nil } // Get implements bakery.Storage.Get. func (s mgoStorage) Get(location string) (string, error) { var i storageDoc err := s.col.FindId(location).One(&i) if err != nil { if err == mgo.ErrNotFound { return "", bakery.ErrNotFound } return "", errgo.Notef(err, "cannot get %q", location) } return i.Item, nil } // Del implements bakery.Storage.Del. 
func (s mgoStorage) Del(location string) error { err := s.col.RemoveId(location) if err != nil { return errgo.Notef(err, "cannot remove %q", location) } return nil } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/storage_test.go0000664000175000017500000000566712672604475026206 0ustar marcomarcopackage mgostorage_test import ( "errors" "fmt" "github.com/juju/testing" gc "gopkg.in/check.v1" "gopkg.in/macaroon.v1" "gopkg.in/mgo.v2" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/bakery/mgostorage" ) type StorageSuite struct { testing.MgoSuite session *mgo.Session store bakery.Storage } var _ = gc.Suite(&StorageSuite{}) func (s *StorageSuite) SetUpTest(c *gc.C) { s.MgoSuite.SetUpTest(c) s.session = testing.MgoServer.MustDial() store, err := mgostorage.New(s.session.DB("test").C("items")) c.Assert(err, gc.IsNil) s.store = store } func (s *StorageSuite) TearDownTest(c *gc.C) { s.session.Close() s.MgoSuite.TearDownTest(c) } func (s *StorageSuite) TestMgoStorage(c *gc.C) { err := s.store.Put("foo", "bar") c.Assert(err, gc.IsNil) item, err := s.store.Get("foo") c.Assert(err, gc.IsNil) c.Assert(item, gc.Equals, "bar") err = s.store.Put("bletch", "blat") c.Assert(err, gc.IsNil) item, err = s.store.Get("bletch") c.Assert(err, gc.IsNil) c.Assert(item, gc.Equals, "blat") item, err = s.store.Get("nothing") c.Assert(err, gc.Equals, bakery.ErrNotFound) c.Assert(item, gc.Equals, "") err = s.store.Del("bletch") c.Assert(err, gc.IsNil) item, err = s.store.Get("bletch") c.Assert(err, gc.Equals, bakery.ErrNotFound) c.Assert(item, gc.Equals, "") } func (s *StorageSuite) TestMgoStorageUpsert(c *gc.C) { err := s.store.Put("foo", "bar") c.Assert(err, gc.IsNil) item, err := s.store.Get("foo") c.Assert(err, gc.IsNil) c.Assert(item, gc.Equals, "bar") err = s.store.Put("foo", "ba-ba") c.Assert(err, gc.IsNil) item, err = s.store.Get("foo") c.Assert(err, gc.IsNil) c.Assert(item, gc.Equals, "ba-ba") } func (s 
*StorageSuite) TestConcurrentMgoStorage(c *gc.C) { done := make(chan struct{}) for i := 0; i < 3; i++ { i := i go func() { k := fmt.Sprint(i) err := s.store.Put(k, k) c.Check(err, gc.IsNil) v, err := s.store.Get(k) c.Check(v, gc.Equals, k) err = s.store.Del(k) c.Check(err, gc.IsNil) done <- struct{}{} }() } for i := 0; i < 3; i++ { <-done } } type testChecker struct{} func (tc *testChecker) CheckFirstPartyCaveat(caveat string) error { if caveat != "is-authorised bob" { return errors.New("not bob") } return nil } func (s *StorageSuite) TestCreateMacaroon(c *gc.C) { keypair, err := bakery.GenerateKey() c.Assert(err, gc.IsNil) params := bakery.NewServiceParams{Location: "local", Store: s.store, Key: keypair} service, err := bakery.NewService(params) c.Assert(err, gc.IsNil) c.Assert(service, gc.NotNil) m, err := service.NewMacaroon( "123", []byte("abc"), []checkers.Caveat{checkers.Caveat{Location: "", Condition: "is-authorised bob"}}, ) c.Assert(err, gc.IsNil) c.Assert(m, gc.NotNil) item, err := s.store.Get("123") c.Assert(err, gc.IsNil) c.Assert(item, gc.DeepEquals, `{"RootKey":"YWJj"}`) err = service.Check(macaroon.Slice{m}, &testChecker{}) c.Assert(err, gc.IsNil) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/package_test.go0000664000175000017500000000023712672604475026121 0ustar marcomarcopackage mgostorage_test import ( "testing" jujutesting "github.com/juju/testing" ) func TestPackage(t *testing.T) { jujutesting.MgoTestPackage(t, nil) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/checkers/0000775000175000017500000000000012672604475022556 5ustar marcomarcocharm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/checkers/declared.go0000664000175000017500000000620212672604475024650 0ustar marcomarcopackage checkers import ( "strings" "gopkg.in/errgo.v1" "gopkg.in/macaroon.v1" ) // DeclaredCaveat returns a "declared" caveat asserting that the given key is // set to the given value. 
If a macaroon has exactly one first party // caveat asserting the value of a particular key, then InferDeclared // will be able to infer the value, and then DeclaredChecker will allow // the declared value if it has the value specified here. // // If the key is empty or contains a space, DeclaredCaveat // will return an error caveat. func DeclaredCaveat(key string, value string) Caveat { if strings.Contains(key, " ") || key == "" { return ErrorCaveatf("invalid caveat 'declared' key %q", key) } return firstParty(CondDeclared, key+" "+value) } // NeedDeclaredCaveat returns a third party caveat that // wraps the provided third party caveat and requires // that the third party must add "declared" caveats for // all the named keys. func NeedDeclaredCaveat(cav Caveat, keys ...string) Caveat { if cav.Location == "" { return ErrorCaveatf("need-declared caveat is not third-party") } return Caveat{ Location: cav.Location, Condition: CondNeedDeclared + " " + strings.Join(keys, ",") + " " + cav.Condition, } } // Declared implements a checker that will // check that any "declared" caveats have a matching // key for their value in the map. type Declared map[string]string // Condition implements Checker.Condition. func (c Declared) Condition() string { return CondDeclared } // Check implements Checker.Check by checking that the given // argument holds a key in the map with a matching value. func (c Declared) Check(_, arg string) error { // Note that we don't need to check the condition argument // here because it has been specified explicitly in the // return from the Condition method. 
parts := strings.SplitN(arg, " ", 2) if len(parts) != 2 { return errgo.Newf("declared caveat has no value") } val, ok := c[parts[0]] if !ok { return errgo.Newf("got %s=null, expected %q", parts[0], parts[1]) } if val != parts[1] { return errgo.Newf("got %s=%q, expected %q", parts[0], val, parts[1]) } return nil } // InferDeclared retrieves any declared information from // the given macaroons and returns it as a key-value map. // // Information is declared with a first party caveat as created // by DeclaredCaveat. // // If there are two caveats that declare the same key with // different values, the information is omitted from the map. // When the caveats are later checked, this will cause the // check to fail. func InferDeclared(ms macaroon.Slice) Declared { var conflicts []string info := make(Declared) for _, m := range ms { for _, cav := range m.Caveats() { if cav.Location != "" { continue } name, rest, err := ParseCaveat(cav.Id) if err != nil { continue } if name != CondDeclared { continue } parts := strings.SplitN(rest, " ", 2) if len(parts) != 2 { continue } key, val := parts[0], parts[1] if oldVal, ok := info[key]; ok && oldVal != val { conflicts = append(conflicts, key) continue } info[key] = val } } for _, key := range conflicts { delete(info, key) } return info } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/checkers/export_test.go0000664000175000017500000000005112672604475025461 0ustar marcomarcopackage checkers var TimeNow = &timeNow charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/checkers/time.go0000664000175000017500000000317612672604475024052 0ustar marcomarcopackage checkers import ( "fmt" "strings" "time" "gopkg.in/errgo.v1" "gopkg.in/macaroon.v1" ) var timeNow = time.Now // TimeBefore is a checker that checks caveats // as created by TimeBeforeCaveat. 
var TimeBefore = CheckerFunc{ Condition_: CondTimeBefore, Check_: func(_, cav string) error { t, err := time.Parse(time.RFC3339Nano, cav) if err != nil { return errgo.Mask(err) } if !timeNow().Before(t) { return fmt.Errorf("macaroon has expired") } return nil }, } // TimeBeforeCaveat returns a caveat that specifies that // the time that it is checked should be before t. func TimeBeforeCaveat(t time.Time) Caveat { return firstParty(CondTimeBefore, t.UTC().Format(time.RFC3339Nano)) } // ExpiryTime returns the minimum time of any time-before caveats found // in the given slice and whether there were any such caveats found. func ExpiryTime(cavs []macaroon.Caveat) (time.Time, bool) { var t time.Time var expires bool for _, cav := range cavs { if !strings.HasPrefix(cav.Id, CondTimeBefore) { continue } et, err := time.Parse(CondTimeBefore+" "+time.RFC3339Nano, cav.Id) if err != nil { continue } if !expires || et.Before(t) { t = et expires = true } } return t, expires } // MacaroonsExpiryTime returns the minimum time of any time-before // caveats found in the given macaroons and whether there were // any such caveats found. func MacaroonsExpiryTime(ms macaroon.Slice) (time.Time, bool) { var t time.Time var expires bool for _, m := range ms { if et, ex := ExpiryTime(m.Caveats()); ex { if !expires || et.Before(t) { t = et expires = true } } } return t, expires } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/checkers/checkers_test.go0000664000175000017500000003341612672604475025742 0ustar marcomarcopackage checkers_test import ( "fmt" "net" "time" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/errgo.v1" "gopkg.in/macaroon.v1" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" ) type CheckersSuite struct{} var _ = gc.Suite(&CheckersSuite{}) // Freeze time for the tests. 
var now = func() time.Time { now, err := time.Parse(time.RFC3339Nano, "2006-01-02T15:04:05.123Z") if err != nil { panic(err) } *checkers.TimeNow = func() time.Time { return now } return now }() type checkTest struct { caveat string expectError string expectCause func(err error) bool } var isCaveatNotRecognized = errgo.Is(checkers.ErrCaveatNotRecognized) var checkerTests = []struct { about string checker bakery.FirstPartyChecker checks []checkTest }{{ about: "empty MultiChecker", checker: checkers.New(), checks: []checkTest{{ caveat: "something", expectError: `caveat "something" not satisfied: caveat not recognized`, expectCause: isCaveatNotRecognized, }, { caveat: "", expectError: `cannot parse caveat "": empty caveat`, expectCause: isCaveatNotRecognized, }, { caveat: " hello", expectError: `cannot parse caveat " hello": caveat starts with space character`, expectCause: isCaveatNotRecognized, }}, }, { about: "MultiChecker with some values", checker: checkers.New( argChecker("a", "aval"), argChecker("b", "bval"), ), checks: []checkTest{{ caveat: "a aval", }, { caveat: "b bval", }, { caveat: "a wrong", expectError: `caveat "a wrong" not satisfied: wrong arg`, expectCause: errgo.Is(errWrongArg), }}, }, { about: "MultiChecker with several of the same condition", checker: checkers.New( argChecker("a", "aval"), argChecker("a", "bval"), ), checks: []checkTest{{ caveat: "a aval", expectError: `caveat "a aval" not satisfied: wrong arg`, expectCause: errgo.Is(errWrongArg), }, { caveat: "a bval", expectError: `caveat "a bval" not satisfied: wrong arg`, expectCause: errgo.Is(errWrongArg), }}, }, { about: "nested MultiChecker", checker: checkers.New( argChecker("a", "aval"), argChecker("b", "bval"), checkers.New( argChecker("c", "cval"), checkers.New( argChecker("d", "dval"), ), argChecker("e", "eval"), ), ), checks: []checkTest{{ caveat: "a aval", }, { caveat: "b bval", }, { caveat: "c cval", }, { caveat: "d dval", }, { caveat: "e eval", }, { caveat: "a wrong", expectError: 
`caveat "a wrong" not satisfied: wrong arg`, expectCause: errgo.Is(errWrongArg), }, { caveat: "c wrong", expectError: `caveat "c wrong" not satisfied: wrong arg`, expectCause: errgo.Is(errWrongArg), }, { caveat: "d wrong", expectError: `caveat "d wrong" not satisfied: wrong arg`, expectCause: errgo.Is(errWrongArg), }, { caveat: "f something", expectError: `caveat "f something" not satisfied: caveat not recognized`, expectCause: isCaveatNotRecognized, }}, }, { about: "Map with no items", checker: checkers.New( checkers.Map{}, ), checks: []checkTest{{ caveat: "a aval", expectError: `caveat "a aval" not satisfied: caveat not recognized`, expectCause: isCaveatNotRecognized, }}, }, { about: "Map with some values", checker: checkers.New( checkers.Map{ "a": argChecker("a", "aval").Check, "b": argChecker("b", "bval").Check, }, ), checks: []checkTest{{ caveat: "a aval", }, { caveat: "b bval", }, { caveat: "a wrong", expectError: `caveat "a wrong" not satisfied: wrong arg`, expectCause: errgo.Is(errWrongArg), }, { caveat: "b wrong", expectError: `caveat "b wrong" not satisfied: wrong arg`, expectCause: errgo.Is(errWrongArg), }}, }, { about: "time within limit", checker: checkers.New( checkers.TimeBefore, ), checks: []checkTest{{ caveat: checkers.TimeBeforeCaveat(now.Add(1)).Condition, }, { caveat: checkers.TimeBeforeCaveat(now).Condition, expectError: `caveat "time-before 2006-01-02T15:04:05.123Z" not satisfied: macaroon has expired`, }, { caveat: checkers.TimeBeforeCaveat(now.Add(-1)).Condition, expectError: `caveat "time-before 2006-01-02T15:04:05.122999999Z" not satisfied: macaroon has expired`, }, { caveat: `time-before bad-date`, expectError: `caveat "time-before bad-date" not satisfied: parsing time "bad-date" as "2006-01-02T15:04:05.999999999Z07:00": cannot parse "bad-date" as "2006"`, }, { caveat: checkers.TimeBeforeCaveat(now).Condition + " ", expectError: `caveat "time-before 2006-01-02T15:04:05.123Z " not satisfied: parsing time "2006-01-02T15:04:05.123Z ": extra 
text: `, }}, }, { about: "declared, no entries", checker: checkers.New(checkers.Declared{}), checks: []checkTest{{ caveat: checkers.DeclaredCaveat("a", "aval").Condition, expectError: `caveat "declared a aval" not satisfied: got a=null, expected "aval"`, }, { caveat: checkers.CondDeclared, expectError: `caveat "declared" not satisfied: declared caveat has no value`, }}, }, { about: "declared, some entries", checker: checkers.New(checkers.Declared{ "a": "aval", "b": "bval", "spc": " a b", }), checks: []checkTest{{ caveat: checkers.DeclaredCaveat("a", "aval").Condition, }, { caveat: checkers.DeclaredCaveat("b", "bval").Condition, }, { caveat: checkers.DeclaredCaveat("spc", " a b").Condition, }, { caveat: checkers.DeclaredCaveat("a", "bval").Condition, expectError: `caveat "declared a bval" not satisfied: got a="aval", expected "bval"`, }, { caveat: checkers.DeclaredCaveat("a", " aval").Condition, expectError: `caveat "declared a aval" not satisfied: got a="aval", expected " aval"`, }, { caveat: checkers.DeclaredCaveat("spc", "a b").Condition, expectError: `caveat "declared spc a b" not satisfied: got spc=" a b", expected "a b"`, }, { caveat: checkers.DeclaredCaveat("", "a b").Condition, expectError: `caveat "error invalid caveat 'declared' key \\"\\"" not satisfied: bad caveat`, }, { caveat: checkers.DeclaredCaveat("a b", "a b").Condition, expectError: `caveat "error invalid caveat 'declared' key \\"a b\\"" not satisfied: bad caveat`, }}, }, { about: "error caveat", checker: checkers.New(), checks: []checkTest{{ caveat: checkers.ErrorCaveatf("").Condition, expectError: `caveat "error " not satisfied: bad caveat`, }, { caveat: checkers.ErrorCaveatf("something %d", 134).Condition, expectError: `caveat "error something 134" not satisfied: bad caveat`, }}, }, { about: "error caveat overrides other", checker: checkers.New(argChecker("error", "something")), checks: []checkTest{{ caveat: checkers.ErrorCaveatf("something").Condition, expectError: `caveat "error something" 
not satisfied: bad caveat`, }}, }} var errWrongArg = errgo.New("wrong arg") func argChecker(expectCond, checkArg string) checkers.Checker { return checkers.CheckerFunc{ Condition_: expectCond, Check_: func(cond, arg string) error { if cond != expectCond { panic(fmt.Errorf("got condition %q want %q", cond, expectCond)) } if arg != checkArg { return errWrongArg } return nil }, } } func (s *CheckersSuite) TestCheckers(c *gc.C) { for i, test := range checkerTests { c.Logf("test %d: %s", i, test.about) for j, check := range test.checks { c.Logf("\tcheck %d", j) err := test.checker.CheckFirstPartyCaveat(check.caveat) if check.expectError != "" { c.Assert(err, gc.ErrorMatches, check.expectError) if check.expectCause == nil { check.expectCause = errgo.Any } c.Assert(check.expectCause(errgo.Cause(err)), gc.Equals, true) } else { c.Assert(err, gc.IsNil) } } } } func (s *CheckersSuite) TestClientIPAddrCaveat(c *gc.C) { cav := checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 1}) c.Assert(cav, gc.Equals, checkers.Caveat{ Condition: "client-ip-addr 127.0.0.1", }) cav = checkers.ClientIPAddrCaveat(net.ParseIP("2001:4860:0:2001::68")) c.Assert(cav, gc.Equals, checkers.Caveat{ Condition: "client-ip-addr 2001:4860:0:2001::68", }) cav = checkers.ClientIPAddrCaveat(nil) c.Assert(cav, gc.Equals, checkers.Caveat{ Condition: "error bad IP address []", }) cav = checkers.ClientIPAddrCaveat(net.IP{123, 3}) c.Assert(cav, gc.Equals, checkers.Caveat{ Condition: "error bad IP address [123 3]", }) } func (s *CheckersSuite) TestClientOriginCaveat(c *gc.C) { cav := checkers.ClientOriginCaveat("") c.Assert(cav, gc.Equals, checkers.Caveat{ Condition: "origin ", }) cav = checkers.ClientOriginCaveat("somewhere") c.Assert(cav, gc.Equals, checkers.Caveat{ Condition: "origin somewhere", }) } var inferDeclaredTests = []struct { about string caveats [][]checkers.Caveat expect checkers.Declared }{{ about: "no macaroons", expect: checkers.Declared{}, }, { about: "single macaroon with one declaration", caveats: 
[][]checkers.Caveat{{{ Condition: "declared foo bar", }}}, expect: checkers.Declared{ "foo": "bar", }, }, { about: "only one argument to declared", caveats: [][]checkers.Caveat{{{ Condition: "declared foo", }}}, expect: checkers.Declared{}, }, { about: "spaces in value", caveats: [][]checkers.Caveat{{{ Condition: "declared foo bar bloggs", }}}, expect: checkers.Declared{ "foo": "bar bloggs", }, }, { about: "attribute with declared prefix", caveats: [][]checkers.Caveat{{{ Condition: "declaredccf foo", }}}, expect: checkers.Declared{}, }, { about: "several macaroons with different declares", caveats: [][]checkers.Caveat{{ checkers.DeclaredCaveat("a", "aval"), checkers.DeclaredCaveat("b", "bval"), }, { checkers.DeclaredCaveat("c", "cval"), checkers.DeclaredCaveat("d", "dval"), }}, expect: checkers.Declared{ "a": "aval", "b": "bval", "c": "cval", "d": "dval", }, }, { about: "duplicate values", caveats: [][]checkers.Caveat{{ checkers.DeclaredCaveat("a", "aval"), checkers.DeclaredCaveat("a", "aval"), checkers.DeclaredCaveat("b", "bval"), }, { checkers.DeclaredCaveat("a", "aval"), checkers.DeclaredCaveat("b", "bval"), checkers.DeclaredCaveat("c", "cval"), checkers.DeclaredCaveat("d", "dval"), }}, expect: checkers.Declared{ "a": "aval", "b": "bval", "c": "cval", "d": "dval", }, }, { about: "conflicting values", caveats: [][]checkers.Caveat{{ checkers.DeclaredCaveat("a", "aval"), checkers.DeclaredCaveat("a", "conflict"), checkers.DeclaredCaveat("b", "bval"), }, { checkers.DeclaredCaveat("a", "conflict"), checkers.DeclaredCaveat("b", "another conflict"), checkers.DeclaredCaveat("c", "cval"), checkers.DeclaredCaveat("d", "dval"), }}, expect: checkers.Declared{ "c": "cval", "d": "dval", }, }, { about: "third party caveats ignored", caveats: [][]checkers.Caveat{{{ Condition: "declared a no conflict", Location: "location", }, checkers.DeclaredCaveat("a", "aval"), }}, expect: checkers.Declared{ "a": "aval", }, }, { about: "unparseable caveats ignored", caveats: 
[][]checkers.Caveat{{{ Condition: " bad", }, checkers.DeclaredCaveat("a", "aval"), }}, expect: checkers.Declared{ "a": "aval", }, }} func (*CheckersSuite) TestInferDeclared(c *gc.C) { for i, test := range inferDeclaredTests { c.Logf("test %d: %s", i, test.about) ms := make(macaroon.Slice, len(test.caveats)) for i, caveats := range test.caveats { m, err := macaroon.New(nil, fmt.Sprint(i), "") c.Assert(err, gc.IsNil) for _, cav := range caveats { if cav.Location == "" { m.AddFirstPartyCaveat(cav.Condition) } else { m.AddThirdPartyCaveat(nil, cav.Condition, cav.Location) } } ms[i] = m } c.Assert(checkers.InferDeclared(ms), jc.DeepEquals, test.expect) } } var operationCheckerTests = []struct { about string caveat checkers.Caveat oc checkers.OperationChecker expectError string }{{ about: "allowed operation", caveat: checkers.AllowCaveat("op1", "op2", "op3"), oc: checkers.OperationChecker("op1"), }, { about: "not denied oc", caveat: checkers.DenyCaveat("op1", "op2", "op3"), oc: checkers.OperationChecker("op4"), }, { about: "not allowed oc", caveat: checkers.AllowCaveat("op1", "op2", "op3"), oc: checkers.OperationChecker("op4"), expectError: "op4 not allowed", }, { about: "denied oc", caveat: checkers.DenyCaveat("op1", "op2", "op3"), oc: checkers.OperationChecker("op1"), expectError: "op1 not allowed", }, { about: "unrecognised caveat", caveat: checkers.ErrorCaveatf("unrecognized"), oc: checkers.OperationChecker("op1"), expectError: "caveat not recognized", }, { about: "empty deny caveat", caveat: checkers.DenyCaveat(), oc: checkers.OperationChecker("op1"), }} func (*CheckersSuite) TestOperationChecker(c *gc.C) { for i, test := range operationCheckerTests { c.Logf("%d: %s", i, test.about) cond, arg, err := checkers.ParseCaveat(test.caveat.Condition) c.Assert(err, gc.IsNil) c.Assert(test.oc.Condition(), gc.Equals, "") err = test.oc.Check(cond, arg) if test.expectError == "" { c.Assert(err, gc.IsNil) continue } c.Assert(err, gc.ErrorMatches, test.expectError) } } var 
operationErrorCaveatTests = []struct { about string caveat checkers.Caveat expectCondition string }{{ about: "empty allow", caveat: checkers.AllowCaveat(), expectCondition: "error no operations allowed", }, { about: "allow: invalid operation name", caveat: checkers.AllowCaveat("op1", "operation number 2"), expectCondition: `error invalid operation name "operation number 2"`, }, { about: "deny: invalid operation name", caveat: checkers.DenyCaveat("op1", "operation number 2"), expectCondition: `error invalid operation name "operation number 2"`, }} func (*CheckersSuite) TestOperationErrorCaveatTest(c *gc.C) { for i, test := range operationErrorCaveatTests { c.Logf("%d: %s", i, test.about) c.Assert(test.caveat.Condition, gc.Matches, test.expectCondition) } } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/checkers/package_test.go0000664000175000017500000000017212672604475025537 0ustar marcomarcopackage checkers_test import ( "testing" gc "gopkg.in/check.v1" ) func TestPackage(t *testing.T) { gc.TestingT(t) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/checkers/time_test.go0000664000175000017500000000743512672604475025113 0ustar marcomarcopackage checkers_test import ( "time" gc "gopkg.in/check.v1" "gopkg.in/macaroon.v1" "gopkg.in/macaroon-bakery.v1/bakery/checkers" ) type timeSuite struct{} var _ = gc.Suite(&timeSuite{}) var t1 = time.Now() var t2 = t1.Add(1 * time.Hour) var t3 = t2.Add(1 * time.Hour) var expireTimeTests = []struct { about string caveats []macaroon.Caveat expectTime time.Time expectExpires bool }{{ about: "nil caveats", }, { about: "empty caveats", caveats: []macaroon.Caveat{}, }, { about: "single time-before caveat", caveats: []macaroon.Caveat{ macaroon.Caveat{ Id: checkers.TimeBeforeCaveat(t1).Condition, }, }, expectTime: t1, expectExpires: true, }, { about: "single deny caveat", caveats: []macaroon.Caveat{ macaroon.Caveat{ Id: checkers.DenyCaveat("abc").Condition, }, }, }, { about: "multiple time-before caveat", caveats: 
[]macaroon.Caveat{ macaroon.Caveat{ Id: checkers.TimeBeforeCaveat(t2).Condition, }, macaroon.Caveat{ Id: checkers.TimeBeforeCaveat(t1).Condition, }, }, expectTime: t1, expectExpires: true, }, { about: "mixed caveats", caveats: []macaroon.Caveat{ macaroon.Caveat{ Id: checkers.TimeBeforeCaveat(t1).Condition, }, macaroon.Caveat{ Id: checkers.AllowCaveat("abc").Condition, }, macaroon.Caveat{ Id: checkers.TimeBeforeCaveat(t2).Condition, }, macaroon.Caveat{ Id: checkers.DenyCaveat("def").Condition, }, }, expectTime: t1, expectExpires: true, }, { about: "invalid time-before caveat", caveats: []macaroon.Caveat{ macaroon.Caveat{ Id: checkers.CondTimeBefore + " tomorrow", }, }, }} func (s *timeSuite) TestExpireTime(c *gc.C) { for i, test := range expireTimeTests { c.Logf("%d. %s", i, test.about) t, expires := checkers.ExpiryTime(test.caveats) c.Assert(t.Equal(test.expectTime), gc.Equals, true, gc.Commentf("obtained: %s, expected: %s", t, test.expectTime)) c.Assert(expires, gc.Equals, test.expectExpires) } } var macaroonsExpireTimeTests = []struct { about string macaroons macaroon.Slice expectTime time.Time expectExpires bool }{{ about: "nil macaroons", }, { about: "empty macaroons", macaroons: macaroon.Slice{}, }, { about: "single macaroon without caveats", macaroons: macaroon.Slice{ mustNewMacaroon(), }, }, { about: "multiple macaroon without caveats", macaroons: macaroon.Slice{ mustNewMacaroon(), mustNewMacaroon(), }, }, { about: "single macaroon with time-before caveat", macaroons: macaroon.Slice{ mustNewMacaroon( checkers.TimeBeforeCaveat(t1).Condition, ), }, expectTime: t1, expectExpires: true, }, { about: "single macaroon with multiple time-before caveats", macaroons: macaroon.Slice{ mustNewMacaroon( checkers.TimeBeforeCaveat(t2).Condition, checkers.TimeBeforeCaveat(t1).Condition, ), }, expectTime: t1, expectExpires: true, }, { about: "multiple macaroons with multiple time-before caveats", macaroons: macaroon.Slice{ mustNewMacaroon( 
checkers.TimeBeforeCaveat(t3).Condition, checkers.TimeBeforeCaveat(t2).Condition, ), mustNewMacaroon( checkers.TimeBeforeCaveat(t3).Condition, checkers.TimeBeforeCaveat(t1).Condition, ), }, expectTime: t1, expectExpires: true, }} func (s *timeSuite) TestMacaroonsExpireTime(c *gc.C) { for i, test := range macaroonsExpireTimeTests { c.Logf("%d. %s", i, test.about) t, expires := checkers.MacaroonsExpiryTime(test.macaroons) c.Assert(t.Equal(test.expectTime), gc.Equals, true, gc.Commentf("obtained: %s, expected: %s", t, test.expectTime)) c.Assert(expires, gc.Equals, test.expectExpires) } } func mustNewMacaroon(cavs ...string) *macaroon.Macaroon { m, err := macaroon.New(nil, "", "") if err != nil { panic(err) } for _, cav := range cavs { if err := m.AddFirstPartyCaveat(cav); err != nil { panic(err) } } return m } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/checkers/checkers.go0000664000175000017500000002171112672604475024676 0ustar marcomarco// The checkers package provides some standard first-party // caveat checkers and some primitives for combining them. package checkers import ( "fmt" "net" "strings" "gopkg.in/errgo.v1" ) // Constants for all the standard caveat conditions. // First and third party caveat conditions are both defined here, // even though notionally they exist in separate name spaces. const ( CondDeclared = "declared" CondTimeBefore = "time-before" CondClientIPAddr = "client-ip-addr" CondClientOrigin = "origin" CondError = "error" CondNeedDeclared = "need-declared" CondAllow = "allow" CondDeny = "deny" ) // ErrCaveatNotRecognized is the cause of errors returned // from caveat checkers when the caveat was not // recognized. var ErrCaveatNotRecognized = errgo.New("caveat not recognized") // Caveat represents a condition that must be true for a check to // complete successfully. If Location is non-empty, the caveat must be // discharged by a third party at the given location. 
// This differs from macaroon.Caveat in that the condition
// is not encrypted.
type Caveat struct {
	// Location holds the location of the third party that must
	// discharge the caveat. It is empty for first-party caveats.
	Location string

	// Condition holds the plain-text caveat condition,
	// e.g. "time-before 2006-01-02T15:04:05Z".
	Condition string
}

// Checker is implemented by types that can check caveats.
type Checker interface {
	// Condition returns the identifier of the condition
	// to be checked - the Check method will be used
	// to check caveats with this identifier.
	//
	// It may return an empty string, in which case
	// it will be used to check any condition.
	Condition() string

	// Check checks that the given caveat holds true.
	// The condition and arg are as returned
	// from ParseCaveat.
	//
	// For a checker with an empty condition, a
	// return of bakery.ErrCaveatNotRecognised from
	// this method indicates that the condition was
	// not recognized.
	Check(cond, arg string) error
}

// New returns a new MultiChecker that uses all the
// provided Checkers to check caveats. If several checkers return the
// same condition identifier, all of them will be used.
//
// The cause of any error returned by a checker will be preserved.
//
// Note that because the returned checker implements Checker
// as well as bakery.FirstPartyChecker, calls to New can be nested.
// For example, a checker can be easily added to an existing
// MultiChecker, by doing:
//
//	checker := checkers.New(old, another)
func New(checkers ...Checker) *MultiChecker {
	return &MultiChecker{
		checkers: checkers,
	}
}

// MultiChecker implements bakery.FirstPartyChecker
// and Checker for a collection of checkers.
type MultiChecker struct {
	// TODO it may be faster to initialize a map, but we'd
	// be paying the price of creating and initializing
	// the map vs a few linear scans through a probably-small
	// slice. Let's wait for some real-world numbers.
	checkers []Checker
}

// errBadCaveat is returned for any caveat with the "error"
// condition, regardless of the registered checkers.
var errBadCaveat = errgo.Newf("bad caveat")

// Check implements Checker.Check.
func (c *MultiChecker) Check(cond, arg string) error { // Always check for the error caveat so that we're // sure to get a nice error message even when there // are no other checkers. This also prevents someone // from inadvertently overriding the error condition. if cond == CondError { return errBadCaveat } checked := false for _, c := range c.checkers { checkerCond := c.Condition() if checkerCond != "" && checkerCond != cond { continue } if err := c.Check(cond, arg); err != nil { if checkerCond == "" && errgo.Cause(err) == ErrCaveatNotRecognized { continue } return errgo.Mask(err, errgo.Any) } checked = true } if !checked { return ErrCaveatNotRecognized } return nil } // Condition implements Checker.Condition. func (c *MultiChecker) Condition() string { return "" } // CheckFirstPartyCaveat implements bakery.FirstPartyChecker.CheckFirstPartyCaveat. func (c *MultiChecker) CheckFirstPartyCaveat(cav string) error { cond, arg, err := ParseCaveat(cav) if err != nil { // If we can't parse it, perhaps it's in some other format, // return a not-recognised error. return errgo.WithCausef(err, ErrCaveatNotRecognized, "cannot parse caveat %q", cav) } if err := c.Check(cond, arg); err != nil { return errgo.NoteMask(err, fmt.Sprintf("caveat %q not satisfied", cav), errgo.Any) } return nil } // TODO add multiChecker.CheckThirdPartyCaveat ? // i.e. make this stuff reusable for 3rd party caveats too. func firstParty(cond, arg string) Caveat { return Caveat{ Condition: cond + " " + arg, } } // CheckerFunc implements Checker for a function. type CheckerFunc struct { // Condition_ holds the condition that the checker // implements. Condition_ string // Check_ holds the function to call to make the check. Check_ func(cond, arg string) error } // Condition implements Checker.Condition. 
func (f CheckerFunc) Condition() string {
	return f.Condition_
}

// Check implements Checker.Check
func (f CheckerFunc) Check(cond, arg string) error {
	return f.Check_(cond, arg)
}

// Map is a checker where the various checkers
// are specified as entries in a map, one for each
// condition.
// The cond argument passed to the function
// is always the same as its corresponding key
// in the map.
type Map map[string]func(cond string, arg string) error

// Condition implements Checker.Condition.
func (m Map) Condition() string {
	// An empty condition means the map applies to any caveat;
	// unknown conditions are rejected in Check.
	return ""
}

// Check implements Checker.Check
func (m Map) Check(cond, arg string) error {
	f, ok := m[cond]
	if !ok {
		return ErrCaveatNotRecognized
	}
	err := f(cond, arg)
	if err == nil {
		return nil
	}
	return errgo.Mask(err, errgo.Any)
}

// ParseCaveat parses a caveat into an identifier, identifying the
// checker that should be used, and the argument to the checker (the
// rest of the string).
//
// The identifier is taken from all the characters before the first
// space character.
func ParseCaveat(cav string) (cond, arg string, err error) {
	switch i := strings.IndexByte(cav, ' '); {
	case cav == "":
		return "", "", fmt.Errorf("empty caveat")
	case i == 0:
		return "", "", fmt.Errorf("caveat starts with space character")
	case i < 0:
		// No argument; the whole string is the condition.
		return cav, "", nil
	default:
		return cav[:i], cav[i+1:], nil
	}
}

// ClientIPAddrCaveat returns a caveat that will check whether the
// client's IP address is as provided.
// Note that the checkers package provides no specific
// implementation of the checker for this - that is
// left to external transport-specific packages.
func ClientIPAddrCaveat(addr net.IP) Caveat {
	if len(addr) != net.IPv4len && len(addr) != net.IPv6len {
		return ErrorCaveatf("bad IP address %d", []byte(addr))
	}
	return firstParty(CondClientIPAddr, addr.String())
}

// ClientOriginCaveat returns a caveat that will check whether the
// client's Origin header in its HTTP request is as provided.
func ClientOriginCaveat(origin string) Caveat { return firstParty(CondClientOrigin, origin) } // ErrorCaveatf returns a caveat that will never be satisfied, holding // the given fmt.Sprintf formatted text as the text of the caveat. // // This should only be used for highly unusual conditions that are never // expected to happen in practice, such as a malformed key that is // conventionally passed as a constant. It's not a panic but you should // only use it in cases where a panic might possibly be appropriate. // // This mechanism means that caveats can be created without error // checking and a later systematic check at a higher level (in the // bakery package) can produce an error instead. func ErrorCaveatf(f string, a ...interface{}) Caveat { return firstParty(CondError, fmt.Sprintf(f, a...)) } // AllowCaveat returns a caveat that will deny attempts to use the // macaroon to perform any operation other than those listed. Operations // must not contain a space. func AllowCaveat(op ...string) Caveat { if len(op) == 0 { return ErrorCaveatf("no operations allowed") } return operationCaveat(CondAllow, op) } // DenyCaveat returns a caveat that will deny attempts to use the // macaroon to perform any of the listed operations. Operations // must not contain a space. func DenyCaveat(op ...string) Caveat { return operationCaveat(CondDeny, op) } // operationCaveat is a helper for AllowCaveat and DenyCaveat. It checks // that all operation names are valid before createing the caveat. func operationCaveat(cond string, op []string) Caveat { for _, o := range op { if strings.IndexByte(o, ' ') != -1 { return ErrorCaveatf("invalid operation name %q", o) } } return firstParty(cond, strings.Join(op, " ")) } // OperationChecker checks any allow or deny caveats ensuring they do not // prohibit the named operation. type OperationChecker string // Condition implements Checker.Condition. func (OperationChecker) Condition() string { return "" } // Check implements Checker.Check. 
func (o OperationChecker) Check(cond, arg string) error { var expect bool switch cond { case CondAllow: expect = true fallthrough case CondDeny: var found bool for _, op := range strings.Fields(arg) { if string(o) == op { found = true break } } if found == expect { return nil } return fmt.Errorf("%s not allowed", o) default: return ErrCaveatNotRecognized } } var _ Checker = OperationChecker("") charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/bakery/service.go0000664000175000017500000003364112677511232022756 0ustar marcomarco// The bakery package layers on top of the macaroon package, providing // a transport and storage-agnostic way of using macaroons to assert // client capabilities. // package bakery import ( "crypto/rand" "fmt" "strings" "github.com/juju/loggo" "gopkg.in/errgo.v1" "gopkg.in/macaroon.v1" "gopkg.in/macaroon-bakery.v1/bakery/checkers" ) var logger = loggo.GetLogger("bakery") // Service represents a service which can use macaroons // to check authorization. type Service struct { location string store storage checker FirstPartyChecker encoder *boxEncoder key *KeyPair locator PublicKeyLocator } // NewServiceParams holds the parameters for a NewService call. type NewServiceParams struct { // Location will be set as the location of any macaroons // minted by the service. Location string // Store will be used to store macaroon // information locally. If it is nil, // an in-memory storage will be used. Store Storage // Key is the public key pair used by the service for // third-party caveat encryption. // It may be nil, in which case a new key pair // will be generated. Key *KeyPair // Locator provides public keys for third-party services by location when // adding a third-party caveat. // It may be nil, in which case, no third-party caveats can be created. Locator PublicKeyLocator } // NewService returns a new service that can mint new // macaroons and store their associated root keys. 
func NewService(p NewServiceParams) (*Service, error) { if p.Store == nil { p.Store = NewMemStorage() } svc := &Service{ location: p.Location, store: storage{p.Store}, locator: p.Locator, } var err error if p.Key == nil { p.Key, err = GenerateKey() if err != nil { return nil, err } } if svc.locator == nil { svc.locator = PublicKeyLocatorMap(nil) } svc.key = p.Key svc.encoder = newBoxEncoder(p.Key) return svc, nil } // Store returns the store used by the service. func (svc *Service) Store() Storage { return svc.store.store } // Location returns the service's configured macaroon location. func (svc *Service) Location() string { return svc.location } // PublicKey returns the service's public key. func (svc *Service) PublicKey() *PublicKey { return &svc.key.Public } // Check checks that the given macaroons verify // correctly using the provided checker to check // first party caveats. The primary macaroon is in ms[0]; the discharges // fill the rest of the slice. // // If there is a verification error, it returns a VerificationError that // describes the error (other errors might be returned in other // circumstances). func (svc *Service) Check(ms macaroon.Slice, checker FirstPartyChecker) error { if len(ms) == 0 { return &VerificationError{ Reason: fmt.Errorf("no macaroons in slice"), } } item, err := svc.store.Get(ms[0].Id()) if err != nil { if errgo.Cause(err) == ErrNotFound { // If the macaroon was not found, it is probably // because it's been removed after time-expiry, // so return a verification error. return &VerificationError{ Reason: errgo.New("macaroon not found in storage"), } } return errgo.Notef(err, "cannot get macaroon") } err = ms[0].Verify(item.RootKey, checker.CheckFirstPartyCaveat, ms[1:]) if err != nil { return &VerificationError{ Reason: err, } } return nil } // CheckAnyM is like CheckAny except that on success it also returns // the set of macaroons that was successfully checked. 
// The "M" suffix is for backward compatibility reasons - in a // later bakery version, the signature of CheckAny will be // changed to return the macaroon slice and CheckAnyM will be // removed. func (svc *Service) CheckAnyM(mss []macaroon.Slice, assert map[string]string, checker checkers.Checker) (map[string]string, macaroon.Slice, error) { if len(mss) == 0 { return nil, nil, &VerificationError{ Reason: errgo.Newf("no macaroons"), } } // TODO perhaps return a slice of attribute maps, one // for each successfully validated macaroon slice? var err error for _, ms := range mss { declared := checkers.InferDeclared(ms) for key, val := range assert { declared[key] = val } err = svc.Check(ms, checkers.New(declared, checker)) if err == nil { return declared, ms, nil } } // Return an arbitrary error from the macaroons provided. // TODO return all errors. return nil, nil, errgo.Mask(err, isVerificationError) } // CheckAny checks that the given slice of slices contains at least // one macaroon minted by the given service, using checker to check // any first party caveats. It returns an error with a // *bakery.VerificationError cause if the macaroon verification failed. // // The assert map holds any required attributes of "declared" attributes, // overriding any inferences made from the macaroons themselves. // It has a similar effect to adding a checkers.DeclaredCaveat // for each key and value, but the error message will be more // useful. // // It adds all the standard caveat checkers to the given checker. // // It returns any attributes declared in the successfully validated request. func (svc *Service) CheckAny(mss []macaroon.Slice, assert map[string]string, checker checkers.Checker) (map[string]string, error) { attrs, _, err := svc.CheckAnyM(mss, assert, checker) return attrs, err } func isVerificationError(err error) bool { _, ok := err.(*VerificationError) return ok } // NewMacaroon mints a new macaroon with the given id and caveats. 
// If the id is empty, a random id will be used. // If rootKey is nil, a random root key will be used. // The macaroon will be stored in the service's storage. // TODO swap the first two arguments so that they're // in the same order as macaroon.New. func (svc *Service) NewMacaroon(id string, rootKey []byte, caveats []checkers.Caveat) (*macaroon.Macaroon, error) { if rootKey == nil { newRootKey, err := randomBytes(24) if err != nil { return nil, fmt.Errorf("cannot generate root key for new macaroon: %v", err) } rootKey = newRootKey } if id == "" { idBytes, err := randomBytes(24) if err != nil { return nil, fmt.Errorf("cannot generate id for new macaroon: %v", err) } id = fmt.Sprintf("%x", idBytes) } m, err := macaroon.New(rootKey, id, svc.location) if err != nil { return nil, fmt.Errorf("cannot bake macaroon: %v", err) } for _, cav := range caveats { if err := svc.AddCaveat(m, cav); err != nil { return nil, errgo.Notef(err, "cannot add caveat") } } // TODO look at the caveats for expiry time and associate // that with the storage item so that the storage can // garbage collect it at an appropriate time. if err := svc.store.Put(m.Id(), &storageItem{ RootKey: rootKey, }); err != nil { return nil, fmt.Errorf("cannot save macaroon to store: %v", err) } return m, nil } // LocalThirdPartyCaveat returns a third-party caveat that, when added // to a macaroon with AddCaveat, results in a caveat // with the location "local", encrypted with the given public key. // This can be automatically discharged by DischargeAllWithKey. func LocalThirdPartyCaveat(key *PublicKey) checkers.Caveat { return checkers.Caveat{ Location: "local " + key.String(), } } // AddCaveat adds a caveat to the given macaroon. // // If it's a third-party caveat, it uses the service's caveat-id encoder // to create the id of the new caveat. 
// // As a special case, if the caveat's Location field has the prefix // "local " the caveat is added as a client self-discharge caveat // using the public key base64-encoded in the rest of the location. // In this case, the Condition field must be empty. The // resulting third-party caveat will encode the condition "true" // encrypted with that public key. See LocalThirdPartyCaveat // for a way of creating such caveats. func (svc *Service) AddCaveat(m *macaroon.Macaroon, cav checkers.Caveat) error { if cav.Location == "" { m.AddFirstPartyCaveat(cav.Condition) return nil } var thirdPartyPub *PublicKey if strings.HasPrefix(cav.Location, "local ") { var key PublicKey if err := key.UnmarshalText([]byte(cav.Location[len("local "):])); err != nil { return errgo.Notef(err, "cannot unmarshal client's public key in local third-party caveat") } thirdPartyPub = &key cav.Location = "local" if cav.Condition != "" { return errgo.New("cannot specify caveat condition in local third-party caveat") } cav.Condition = "true" } else { var err error thirdPartyPub, err = svc.locator.PublicKeyForLocation(cav.Location) if err != nil { return errgo.Notef(err, "cannot find public key for location %q", cav.Location) } } rootKey, err := randomBytes(24) if err != nil { return errgo.Notef(err, "cannot generate third party secret") } id, err := svc.encoder.encodeCaveatId(cav.Condition, rootKey, thirdPartyPub) if err != nil { return errgo.Notef(err, "cannot create third party caveat id at %q", cav.Location) } if err := m.AddThirdPartyCaveat(rootKey, id, cav.Location); err != nil { return errgo.Notef(err, "cannot add third party caveat") } return nil } // Discharge creates a macaroon that discharges the third party caveat with the // given id that should have been created earlier using key.Public. The // condition implicit in the id is checked for validity using checker. If // it is valid, a new macaroon is returned which discharges the caveat // along with any caveats returned from the checker. 
func Discharge(key *KeyPair, checker ThirdPartyChecker, id string) (*macaroon.Macaroon, []checkers.Caveat, error) { decoder := newBoxDecoder(key) logger.Infof("server attempting to discharge %q", id) rootKey, condition, err := decoder.decodeCaveatId(id) if err != nil { return nil, nil, errgo.Notef(err, "discharger cannot decode caveat id") } // Note that we don't check the error - we allow the // third party checker to see even caveats that we can't // understand. cond, arg, _ := checkers.ParseCaveat(condition) var caveats []checkers.Caveat if cond == checkers.CondNeedDeclared { caveats, err = checkNeedDeclared(id, arg, checker) } else { caveats, err = checker.CheckThirdPartyCaveat(id, condition) } if err != nil { return nil, nil, errgo.Mask(err, errgo.Any) } // Note that the discharge macaroon does not need to // be stored persistently. Indeed, it would be a problem if // we did, because then the macaroon could potentially be used // for normal authorization with the third party. m, err := macaroon.New(rootKey, id, "") if err != nil { return nil, nil, errgo.Mask(err) } return m, caveats, nil } // Discharge calls Discharge with the service's key and uses the service // to add any returned caveats to the discharge macaroon. 
func (svc *Service) Discharge(checker ThirdPartyChecker, id string) (*macaroon.Macaroon, error) { m, caveats, err := Discharge(svc.encoder.key, checker, id) if err != nil { return nil, errgo.Mask(err, errgo.Any) } for _, cav := range caveats { if err := svc.AddCaveat(m, cav); err != nil { return nil, errgo.Notef(err, "cannot add caveat") } } return m, nil } func checkNeedDeclared(caveatId, arg string, checker ThirdPartyChecker) ([]checkers.Caveat, error) { i := strings.Index(arg, " ") if i <= 0 { return nil, errgo.Newf("need-declared caveat requires an argument, got %q", arg) } needDeclared := strings.Split(arg[0:i], ",") for _, d := range needDeclared { if d == "" { return nil, errgo.New("need-declared caveat with empty required attribute") } } if len(needDeclared) == 0 { return nil, fmt.Errorf("need-declared caveat with no required attributes") } caveats, err := checker.CheckThirdPartyCaveat(caveatId, arg[i+1:]) if err != nil { return nil, errgo.Mask(err, errgo.Any) } declared := make(map[string]bool) for _, cav := range caveats { if cav.Location != "" { continue } // Note that we ignore the error. We allow the service to // generate caveats that we don't understand here. cond, arg, _ := checkers.ParseCaveat(cav.Condition) if cond != checkers.CondDeclared { continue } parts := strings.SplitN(arg, " ", 2) if len(parts) != 2 { return nil, errgo.Newf("declared caveat has no value") } declared[parts[0]] = true } // Add empty declarations for everything mentioned in need-declared // that was not actually declared. 
for _, d := range needDeclared { if !declared[d] { caveats = append(caveats, checkers.DeclaredCaveat(d, "")) } } return caveats, nil } func randomBytes(n int) ([]byte, error) { b := make([]byte, n) _, err := rand.Read(b) if err != nil { return nil, fmt.Errorf("cannot generate %d random bytes: %v", n, err) } return b, nil } type VerificationError struct { Reason error } func (e *VerificationError) Error() string { return fmt.Sprintf("verification failed: %v", e.Reason) } // TODO(rog) consider possible options for checkers: // - first and third party checkers could be merged, but // then there would have to be a runtime check // that when used to check first-party caveats, the // checker does not return third-party caveats. // ThirdPartyChecker holds a function that checks third party caveats // for validity. If the caveat is valid, it returns a nil error and // optionally a slice of extra caveats that will be added to the // discharge macaroon. The caveatId parameter holds the still-encoded id // of the caveat. // // If the caveat kind was not recognised, the checker should return an // error with a ErrCaveatNotRecognized cause. type ThirdPartyChecker interface { CheckThirdPartyCaveat(caveatId, caveat string) ([]checkers.Caveat, error) } type ThirdPartyCheckerFunc func(caveatId, caveat string) ([]checkers.Caveat, error) func (c ThirdPartyCheckerFunc) CheckThirdPartyCaveat(caveatId, caveat string) ([]checkers.Caveat, error) { return c(caveatId, caveat) } // FirstPartyChecker holds a function that checks first party caveats // for validity. // // If the caveat kind was not recognised, the checker should return // ErrCaveatNotRecognized. 
type FirstPartyChecker interface { CheckFirstPartyCaveat(caveat string) error } type FirstPartyCheckerFunc func(caveat string) error func (c FirstPartyCheckerFunc) CheckFirstPartyCaveat(caveat string) error { return c(caveat) } charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/README.md0000664000175000017500000000055012672604475020771 0ustar marcomarco# The macaroon bakery This repository is a companion to http://github.com/go-macaroon . It holds higher level operations for building systems with macaroons. For documentation, see: - http://godoc.org/gopkg.in/macaroon-bakery.v1/bakery - http://godoc.org/gopkg.in/macaroon-bakery.v1/httpbakery - http://godoc.org/gopkg.in/macaroon-bakery.v1/bakery/checkers charm-2.1.1/src/gopkg.in/macaroon-bakery.v1/cmd/0000775000175000017500000000000012672604475020255 5ustar marcomarcocharm-2.1.1/src/gopkg.in/macaroon-bakery.v1/cmd/bakery-keygen/0000775000175000017500000000000012672604475023012 5ustar marcomarcocharm-2.1.1/src/gopkg.in/macaroon-bakery.v1/cmd/bakery-keygen/main.go0000664000175000017500000000052012672604475024262 0ustar marcomarcopackage main import ( "encoding/json" "fmt" "os" "gopkg.in/macaroon-bakery.v1/bakery" ) func main() { kp, err := bakery.GenerateKey() if err != nil { fmt.Fprintf(os.Stderr, "cannot generate key: %s\n", err) os.Exit(1) } b, err := json.MarshalIndent(kp, "", "\t") if err != nil { panic(err) } fmt.Printf("%s\n", b) } charm-2.1.1/src/launchpad.net/0000775000175000017500000000000012701523110015076 5ustar marcomarcocharm-2.1.1/src/launchpad.net/tomb/0000775000175000017500000000000012672604554016062 5ustar marcomarcocharm-2.1.1/src/launchpad.net/tomb/Makefile0000664000175000017500000000062412672604554017524 0ustar marcomarcoinclude $(GOROOT)/src/Make.inc all: package TARG=launchpad.net/tomb GOFILES=\ tomb.go\ GOFMT=gofmt BADFMT:=$(shell $(GOFMT) -l $(GOFILES) $(CGOFILES) $(wildcard *_test.go) 2> /dev/null) gofmt: $(BADFMT) @for F in $(BADFMT); do $(GOFMT) -w $$F && echo $$F; done ifneq ($(BADFMT),) ifneq 
($(MAKECMDGOALS),gofmt) $(warning WARNING: make gofmt: $(BADFMT)) endif endif include $(GOROOT)/src/Make.pkg charm-2.1.1/src/launchpad.net/tomb/tomb_test.go0000664000175000017500000000474112672604554020417 0ustar marcomarcopackage tomb_test import ( "errors" "launchpad.net/tomb" "reflect" "testing" ) func TestNewTomb(t *testing.T) { tb := &tomb.Tomb{} testState(t, tb, false, false, tomb.ErrStillAlive) tb.Done() testState(t, tb, true, true, nil) } func TestKill(t *testing.T) { // a nil reason flags the goroutine as dying tb := &tomb.Tomb{} tb.Kill(nil) testState(t, tb, true, false, nil) // a non-nil reason now will override Kill err := errors.New("some error") tb.Kill(err) testState(t, tb, true, false, err) // another non-nil reason won't replace the first one tb.Kill(errors.New("ignore me")) testState(t, tb, true, false, err) tb.Done() testState(t, tb, true, true, err) } func TestKillf(t *testing.T) { tb := &tomb.Tomb{} err := tb.Killf("BO%s", "OM") if s := err.Error(); s != "BOOM" { t.Fatalf(`Killf("BO%s", "OM"): want "BOOM", got %q`, s) } testState(t, tb, true, false, err) // another non-nil reason won't replace the first one tb.Killf("ignore me") testState(t, tb, true, false, err) tb.Done() testState(t, tb, true, true, err) } func TestErrDying(t *testing.T) { // ErrDying being used properly, after a clean death. tb := &tomb.Tomb{} tb.Kill(nil) tb.Kill(tomb.ErrDying) testState(t, tb, true, false, nil) // ErrDying being used properly, after an errorful death. err := errors.New("some error") tb.Kill(err) tb.Kill(tomb.ErrDying) testState(t, tb, true, false, err) // ErrDying being used badly, with an alive tomb. 
tb = &tomb.Tomb{} defer func() { err := recover() if err != "tomb: Kill with ErrDying while still alive" { t.Fatalf("Wrong panic on Kill(ErrDying): %v", err) } testState(t, tb, false, false, tomb.ErrStillAlive) }() tb.Kill(tomb.ErrDying) } func testState(t *testing.T, tb *tomb.Tomb, wantDying, wantDead bool, wantErr error) { select { case <-tb.Dying(): if !wantDying { t.Error("<-Dying: should block") } default: if wantDying { t.Error("<-Dying: should not block") } } seemsDead := false select { case <-tb.Dead(): if !wantDead { t.Error("<-Dead: should block") } seemsDead = true default: if wantDead { t.Error("<-Dead: should not block") } } if err := tb.Err(); err != wantErr { t.Errorf("Err: want %#v, got %#v", wantErr, err) } if wantDead && seemsDead { waitErr := tb.Wait() switch { case waitErr == tomb.ErrStillAlive: t.Errorf("Wait should not return ErrStillAlive") case !reflect.DeepEqual(waitErr, wantErr): t.Errorf("Wait: want %#v, got %#v", wantErr, waitErr) } } } charm-2.1.1/src/launchpad.net/tomb/LICENSE0000664000175000017500000000311012672604554017062 0ustar marcomarcotomb - support for clean goroutine termination in Go. Copyright (c) 2010-2011 - Gustavo Niemeyer All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. charm-2.1.1/src/launchpad.net/tomb/.lbox0000664000175000017500000000003112672604554017021 0ustar marcomarcopropose -cr -for=lp:tomb charm-2.1.1/src/launchpad.net/tomb/tomb.go0000664000175000017500000001307312672604554017356 0ustar marcomarco// Copyright (c) 2011 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // PROJECT MOVED: https://gopkg.in/tomb.v1 package tomb import ( "errors" "fmt" "sync" ) // A Tomb tracks the lifecycle of a goroutine as alive, dying or dead, // and the reason for its death. // // The zero value of a Tomb assumes that a goroutine is about to be // created or already alive. Once Kill or Killf is called with an // argument that informs the reason for death, the goroutine is in // a dying state and is expected to terminate soon. Right before the // goroutine function or method returns, Done must be called to inform // that the goroutine is indeed dead and about to stop running. // // A Tomb exposes Dying and Dead channels. These channels are closed // when the Tomb state changes in the respective way. They enable // explicit blocking until the state changes, and also to selectively // unblock select statements accordingly. // // When the tomb state changes to dying and there's still logic going // on within the goroutine, nested functions and methods may choose to // return ErrDying as their error value, as this error won't alter the // tomb state if provied to the Kill method. This is a convenient way to // follow standard Go practices in the context of a dying tomb. 
// // For background and a detailed example, see the following blog post: // // http://blog.labix.org/2011/10/09/death-of-goroutines-under-control // // For a more complex code snippet demonstrating the use of multiple // goroutines with a single Tomb, see: // // http://play.golang.org/p/Xh7qWsDPZP // type Tomb struct { m sync.Mutex dying chan struct{} dead chan struct{} reason error } var ( ErrStillAlive = errors.New("tomb: still alive") ErrDying = errors.New("tomb: dying") ) func (t *Tomb) init() { t.m.Lock() if t.dead == nil { t.dead = make(chan struct{}) t.dying = make(chan struct{}) t.reason = ErrStillAlive } t.m.Unlock() } // Dead returns the channel that can be used to wait // until t.Done has been called. func (t *Tomb) Dead() <-chan struct{} { t.init() return t.dead } // Dying returns the channel that can be used to wait // until t.Kill or t.Done has been called. func (t *Tomb) Dying() <-chan struct{} { t.init() return t.dying } // Wait blocks until the goroutine is in a dead state and returns the // reason for its death. func (t *Tomb) Wait() error { t.init() <-t.dead t.m.Lock() reason := t.reason t.m.Unlock() return reason } // Done flags the goroutine as dead, and should be called a single time // right before the goroutine function or method returns. // If the goroutine was not already in a dying state before Done is // called, it will be flagged as dying and dead at once with no // error. func (t *Tomb) Done() { t.Kill(nil) close(t.dead) } // Kill flags the goroutine as dying for the given reason. // Kill may be called multiple times, but only the first // non-nil error is recorded as the reason for termination. // // If reason is ErrDying, the previous reason isn't replaced // even if it is nil. It's a runtime error to call Kill with // ErrDying if t is not in a dying state. 
func (t *Tomb) Kill(reason error) { t.init() t.m.Lock() defer t.m.Unlock() if reason == ErrDying { if t.reason == ErrStillAlive { panic("tomb: Kill with ErrDying while still alive") } return } if t.reason == nil || t.reason == ErrStillAlive { t.reason = reason } // If the receive on t.dying succeeds, then // it can only be because we have already closed it. // If it blocks, then we know that it needs to be closed. select { case <-t.dying: default: close(t.dying) } } // Killf works like Kill, but builds the reason providing the received // arguments to fmt.Errorf. The generated error is also returned. func (t *Tomb) Killf(f string, a ...interface{}) error { err := fmt.Errorf(f, a...) t.Kill(err) return err } // Err returns the reason for the goroutine death provided via Kill // or Killf, or ErrStillAlive when the goroutine is still alive. func (t *Tomb) Err() (reason error) { t.init() t.m.Lock() reason = t.reason t.m.Unlock() return } charm-2.1.1/src/launchpad.net/gnuflag/0000775000175000017500000000000012672604575016547 5ustar marcomarcocharm-2.1.1/src/launchpad.net/gnuflag/export_test.go0000664000175000017500000000121112672604575021451 0ustar marcomarco// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gnuflag import ( "os" ) // Additional routines compiled into the package only during testing. // ResetForTesting clears all flag state and sets the usage function as directed. // After calling ResetForTesting, parse errors in flag handling will not // exit the program. func ResetForTesting(usage func()) { commandLine = NewFlagSet(os.Args[0], ContinueOnError) Usage = usage } // CommandLine returns the default FlagSet. func CommandLine() *FlagSet { return commandLine } charm-2.1.1/src/launchpad.net/gnuflag/LICENSE0000664000175000017500000000270712672604574017561 0ustar marcomarcoCopyright (c) 2012 The Go Authors. All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. charm-2.1.1/src/launchpad.net/gnuflag/flag_test.go0000664000175000017500000002714612672604575021060 0ustar marcomarco// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gnuflag_test import ( "bytes" "fmt" . 
"launchpad.net/gnuflag" "os" "reflect" "sort" "strings" "testing" "time" ) var ( test_bool = Bool("test_bool", false, "bool value") test_int = Int("test_int", 0, "int value") test_int64 = Int64("test_int64", 0, "int64 value") test_uint = Uint("test_uint", 0, "uint value") test_uint64 = Uint64("test_uint64", 0, "uint64 value") test_string = String("test_string", "0", "string value") test_float64 = Float64("test_float64", 0, "float64 value") test_duration = Duration("test_duration", 0, "time.Duration value") ) func boolString(s string) string { if s == "0" { return "false" } return "true" } func TestEverything(t *testing.T) { m := make(map[string]*Flag) desired := "0" visitor := func(f *Flag) { if len(f.Name) > 5 && f.Name[0:5] == "test_" { m[f.Name] = f ok := false switch { case f.Value.String() == desired: ok = true case f.Name == "test_bool" && f.Value.String() == boolString(desired): ok = true case f.Name == "test_duration" && f.Value.String() == desired+"s": ok = true } if !ok { t.Error("Visit: bad value", f.Value.String(), "for", f.Name) } } } VisitAll(visitor) if len(m) != 8 { t.Error("VisitAll misses some flags") for k, v := range m { t.Log(k, *v) } } m = make(map[string]*Flag) Visit(visitor) if len(m) != 0 { t.Errorf("Visit sees unset flags") for k, v := range m { t.Log(k, *v) } } // Now set all flags Set("test_bool", "true") Set("test_int", "1") Set("test_int64", "1") Set("test_uint", "1") Set("test_uint64", "1") Set("test_string", "1") Set("test_float64", "1") Set("test_duration", "1s") desired = "1" Visit(visitor) if len(m) != 8 { t.Error("Visit fails after set") for k, v := range m { t.Log(k, *v) } } // Now test they're visited in sort order. 
var flagNames []string Visit(func(f *Flag) { flagNames = append(flagNames, f.Name) }) if !sort.StringsAreSorted(flagNames) { t.Errorf("flag names not sorted: %v", flagNames) } } func TestUsage(t *testing.T) { called := false ResetForTesting(func() { called = true }) f := CommandLine() f.SetOutput(nullWriter{}) if f.Parse(true, []string{"-x"}) == nil { t.Error("parse did not fail for unknown flag") } if !called { t.Error("did not call Usage for unknown flag") } } var parseTests = []struct { about string intersperse bool args []string vals map[string]interface{} remaining []string error string }{{ about: "regular args", intersperse: true, args: []string{ "--bool2", "--int", "22", "--int64", "0x23", "--uint", "24", "--uint64", "25", "--string", "hello", "--float64", "2718e28", "--duration", "2m", "one - extra - argument", }, vals: map[string]interface{}{ "bool": false, "bool2": true, "int": 22, "int64": int64(0x23), "uint": uint(24), "uint64": uint64(25), "string": "hello", "float64": 2718e28, "duration": 2 * 60 * time.Second, }, remaining: []string{ "one - extra - argument", }, }, { about: "playing with -", intersperse: true, args: []string{ "-a", "-", "-bc", "2", "-de1s", "-f2s", "-g", "3s", "--h", "--long", "--long2", "-4s", "3", "4", "--", "-5", }, vals: map[string]interface{}{ "a": true, "b": true, "c": true, "d": true, "e": "1s", "f": "2s", "g": "3s", "h": true, "long": true, "long2": "-4s", "z": "default", "www": 99, }, remaining: []string{ "-", "2", "3", "4", "-5", }, }, { about: "flag after explicit --", intersperse: true, args: []string{ "-a", "--", "-b", }, vals: map[string]interface{}{ "a": true, "b": false, }, remaining: []string{ "-b", }, }, { about: "flag after end", args: []string{ "-a", "foo", "-b", }, vals: map[string]interface{}{ "a": true, "b": false, }, remaining: []string{ "foo", "-b", }, }, { about: "arg and flag after explicit end", args: []string{ "-a", "--", "foo", "-b", }, vals: map[string]interface{}{ "a": true, "b": false, }, remaining: 
[]string{ "foo", "-b", }, }, { about: "boolean args, explicitly and non-explicitly given", args: []string{ "--a=false", "--b=true", "--c", }, vals: map[string]interface{}{ "a": false, "b": true, "c": true, }, }, { about: "using =", args: []string{ "--arble=bar", "--bletch=", "--a=something", "-b=other", "-cdand more", "--curdle=--milk", "--sandwich", "=", "--darn=", "=arg", }, vals: map[string]interface{}{ "arble": "bar", "bletch": "", "a": "something", "b": "=other", "c": true, "d": "and more", "curdle": "--milk", "sandwich": "=", "darn": "", }, remaining: []string{"=arg"}, }, { about: "empty flag #1", args: []string{ "--=bar", }, error: `empty flag in argument "--=bar"`, }, { about: "single-letter equals", args: []string{ "-=bar", }, error: `flag provided but not defined: -=`, }, { about: "empty flag #2", args: []string{ "--=", }, error: `empty flag in argument "--="`, }, { about: "no equals", args: []string{ "-=", }, error: `flag provided but not defined: -=`, }, { args: []string{ "-a=true", }, vals: map[string]interface{}{ "a": true, }, error: `invalid value "=true" for flag -a: strconv.ParseBool: parsing "=true": invalid syntax`, }, { intersperse: true, args: []string{ "-a", "-b", }, vals: map[string]interface{}{ "a": true, }, error: "flag provided but not defined: -b", }, { intersperse: true, args: []string{ "-a", }, vals: map[string]interface{}{ "a": "default", }, error: "flag needs an argument: -a", }, { intersperse: true, args: []string{ "-a", "b", }, vals: map[string]interface{}{ "a": 0, }, error: `invalid value "b" for flag -a: strconv.ParseInt: parsing "b": invalid syntax`, }, } func testParse(newFlagSet func() *FlagSet, t *testing.T) { for i, g := range parseTests { t.Logf("test %d. 
%s", i, g.about) f := newFlagSet() flags := make(map[string]interface{}) for name, val := range g.vals { switch val.(type) { case bool: flags[name] = f.Bool(name, false, "bool value "+name) case string: flags[name] = f.String(name, "default", "string value "+name) case int: flags[name] = f.Int(name, 99, "int value "+name) case uint: flags[name] = f.Uint(name, 0, "uint value") case uint64: flags[name] = f.Uint64(name, 0, "uint64 value") case int64: flags[name] = f.Int64(name, 0, "uint64 value") case float64: flags[name] = f.Float64(name, 0, "float64 value") case time.Duration: flags[name] = f.Duration(name, 5*time.Second, "duration value") default: t.Fatalf("unhandled type %T", val) } } err := f.Parse(g.intersperse, g.args) if g.error != "" { if err == nil { t.Errorf("expected error %q got nil", g.error) } else if err.Error() != g.error { t.Errorf("expected error %q got %q", g.error, err.Error()) } continue } for name, val := range g.vals { actual := reflect.ValueOf(flags[name]).Elem().Interface() if val != actual { t.Errorf("flag %q, expected %v got %v", name, val, actual) } } if len(f.Args()) != len(g.remaining) { t.Fatalf("remaining args, expected %q got %q", g.remaining, f.Args()) } for j, a := range f.Args() { if a != g.remaining[j] { t.Errorf("arg %d, expected %q got %q", j, g.remaining[i], a) } } } } func TestParse(t *testing.T) { testParse(func() *FlagSet { ResetForTesting(func() {}) f := CommandLine() f.SetOutput(nullWriter{}) return f }, t) } func TestFlagSetParse(t *testing.T) { testParse(func() *FlagSet { f := NewFlagSet("test", ContinueOnError) f.SetOutput(nullWriter{}) return f }, t) } // Declare a user-defined flag type. 
type flagVar []string func (f *flagVar) String() string { return fmt.Sprint([]string(*f)) } func (f *flagVar) Set(value string) error { *f = append(*f, value) return nil } func TestUserDefined(t *testing.T) { var flags FlagSet flags.Init("test", ContinueOnError) var v flagVar flags.Var(&v, "v", "usage") if err := flags.Parse(true, []string{"-v", "1", "-v", "2", "-v3"}); err != nil { t.Error(err) } if len(v) != 3 { t.Fatal("expected 3 args; got ", len(v)) } expect := "[1 2 3]" if v.String() != expect { t.Errorf("expected value %q got %q", expect, v.String()) } } func TestSetOutput(t *testing.T) { var flags FlagSet var buf bytes.Buffer flags.SetOutput(&buf) flags.Init("test", ContinueOnError) flags.Parse(true, []string{"-unknown"}) if out := buf.String(); !strings.Contains(out, "-unknown") { t.Logf("expected output mentioning unknown; got %q", out) } } // This tests that one can reset the flags. This still works but not well, and is // superseded by FlagSet. func TestChangingArgs(t *testing.T) { ResetForTesting(func() { t.Fatal("bad parse") }) oldArgs := os.Args defer func() { os.Args = oldArgs }() os.Args = []string{"cmd", "--before", "subcmd", "--after", "args"} before := Bool("before", false, "") if err := CommandLine().Parse(false, os.Args[1:]); err != nil { t.Fatal(err) } cmd := Arg(0) os.Args = Args() after := Bool("after", false, "") Parse(false) args := Args() if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) } } // Test that -help invokes the usage message and returns ErrHelp. 
func TestHelp(t *testing.T) { var helpCalled = false fs := NewFlagSet("help test", ContinueOnError) fs.SetOutput(nullWriter{}) fs.Usage = func() { helpCalled = true } var flag bool fs.BoolVar(&flag, "flag", false, "regular flag") // Regular flag invocation should work err := fs.Parse(true, []string{"--flag"}) if err != nil { t.Fatal("expected no error; got ", err) } if !flag { t.Error("flag was not set by --flag") } if helpCalled { t.Error("help called for regular flag") helpCalled = false // reset for next test } // Help flag should work as expected. err = fs.Parse(true, []string{"--help"}) if err == nil { t.Fatal("error expected") } if err != ErrHelp { t.Fatal("expected ErrHelp; got ", err) } if !helpCalled { t.Fatal("help was not called") } // If we define a help flag, that should override. var help bool fs.BoolVar(&help, "help", false, "help flag") helpCalled = false err = fs.Parse(true, []string{"--help"}) if err != nil { t.Fatal("expected no error for defined --help; got ", err) } if helpCalled { t.Fatal("help was called; should not have been for defined help flag") } } type nullWriter struct{} func (nullWriter) Write(buf []byte) (int, error) { return len(buf), nil } func TestPrintDefaults(t *testing.T) { f := NewFlagSet("print test", ContinueOnError) f.SetOutput(nullWriter{}) var b bool var c int var d string var e float64 f.IntVar(&c, "trapclap", 99, "usage not shown") f.IntVar(&c, "c", 99, "c usage") f.BoolVar(&b, "bal", false, "usage not shown") f.BoolVar(&b, "x", false, "usage not shown") f.BoolVar(&b, "b", false, "b usage") f.BoolVar(&b, "balalaika", false, "usage not shown") f.StringVar(&d, "d", "d default", "d usage") f.Float64Var(&e, "elephant", 3.14, "elephant usage") var buf bytes.Buffer f.SetOutput(&buf) f.PrintDefaults() f.SetOutput(nullWriter{}) expect := `-b, -x, --bal, --balalaika (= false) b usage -c, --trapclap (= 99) c usage -d (= "d default") d usage --elephant (= 3.14) elephant usage ` if buf.String() != expect { t.Errorf("expect %q got 
%q", expect, buf.String()) } } charm-2.1.1/src/launchpad.net/gnuflag/flag.go0000664000175000017500000006661212672604575020022 0ustar marcomarco// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. /* Package flag implements command-line flag parsing in the GNU style. It is almost exactly the same as the standard flag package, the only difference being the extra argument to Parse. Command line flag syntax: -f // single letter flag -fg // two single letter flags together --flag // multiple letter flag --flag x // non-boolean flags only -f x // non-boolean flags only -fx // if f is a non-boolean flag, x is its argument. The last three forms are not permitted for boolean flags because the meaning of the command cmd -f * will change if there is a file called 0, false, etc. There is currently no way to turn off a boolean flag. Flag parsing stops after the terminator "--", or just before the first non-flag argument ("-" is a non-flag argument) if the interspersed argument to Parse is false. */ package gnuflag import ( "bytes" "errors" "fmt" "io" "os" "sort" "strconv" "strings" "time" "unicode/utf8" ) // ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. 
var ErrHelp = errors.New("flag: help requested") // -- bool Value type boolValue bool func newBoolValue(val bool, p *bool) *boolValue { *p = val return (*boolValue)(p) } func (b *boolValue) Set(s string) error { v, err := strconv.ParseBool(s) *b = boolValue(v) return err } func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } // -- int Value type intValue int func newIntValue(val int, p *int) *intValue { *p = val return (*intValue)(p) } func (i *intValue) Set(s string) error { v, err := strconv.ParseInt(s, 0, 64) *i = intValue(v) return err } func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } // -- int64 Value type int64Value int64 func newInt64Value(val int64, p *int64) *int64Value { *p = val return (*int64Value)(p) } func (i *int64Value) Set(s string) error { v, err := strconv.ParseInt(s, 0, 64) *i = int64Value(v) return err } func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } // -- uint Value type uintValue uint func newUintValue(val uint, p *uint) *uintValue { *p = val return (*uintValue)(p) } func (i *uintValue) Set(s string) error { v, err := strconv.ParseUint(s, 0, 64) *i = uintValue(v) return err } func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } // -- uint64 Value type uint64Value uint64 func newUint64Value(val uint64, p *uint64) *uint64Value { *p = val return (*uint64Value)(p) } func (i *uint64Value) Set(s string) error { v, err := strconv.ParseUint(s, 0, 64) *i = uint64Value(v) return err } func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } // -- string Value type stringValue string func newStringValue(val string, p *string) *stringValue { *p = val return (*stringValue)(p) } func (s *stringValue) Set(val string) error { *s = stringValue(val) return nil } func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } // -- float64 Value type float64Value float64 func newFloat64Value(val float64, p *float64) *float64Value { *p = val return (*float64Value)(p) 
} func (f *float64Value) Set(s string) error { v, err := strconv.ParseFloat(s, 64) *f = float64Value(v) return err } func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } // -- time.Duration Value type durationValue time.Duration func newDurationValue(val time.Duration, p *time.Duration) *durationValue { *p = val return (*durationValue)(p) } func (d *durationValue) Set(s string) error { v, err := time.ParseDuration(s) *d = durationValue(v) return err } func (d *durationValue) String() string { return (*time.Duration)(d).String() } // Value is the interface to the dynamic value stored in a flag. // (The default value is represented as a string.) type Value interface { String() string Set(string) error } // ErrorHandling defines how to handle flag parsing errors. type ErrorHandling int const ( ContinueOnError ErrorHandling = iota ExitOnError PanicOnError ) // A FlagSet represents a set of defined flags. type FlagSet struct { // Usage is the function called when an error occurs while parsing flags. // The field is a function (not a method) that may be changed to point to // a custom error handler. Usage func() name string parsed bool actual map[string]*Flag formal map[string]*Flag args []string // arguments after flags procArgs []string // arguments being processed (gnu only) procFlag string // flag being processed (gnu only) allowIntersperse bool // (gnu only) exitOnError bool // does the program exit if there's an error? errorHandling ErrorHandling output io.Writer // nil means stderr; use out() accessor } // A Flag represents the state of a flag. type Flag struct { Name string // name as it appears on command line Usage string // help message Value Value // value as set DefValue string // default value (as text); for usage message } // sortFlags returns the flags as a slice in lexicographical sorted order. 
func sortFlags(flags map[string]*Flag) []*Flag { list := make(sort.StringSlice, len(flags)) i := 0 for _, f := range flags { list[i] = f.Name i++ } list.Sort() result := make([]*Flag, len(list)) for i, name := range list { result[i] = flags[name] } return result } func (f *FlagSet) out() io.Writer { if f.output == nil { return os.Stderr } return f.output } // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. func (f *FlagSet) SetOutput(output io.Writer) { f.output = output } // VisitAll visits the flags in lexicographical order, calling fn for each. // It visits all flags, even those not set. func (f *FlagSet) VisitAll(fn func(*Flag)) { for _, flag := range sortFlags(f.formal) { fn(flag) } } // VisitAll visits the command-line flags in lexicographical order, calling // fn for each. It visits all flags, even those not set. func VisitAll(fn func(*Flag)) { commandLine.VisitAll(fn) } // Visit visits the flags in lexicographical order, calling fn for each. // It visits only those flags that have been set. func (f *FlagSet) Visit(fn func(*Flag)) { for _, flag := range sortFlags(f.actual) { fn(flag) } } // Visit visits the command-line flags in lexicographical order, calling fn // for each. It visits only those flags that have been set. func Visit(fn func(*Flag)) { commandLine.Visit(fn) } // Lookup returns the Flag structure of the named flag, returning nil if none exists. func (f *FlagSet) Lookup(name string) *Flag { return f.formal[name] } // Lookup returns the Flag structure of the named command-line flag, // returning nil if none exists. func Lookup(name string) *Flag { return commandLine.formal[name] } // Set sets the value of the named flag. 
func (f *FlagSet) Set(name, value string) error { flag, ok := f.formal[name] if !ok { return fmt.Errorf("no such flag -%v", name) } err := flag.Value.Set(value) if err != nil { return err } if f.actual == nil { f.actual = make(map[string]*Flag) } f.actual[name] = flag return nil } // Set sets the value of the named command-line flag. func Set(name, value string) error { return commandLine.Set(name, value) } // flagsByLength is a slice of flags implementing sort.Interface, // sorting primarily by the length of the flag, and secondarily // alphabetically. type flagsByLength []*Flag func (f flagsByLength) Less(i, j int) bool { s1, s2 := f[i].Name, f[j].Name if len(s1) != len(s2) { return len(s1) < len(s2) } return s1 < s2 } func (f flagsByLength) Swap(i, j int) { f[i], f[j] = f[j], f[i] } func (f flagsByLength) Len() int { return len(f) } // flagsByName is a slice of slices of flags implementing sort.Interface, // alphabetically sorting by the name of the first flag in each slice. type flagsByName [][]*Flag func (f flagsByName) Less(i, j int) bool { return f[i][0].Name < f[j][0].Name } func (f flagsByName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } func (f flagsByName) Len() int { return len(f) } // PrintDefaults prints, to standard error unless configured // otherwise, the default values of all defined flags in the set. // If there is more than one name for a given flag, the usage information and // default value from the shortest will be printed (or the least alphabetically // if there are several equally short flag names). func (f *FlagSet) PrintDefaults() { // group together all flags for a given value flags := make(map[interface{}]flagsByLength) f.VisitAll(func(f *Flag) { flags[f.Value] = append(flags[f.Value], f) }) // sort the output flags by shortest name for each group. 
var byName flagsByName for _, f := range flags { sort.Sort(f) byName = append(byName, f) } sort.Sort(byName) var line bytes.Buffer for _, fs := range byName { line.Reset() for i, f := range fs { if i > 0 { line.WriteString(", ") } line.WriteString(flagWithMinus(f.Name)) } format := "%s (= %s)\n %s\n" if _, ok := fs[0].Value.(*stringValue); ok { // put quotes on the value format = "%s (= %q)\n %s\n" } fmt.Fprintf(f.out(), format, line.Bytes(), fs[0].DefValue, fs[0].Usage) } } // PrintDefaults prints to standard error the default values of all defined command-line flags. func PrintDefaults() { commandLine.PrintDefaults() } // defaultUsage is the default function to print a usage message. func defaultUsage(f *FlagSet) { fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) f.PrintDefaults() } // NOTE: Usage is not just defaultUsage(commandLine) // because it serves (via godoc flag Usage) as the example // for how to write your own usage function. // Usage prints to standard error a usage message documenting all defined command-line flags. // The function is a variable that may be changed to point to a custom function. var Usage = func() { fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) PrintDefaults() } // NFlag returns the number of flags that have been set. func (f *FlagSet) NFlag() int { return len(f.actual) } // NFlag returns the number of command-line flags that have been set. func NFlag() int { return len(commandLine.actual) } // Arg returns the i'th argument. Arg(0) is the first remaining argument // after flags have been processed. func (f *FlagSet) Arg(i int) string { if i < 0 || i >= len(f.args) { return "" } return f.args[i] } // Arg returns the i'th command-line argument. Arg(0) is the first remaining argument // after flags have been processed. func Arg(i int) string { return commandLine.Arg(i) } // NArg is the number of arguments remaining after flags have been processed. 
func (f *FlagSet) NArg() int { return len(f.args) } // NArg is the number of arguments remaining after flags have been processed. func NArg() int { return len(commandLine.args) } // Args returns the non-flag arguments. func (f *FlagSet) Args() []string { return f.args } // Args returns the non-flag command-line arguments. func Args() []string { return commandLine.args } // BoolVar defines a bool flag with specified name, default value, and usage string. // The argument p points to a bool variable in which to store the value of the flag. func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { f.Var(newBoolValue(value, p), name, usage) } // BoolVar defines a bool flag with specified name, default value, and usage string. // The argument p points to a bool variable in which to store the value of the flag. func BoolVar(p *bool, name string, value bool, usage string) { commandLine.Var(newBoolValue(value, p), name, usage) } // Bool defines a bool flag with specified name, default value, and usage string. // The return value is the address of a bool variable that stores the value of the flag. func (f *FlagSet) Bool(name string, value bool, usage string) *bool { p := new(bool) f.BoolVar(p, name, value, usage) return p } // Bool defines a bool flag with specified name, default value, and usage string. // The return value is the address of a bool variable that stores the value of the flag. func Bool(name string, value bool, usage string) *bool { return commandLine.Bool(name, value, usage) } // IntVar defines an int flag with specified name, default value, and usage string. // The argument p points to an int variable in which to store the value of the flag. func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { f.Var(newIntValue(value, p), name, usage) } // IntVar defines an int flag with specified name, default value, and usage string. // The argument p points to an int variable in which to store the value of the flag. 
func IntVar(p *int, name string, value int, usage string) { commandLine.Var(newIntValue(value, p), name, usage) } // Int defines an int flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. func (f *FlagSet) Int(name string, value int, usage string) *int { p := new(int) f.IntVar(p, name, value, usage) return p } // Int defines an int flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. func Int(name string, value int, usage string) *int { return commandLine.Int(name, value, usage) } // Int64Var defines an int64 flag with specified name, default value, and usage string. // The argument p points to an int64 variable in which to store the value of the flag. func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { f.Var(newInt64Value(value, p), name, usage) } // Int64Var defines an int64 flag with specified name, default value, and usage string. // The argument p points to an int64 variable in which to store the value of the flag. func Int64Var(p *int64, name string, value int64, usage string) { commandLine.Var(newInt64Value(value, p), name, usage) } // Int64 defines an int64 flag with specified name, default value, and usage string. // The return value is the address of an int64 variable that stores the value of the flag. func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { p := new(int64) f.Int64Var(p, name, value, usage) return p } // Int64 defines an int64 flag with specified name, default value, and usage string. // The return value is the address of an int64 variable that stores the value of the flag. func Int64(name string, value int64, usage string) *int64 { return commandLine.Int64(name, value, usage) } // UintVar defines a uint flag with specified name, default value, and usage string. 
// The argument p points to a uint variable in which to store the value of the flag. func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { f.Var(newUintValue(value, p), name, usage) } // UintVar defines a uint flag with specified name, default value, and usage string. // The argument p points to a uint variable in which to store the value of the flag. func UintVar(p *uint, name string, value uint, usage string) { commandLine.Var(newUintValue(value, p), name, usage) } // Uint defines a uint flag with specified name, default value, and usage string. // The return value is the address of a uint variable that stores the value of the flag. func (f *FlagSet) Uint(name string, value uint, usage string) *uint { p := new(uint) f.UintVar(p, name, value, usage) return p } // Uint defines a uint flag with specified name, default value, and usage string. // The return value is the address of a uint variable that stores the value of the flag. func Uint(name string, value uint, usage string) *uint { return commandLine.Uint(name, value, usage) } // Uint64Var defines a uint64 flag with specified name, default value, and usage string. // The argument p points to a uint64 variable in which to store the value of the flag. func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { f.Var(newUint64Value(value, p), name, usage) } // Uint64Var defines a uint64 flag with specified name, default value, and usage string. // The argument p points to a uint64 variable in which to store the value of the flag. func Uint64Var(p *uint64, name string, value uint64, usage string) { commandLine.Var(newUint64Value(value, p), name, usage) } // Uint64 defines a uint64 flag with specified name, default value, and usage string. // The return value is the address of a uint64 variable that stores the value of the flag. 
func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { p := new(uint64) f.Uint64Var(p, name, value, usage) return p } // Uint64 defines a uint64 flag with specified name, default value, and usage string. // The return value is the address of a uint64 variable that stores the value of the flag. func Uint64(name string, value uint64, usage string) *uint64 { return commandLine.Uint64(name, value, usage) } // StringVar defines a string flag with specified name, default value, and usage string. // The argument p points to a string variable in which to store the value of the flag. func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { f.Var(newStringValue(value, p), name, usage) } // StringVar defines a string flag with specified name, default value, and usage string. // The argument p points to a string variable in which to store the value of the flag. func StringVar(p *string, name string, value string, usage string) { commandLine.Var(newStringValue(value, p), name, usage) } // String defines a string flag with specified name, default value, and usage string. // The return value is the address of a string variable that stores the value of the flag. func (f *FlagSet) String(name string, value string, usage string) *string { p := new(string) f.StringVar(p, name, value, usage) return p } // String defines a string flag with specified name, default value, and usage string. // The return value is the address of a string variable that stores the value of the flag. func String(name string, value string, usage string) *string { return commandLine.String(name, value, usage) } // Float64Var defines a float64 flag with specified name, default value, and usage string. // The argument p points to a float64 variable in which to store the value of the flag. 
func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { f.Var(newFloat64Value(value, p), name, usage) } // Float64Var defines a float64 flag with specified name, default value, and usage string. // The argument p points to a float64 variable in which to store the value of the flag. func Float64Var(p *float64, name string, value float64, usage string) { commandLine.Var(newFloat64Value(value, p), name, usage) } // Float64 defines a float64 flag with specified name, default value, and usage string. // The return value is the address of a float64 variable that stores the value of the flag. func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { p := new(float64) f.Float64Var(p, name, value, usage) return p } // Float64 defines a float64 flag with specified name, default value, and usage string. // The return value is the address of a float64 variable that stores the value of the flag. func Float64(name string, value float64, usage string) *float64 { return commandLine.Float64(name, value, usage) } // DurationVar defines a time.Duration flag with specified name, default value, and usage string. // The argument p points to a time.Duration variable in which to store the value of the flag. func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { f.Var(newDurationValue(value, p), name, usage) } // DurationVar defines a time.Duration flag with specified name, default value, and usage string. // The argument p points to a time.Duration variable in which to store the value of the flag. func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { commandLine.Var(newDurationValue(value, p), name, usage) } // Duration defines a time.Duration flag with specified name, default value, and usage string. // The return value is the address of a time.Duration variable that stores the value of the flag. 
func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { p := new(time.Duration) f.DurationVar(p, name, value, usage) return p } // Duration defines a time.Duration flag with specified name, default value, and usage string. // The return value is the address of a time.Duration variable that stores the value of the flag. func Duration(name string, value time.Duration, usage string) *time.Duration { return commandLine.Duration(name, value, usage) } // Var defines a flag with the specified name and usage string. The type and // value of the flag are represented by the first argument, of type Value, which // typically holds a user-defined implementation of Value. For instance, the // caller could create a flag that turns a comma-separated string into a slice // of strings by giving the slice the methods of Value; in particular, Set would // decompose the comma-separated string into the slice. func (f *FlagSet) Var(value Value, name string, usage string) { // Remember the default value as a string; it won't change. flag := &Flag{name, usage, value, value.String()} _, alreadythere := f.formal[name] if alreadythere { fmt.Fprintf(f.out(), "%s flag redefined: %s\n", f.name, name) panic("flag redefinition") // Happens only if flags are declared with identical names } if f.formal == nil { f.formal = make(map[string]*Flag) } f.formal[name] = flag } // Var defines a flag with the specified name and usage string. The type and // value of the flag are represented by the first argument, of type Value, which // typically holds a user-defined implementation of Value. For instance, the // caller could create a flag that turns a comma-separated string into a slice // of strings by giving the slice the methods of Value; in particular, Set would // decompose the comma-separated string into the slice. 
func Var(value Value, name string, usage string) {
	commandLine.Var(value, name, usage)
}

// failf prints to standard error a formatted error and usage message and
// returns the error.
func (f *FlagSet) failf(format string, a ...interface{}) error {
	err := fmt.Errorf(format, a...)
	fmt.Fprintln(f.out(), err)
	f.usage()
	return err
}

// usage calls the Usage method for the flag set, or the usage function if
// the flag set is commandLine.
func (f *FlagSet) usage() {
	if f == commandLine {
		Usage()
	} else if f.Usage == nil {
		defaultUsage(f)
	} else {
		f.Usage()
	}
}

// parseOneGnu scans the next flag name out of f.procArgs / f.procFlag.
// It returns the flag name stripped of its leading "-" or "--" (empty when
// the scanned argument was positional), whether the long "--name" form was
// used, whether argument processing is complete, and any scanning error.
// Parsing state is carried between calls: f.procArgs holds the unconsumed
// arguments, f.procFlag holds the remainder of a combined short-flag group
// (e.g. the "vz" left over from "-xvz") or a "=value" suffix, and f.args
// accumulates positional arguments.
func (f *FlagSet) parseOneGnu() (flagName string, long, finished bool, err error) {
	if len(f.procArgs) == 0 {
		finished = true
		return
	}

	// processing previously encountered single-rune flag
	if flag := f.procFlag; len(flag) > 0 {
		// Peel one rune off the group; the rest waits for the next call.
		_, n := utf8.DecodeRuneInString(flag)
		f.procFlag = flag[n:]
		flagName = flag[0:n]
		return
	}

	a := f.procArgs[0]

	// one non-flag argument
	if a == "-" || a == "" || a[0] != '-' {
		if f.allowIntersperse {
			// Record the positional argument and keep scanning for flags.
			f.args = append(f.args, a)
			f.procArgs = f.procArgs[1:]
			return
		}
		// Intersperse disallowed: the first positional argument ends
		// flag processing; everything remaining is positional.
		f.args = append(f.args, f.procArgs...)
		f.procArgs = nil
		finished = true
		return
	}

	// end of flags
	if f.procArgs[0] == "--" {
		// "--" terminator: all remaining arguments are positional.
		f.args = append(f.args, f.procArgs[1:]...)
		f.procArgs = nil
		finished = true
		return
	}

	// long flag signified with "--" prefix
	if a[1] == '-' {
		long = true
		i := strings.Index(a, "=")
		if i < 0 {
			// No attached value; parseGnuFlagArg may take the next arg.
			f.procArgs = f.procArgs[1:]
			flagName = a[2:]
			return
		}
		flagName = a[2:i]
		if flagName == "" {
			// e.g. "--=value": a flag must have a name.
			err = fmt.Errorf("empty flag in argument %q", a)
			return
		}
		f.procArgs = f.procArgs[1:]
		// Keep the "=value" suffix (including the '=') for parseGnuFlagArg.
		f.procFlag = a[i:]
		return
	}

	// some number of single-rune flags
	a = a[1:]
	_, n := utf8.DecodeRuneInString(a)
	flagName = a[0:n]
	// Remaining runes of the group are handled on subsequent calls.
	f.procFlag = a[n:]
	f.procArgs = f.procArgs[1:]
	return
}

// flagWithMinus formats a flag name for user-facing messages: multi-rune
// names get a "--" prefix, single-rune names a single "-".
func flagWithMinus(name string) string {
	if len(name) > 1 {
		return "--" + name
	}
	return "-" + name
}

// parseGnuFlagArg resolves the flag called name (as produced by parseOneGnu)
// against the defined flags and consumes its value, if one is required.
// A boolean flag without an attached "=value" is set to "true"; every other
// flag takes its value from the "=value" suffix or from the next argument.
// Successfully set flags are recorded in f.actual. It returns ErrHelp for an
// undefined "help"/"h" flag, and reports finished=false otherwise.
func (f *FlagSet) parseGnuFlagArg(name string, long bool) (finished bool, err error) {
	m := f.formal
	flag, alreadythere := m[name] // BUG
	if !alreadythere {
		if name == "help" || name == "h" { // special case for nice help message.
			f.usage()
			return false, ErrHelp
		}
		// TODO print --xxx when flag is more than one rune.
		return false, f.failf("flag provided but not defined: %s", flagWithMinus(name))
	}
	if fv, ok := flag.Value.(*boolValue); ok && !strings.HasPrefix(f.procFlag, "=") {
		// special case: doesn't need an arg, and an arg hasn't
		// been provided explicitly.
		fv.Set("true")
	} else {
		// It must have a value, which might be the next argument.
		var hasValue bool
		var value string
		if f.procFlag != "" {
			// value directly follows flag
			value = f.procFlag
			if long {
				// parseOneGnu always leaves the '=' on a long flag's
				// attached value; anything else is an internal error.
				if value[0] != '=' {
					panic("no leading '=' in long flag")
				}
				value = value[1:]
			}
			hasValue = true
			f.procFlag = ""
		}
		if !hasValue && len(f.procArgs) > 0 {
			// value is the next arg
			hasValue = true
			value, f.procArgs = f.procArgs[0], f.procArgs[1:]
		}
		if !hasValue {
			return false, f.failf("flag needs an argument: %s", flagWithMinus(name))
		}
		if err := flag.Value.Set(value); err != nil {
			return false, f.failf("invalid value %q for flag %s: %v", value, flagWithMinus(name), err)
		}
	}
	// Lazily allocate the set of flags actually seen on the command line.
	if f.actual == nil {
		f.actual = make(map[string]*Flag)
	}
	f.actual[name] = flag
	return
}

// Parse parses flag definitions from the argument list, which should not
// include the command name. Must be called after all flags in the FlagSet
// are defined and before flags are accessed by the program.
// The return value will be ErrHelp if --help was set but not defined.
// If allowIntersperse is set, arguments and flags can be interspersed, that
// is flags can follow positional arguments.
func (f *FlagSet) Parse(allowIntersperse bool, arguments []string) error {
	f.parsed = true
	f.procArgs = arguments
	f.procFlag = ""
	f.args = nil
	f.allowIntersperse = allowIntersperse
	for {
		name, long, finished, err := f.parseOneGnu()
		if !finished {
			if name != "" {
				finished, err = f.parseGnuFlagArg(name, long)
			}
		}
		if err != nil {
			// ContinueOnError surfaces the error to the caller;
			// ExitOnError and PanicOnError terminate here.
			switch f.errorHandling {
			case ContinueOnError:
				return err
			case ExitOnError:
				os.Exit(2)
			case PanicOnError:
				panic(err)
			}
		}
		if !finished {
			continue
		}
		if err == nil {
			break
		}
	}
	return nil
}

// Parsed reports whether f.Parse has been called.
func (f *FlagSet) Parsed() bool {
	return f.parsed
}

// Parse parses the command-line flags from os.Args[1:]. Must be called
// after all flags are defined and before flags are accessed by the program.
// If allowIntersperse is set, arguments and flags can be interspersed, that
// is flags can follow positional arguments.
func Parse(allowIntersperse bool) { // Ignore errors; commandLine is set for ExitOnError. commandLine.Parse(allowIntersperse, os.Args[1:]) } // Parsed returns true if the command-line flags have been parsed. func Parsed() bool { return commandLine.Parsed() } // The default set of command-line flags, parsed from os.Args. var commandLine = NewFlagSet(os.Args[0], ExitOnError) // NewFlagSet returns a new, empty flag set with the specified name and // error handling property. func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { f := &FlagSet{ name: name, errorHandling: errorHandling, } return f } // Init sets the name and error handling property for a flag set. // By default, the zero FlagSet uses an empty name and the // ContinueOnError error handling policy. func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { f.name = name f.errorHandling = errorHandling } charm-2.1.1/src/golang.org/0000775000175000017500000000000012672604436014431 5ustar marcomarcocharm-2.1.1/src/golang.org/x/0000775000175000017500000000000012672604475014703 5ustar marcomarcocharm-2.1.1/src/golang.org/x/net/0000775000175000017500000000000012672604440015461 5ustar marcomarcocharm-2.1.1/src/golang.org/x/net/html/0000775000175000017500000000000012672604440016425 5ustar marcomarcocharm-2.1.1/src/golang.org/x/net/html/doc.go0000664000175000017500000000647612672604440017536 0ustar marcomarco// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. /* Package html implements an HTML5-compliant tokenizer and parser. Tokenization is done by creating a Tokenizer for an io.Reader r. It is the caller's responsibility to ensure that r provides UTF-8 encoded HTML. z := html.NewTokenizer(r) Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), which parses the next token and returns its type, or an error: for { tt := z.Next() if tt == html.ErrorToken { // ... 
return ... } // Process the current token. } There are two APIs for retrieving the current token. The high-level API is to call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs allow optionally calling Raw after Next but before Token, Text, TagName, or TagAttr. In EBNF notation, the valid call sequence per token is: Next {Raw} [ Token | Text | TagName {TagAttr} ] Token returns an independent data structure that completely describes a token. Entities (such as "<") are unescaped, tag names and attribute keys are lower-cased, and attributes are collected into a []Attribute. For example: for { if z.Next() == html.ErrorToken { // Returning io.EOF indicates success. return z.Err() } emitToken(z.Token()) } The low-level API performs fewer allocations and copies, but the contents of the []byte values returned by Text, TagName and TagAttr may change on the next call to Next. For example, to extract an HTML page's anchor text: depth := 0 for { tt := z.Next() switch tt { case ErrorToken: return z.Err() case TextToken: if depth > 0 { // emitBytes should copy the []byte it receives, // if it doesn't process it immediately. emitBytes(z.Text()) } case StartTagToken, EndTagToken: tn, _ := z.TagName() if len(tn) == 1 && tn[0] == 'a' { if tt == StartTagToken { depth++ } else { depth-- } } } } Parsing is done by calling Parse with an io.Reader, which returns the root of the parse tree (the document element) as a *Node. It is the caller's responsibility to ensure that the Reader provides UTF-8 encoded HTML. For example, to process each anchor node in depth-first order: doc, err := html.Parse(r) if err != nil { // ... } var f func(*html.Node) f = func(n *html.Node) { if n.Type == html.ElementNode && n.Data == "a" { // Do something with n... 
} for c := n.FirstChild; c != nil; c = c.NextSibling { f(c) } } f(doc) The relevant specifications include: https://html.spec.whatwg.org/multipage/syntax.html and https://html.spec.whatwg.org/multipage/syntax.html#tokenization */ package html // import "golang.org/x/net/html" // The tokenization algorithm implemented by this package is not a line-by-line // transliteration of the relatively verbose state-machine in the WHATWG // specification. A more direct approach is used instead, where the program // counter implies the state, such as whether it is tokenizing a tag or a text // node. Specification compliance is verified by checking expected and actual // outputs over a test suite rather than aiming for algorithmic fidelity. // TODO(nigeltao): Does a DOM API belong in this package or a separate one? // TODO(nigeltao): How does parsing interact with a JavaScript engine? charm-2.1.1/src/golang.org/x/net/html/node.go0000664000175000017500000001134312672604440017703 0ustar marcomarco// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package html import ( "golang.org/x/net/html/atom" ) // A NodeType is the type of a Node. type NodeType uint32 const ( ErrorNode NodeType = iota TextNode DocumentNode ElementNode CommentNode DoctypeNode scopeMarkerNode ) // Section 12.2.3.3 says "scope markers are inserted when entering applet // elements, buttons, object elements, marquees, table cells, and table // captions, and are used to prevent formatting from 'leaking'". var scopeMarker = Node{Type: scopeMarkerNode} // A Node consists of a NodeType and some Data (tag name for element nodes, // content for text) and are part of a tree of Nodes. Element nodes may also // have a Namespace and contain a slice of Attributes. 
Data is unescaped, so // that it looks like "a 0 { return (*s)[i-1] } return nil } // index returns the index of the top-most occurrence of n in the stack, or -1 // if n is not present. func (s *nodeStack) index(n *Node) int { for i := len(*s) - 1; i >= 0; i-- { if (*s)[i] == n { return i } } return -1 } // insert inserts a node at the given index. func (s *nodeStack) insert(i int, n *Node) { (*s) = append(*s, nil) copy((*s)[i+1:], (*s)[i:]) (*s)[i] = n } // remove removes a node from the stack. It is a no-op if n is not present. func (s *nodeStack) remove(n *Node) { i := s.index(n) if i == -1 { return } copy((*s)[i:], (*s)[i+1:]) j := len(*s) - 1 (*s)[j] = nil *s = (*s)[:j] } charm-2.1.1/src/golang.org/x/net/html/const.go0000664000175000017500000000443612672604440020111 0ustar marcomarco// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package html // Section 12.2.3.2 of the HTML5 specification says "The following elements // have varying levels of special parsing rules". 
// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements var isSpecialElementMap = map[string]bool{ "address": true, "applet": true, "area": true, "article": true, "aside": true, "base": true, "basefont": true, "bgsound": true, "blockquote": true, "body": true, "br": true, "button": true, "caption": true, "center": true, "col": true, "colgroup": true, "dd": true, "details": true, "dir": true, "div": true, "dl": true, "dt": true, "embed": true, "fieldset": true, "figcaption": true, "figure": true, "footer": true, "form": true, "frame": true, "frameset": true, "h1": true, "h2": true, "h3": true, "h4": true, "h5": true, "h6": true, "head": true, "header": true, "hgroup": true, "hr": true, "html": true, "iframe": true, "img": true, "input": true, "isindex": true, "li": true, "link": true, "listing": true, "marquee": true, "menu": true, "meta": true, "nav": true, "noembed": true, "noframes": true, "noscript": true, "object": true, "ol": true, "p": true, "param": true, "plaintext": true, "pre": true, "script": true, "section": true, "select": true, "source": true, "style": true, "summary": true, "table": true, "tbody": true, "td": true, "template": true, "textarea": true, "tfoot": true, "th": true, "thead": true, "title": true, "tr": true, "track": true, "ul": true, "wbr": true, "xmp": true, } func isSpecialElement(element *Node) bool { switch element.Namespace { case "", "html": return isSpecialElementMap[element.Data] case "svg": return element.Data == "foreignObject" } return false } charm-2.1.1/src/golang.org/x/net/html/testdata/0000775000175000017500000000000012672604440020236 5ustar marcomarcocharm-2.1.1/src/golang.org/x/net/html/testdata/webkit/0000775000175000017500000000000012672604440021523 5ustar marcomarcocharm-2.1.1/src/golang.org/x/net/html/testdata/webkit/tests5.dat0000664000175000017500000000613112672604440023445 0ustar marcomarco#data x #errors Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. 
Line: 1 Col: 22 Unexpected end of file. Expected end tag (style). #document | | | --> x #errors Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. #document | | | x #errors Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. #document | | | x #errors Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. #document | | | x #errors Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. #document | | | --> #errors Line: 1 Col: 52 Unexpected end tag (style). #document | | | | X #errors #document | | | | ...--> #errors Line: 1 Col: 51 Unexpected end tag (style). #document | | | | X #errors #document | | | | --> #errors Line: 1 Col: 66 Unexpected end tag (style). #document | | | | #errors #document | | | | #errors Line: 1 Col: 63 Unexpected end tag (style). #document | | | | X #errors #document | | | | --> #errors Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. Line: 1 Col: 37 Unexpected end tag (style). #document | | | X #errors Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. #document | | | ...--> #errors Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. Line: 1 Col: 36 Unexpected end tag (style). #document | | | X #errors Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. #document | | | --> #errors Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. Line: 1 Col: 51 Unexpected end tag (style). #document | | | #errors Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. #document | | | #errors Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. Line: 1 Col: 48 Unexpected end tag (style). #document | | | X #errors Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. #document | | | ddd #errors #document | | | #errors #document | | | | |
  • | | #errors #document | | | | | | --> #errors #document | | | --> #errors #document | | |
    #errors #document | | | | |
    | | #errors #document | | | | | | "X" | <meta> | name="z" | <link> | rel="foo" | <style> | " x { content:"</style" } " #data <!DOCTYPE html><select><optgroup></optgroup></select> #errors #document | <!DOCTYPE html> | <html> | <head> | <body> | <select> | <optgroup> #data #errors Line: 2 Col: 1 Unexpected End of file. Expected DOCTYPE. #document | <html> | <head> | <body> #data <!DOCTYPE html> <html> #errors #document | <!DOCTYPE html> | <html> | <head> | <body> #data <!DOCTYPE html><script> </script> <title>x #errors #document | | | | abc #errors #document | | | | | "abc" |
    | | | abc #errors #document | | | | | "abc" |
    | | | abc #errors #document | | | | |
    | #errors Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. Line: 1 Col: 20 Unexpected end tag (strong) in table context caused voodoo mode. Line: 1 Col: 20 End tag (strong) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 24 Unexpected end tag (b) in table context caused voodoo mode. Line: 1 Col: 24 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 29 Unexpected end tag (em) in table context caused voodoo mode. Line: 1 Col: 29 End tag (em) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 33 Unexpected end tag (i) in table context caused voodoo mode. Line: 1 Col: 33 End tag (i) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 37 Unexpected end tag (u) in table context caused voodoo mode. Line: 1 Col: 37 End tag (u) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 46 Unexpected end tag (strike) in table context caused voodoo mode. Line: 1 Col: 46 End tag (strike) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 50 Unexpected end tag (s) in table context caused voodoo mode. Line: 1 Col: 50 End tag (s) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 58 Unexpected end tag (blink) in table context caused voodoo mode. Line: 1 Col: 58 Unexpected end tag (blink). Ignored. Line: 1 Col: 63 Unexpected end tag (tt) in table context caused voodoo mode. Line: 1 Col: 63 End tag (tt) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 69 Unexpected end tag (pre) in table context caused voodoo mode. Line: 1 Col: 69 End tag (pre) seen too early. Expected other end tag. Line: 1 Col: 75 Unexpected end tag (big) in table context caused voodoo mode. Line: 1 Col: 75 End tag (big) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 83 Unexpected end tag (small) in table context caused voodoo mode. 
Line: 1 Col: 83 End tag (small) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 90 Unexpected end tag (font) in table context caused voodoo mode. Line: 1 Col: 90 End tag (font) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 99 Unexpected end tag (select) in table context caused voodoo mode. Line: 1 Col: 99 Unexpected end tag (select). Ignored. Line: 1 Col: 104 Unexpected end tag (h1) in table context caused voodoo mode. Line: 1 Col: 104 End tag (h1) seen too early. Expected other end tag. Line: 1 Col: 109 Unexpected end tag (h2) in table context caused voodoo mode. Line: 1 Col: 109 End tag (h2) seen too early. Expected other end tag. Line: 1 Col: 114 Unexpected end tag (h3) in table context caused voodoo mode. Line: 1 Col: 114 End tag (h3) seen too early. Expected other end tag. Line: 1 Col: 119 Unexpected end tag (h4) in table context caused voodoo mode. Line: 1 Col: 119 End tag (h4) seen too early. Expected other end tag. Line: 1 Col: 124 Unexpected end tag (h5) in table context caused voodoo mode. Line: 1 Col: 124 End tag (h5) seen too early. Expected other end tag. Line: 1 Col: 129 Unexpected end tag (h6) in table context caused voodoo mode. Line: 1 Col: 129 End tag (h6) seen too early. Expected other end tag. Line: 1 Col: 136 Unexpected end tag (body) in the table row phase. Ignored. Line: 1 Col: 141 Unexpected end tag (br) in table context caused voodoo mode. Line: 1 Col: 141 Unexpected end tag (br). Treated as br element. Line: 1 Col: 145 Unexpected end tag (a) in table context caused voodoo mode. Line: 1 Col: 145 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 151 Unexpected end tag (img) in table context caused voodoo mode. Line: 1 Col: 151 This element (img) has no end tag. Line: 1 Col: 159 Unexpected end tag (title) in table context caused voodoo mode. Line: 1 Col: 159 Unexpected end tag (title). Ignored. 
Line: 1 Col: 166 Unexpected end tag (span) in table context caused voodoo mode. Line: 1 Col: 166 Unexpected end tag (span). Ignored. Line: 1 Col: 174 Unexpected end tag (style) in table context caused voodoo mode. Line: 1 Col: 174 Unexpected end tag (style). Ignored. Line: 1 Col: 183 Unexpected end tag (script) in table context caused voodoo mode. Line: 1 Col: 183 Unexpected end tag (script). Ignored. Line: 1 Col: 196 Unexpected end tag (th). Ignored. Line: 1 Col: 201 Unexpected end tag (td). Ignored. Line: 1 Col: 206 Unexpected end tag (tr). Ignored. Line: 1 Col: 214 This element (frame) has no end tag. Line: 1 Col: 221 This element (area) has no end tag. Line: 1 Col: 228 Unexpected end tag (link). Ignored. Line: 1 Col: 236 This element (param) has no end tag. Line: 1 Col: 241 This element (hr) has no end tag. Line: 1 Col: 249 This element (input) has no end tag. Line: 1 Col: 255 Unexpected end tag (col). Ignored. Line: 1 Col: 262 Unexpected end tag (base). Ignored. Line: 1 Col: 269 Unexpected end tag (meta). Ignored. Line: 1 Col: 280 This element (basefont) has no end tag. Line: 1 Col: 290 This element (bgsound) has no end tag. Line: 1 Col: 298 This element (embed) has no end tag. Line: 1 Col: 307 This element (spacer) has no end tag. Line: 1 Col: 311 Unexpected end tag (p). Ignored. Line: 1 Col: 316 End tag (dd) seen too early. Expected other end tag. Line: 1 Col: 321 End tag (dt) seen too early. Expected other end tag. Line: 1 Col: 331 Unexpected end tag (caption). Ignored. Line: 1 Col: 342 Unexpected end tag (colgroup). Ignored. Line: 1 Col: 350 Unexpected end tag (tbody). Ignored. Line: 1 Col: 358 Unexpected end tag (tfoot). Ignored. Line: 1 Col: 366 Unexpected end tag (thead). Ignored. Line: 1 Col: 376 End tag (address) seen too early. Expected other end tag. Line: 1 Col: 389 End tag (blockquote) seen too early. Expected other end tag. Line: 1 Col: 398 End tag (center) seen too early. Expected other end tag. Line: 1 Col: 404 Unexpected end tag (dir). 
Ignored. Line: 1 Col: 410 End tag (div) seen too early. Expected other end tag. Line: 1 Col: 415 End tag (dl) seen too early. Expected other end tag. Line: 1 Col: 426 End tag (fieldset) seen too early. Expected other end tag. Line: 1 Col: 436 End tag (listing) seen too early. Expected other end tag. Line: 1 Col: 443 End tag (menu) seen too early. Expected other end tag. Line: 1 Col: 448 End tag (ol) seen too early. Expected other end tag. Line: 1 Col: 453 End tag (ul) seen too early. Expected other end tag. Line: 1 Col: 458 End tag (li) seen too early. Expected other end tag. Line: 1 Col: 465 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 471 This element (wbr) has no end tag. Line: 1 Col: 487 End tag (button) seen too early. Expected other end tag. Line: 1 Col: 497 End tag (marquee) seen too early. Expected other end tag. Line: 1 Col: 506 End tag (object) seen too early. Expected other end tag. Line: 1 Col: 524 Unexpected end tag (html). Ignored. Line: 1 Col: 524 Unexpected end tag (frameset). Ignored. Line: 1 Col: 531 Unexpected end tag (head). Ignored. Line: 1 Col: 540 Unexpected end tag (iframe). Ignored. Line: 1 Col: 548 This element (image) has no end tag. Line: 1 Col: 558 This element (isindex) has no end tag. Line: 1 Col: 568 Unexpected end tag (noembed). Ignored. Line: 1 Col: 579 Unexpected end tag (noframes). Ignored. Line: 1 Col: 590 Unexpected end tag (noscript). Ignored. Line: 1 Col: 601 Unexpected end tag (optgroup). Ignored. Line: 1 Col: 610 Unexpected end tag (option). Ignored. Line: 1 Col: 622 Unexpected end tag (plaintext). Ignored. Line: 1 Col: 633 Unexpected end tag (textarea). Ignored. #document | | | |
    |
    | abc #errors #document | | | | | | | |

    #errors Line: 1 Col: 9 Unexpected end tag (strong). Expected DOCTYPE. Line: 1 Col: 9 Unexpected end tag (strong) after the (implied) root element. Line: 1 Col: 13 Unexpected end tag (b) after the (implied) root element. Line: 1 Col: 18 Unexpected end tag (em) after the (implied) root element. Line: 1 Col: 22 Unexpected end tag (i) after the (implied) root element. Line: 1 Col: 26 Unexpected end tag (u) after the (implied) root element. Line: 1 Col: 35 Unexpected end tag (strike) after the (implied) root element. Line: 1 Col: 39 Unexpected end tag (s) after the (implied) root element. Line: 1 Col: 47 Unexpected end tag (blink) after the (implied) root element. Line: 1 Col: 52 Unexpected end tag (tt) after the (implied) root element. Line: 1 Col: 58 Unexpected end tag (pre) after the (implied) root element. Line: 1 Col: 64 Unexpected end tag (big) after the (implied) root element. Line: 1 Col: 72 Unexpected end tag (small) after the (implied) root element. Line: 1 Col: 79 Unexpected end tag (font) after the (implied) root element. Line: 1 Col: 88 Unexpected end tag (select) after the (implied) root element. Line: 1 Col: 93 Unexpected end tag (h1) after the (implied) root element. Line: 1 Col: 98 Unexpected end tag (h2) after the (implied) root element. Line: 1 Col: 103 Unexpected end tag (h3) after the (implied) root element. Line: 1 Col: 108 Unexpected end tag (h4) after the (implied) root element. Line: 1 Col: 113 Unexpected end tag (h5) after the (implied) root element. Line: 1 Col: 118 Unexpected end tag (h6) after the (implied) root element. Line: 1 Col: 125 Unexpected end tag (body) after the (implied) root element. Line: 1 Col: 130 Unexpected end tag (br). Treated as br element. Line: 1 Col: 134 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 140 This element (img) has no end tag. Line: 1 Col: 148 Unexpected end tag (title). Ignored. Line: 1 Col: 155 Unexpected end tag (span). Ignored. 
Line: 1 Col: 163 Unexpected end tag (style). Ignored. Line: 1 Col: 172 Unexpected end tag (script). Ignored. Line: 1 Col: 180 Unexpected end tag (table). Ignored. Line: 1 Col: 185 Unexpected end tag (th). Ignored. Line: 1 Col: 190 Unexpected end tag (td). Ignored. Line: 1 Col: 195 Unexpected end tag (tr). Ignored. Line: 1 Col: 203 This element (frame) has no end tag. Line: 1 Col: 210 This element (area) has no end tag. Line: 1 Col: 217 Unexpected end tag (link). Ignored. Line: 1 Col: 225 This element (param) has no end tag. Line: 1 Col: 230 This element (hr) has no end tag. Line: 1 Col: 238 This element (input) has no end tag. Line: 1 Col: 244 Unexpected end tag (col). Ignored. Line: 1 Col: 251 Unexpected end tag (base). Ignored. Line: 1 Col: 258 Unexpected end tag (meta). Ignored. Line: 1 Col: 269 This element (basefont) has no end tag. Line: 1 Col: 279 This element (bgsound) has no end tag. Line: 1 Col: 287 This element (embed) has no end tag. Line: 1 Col: 296 This element (spacer) has no end tag. Line: 1 Col: 300 Unexpected end tag (p). Ignored. Line: 1 Col: 305 End tag (dd) seen too early. Expected other end tag. Line: 1 Col: 310 End tag (dt) seen too early. Expected other end tag. Line: 1 Col: 320 Unexpected end tag (caption). Ignored. Line: 1 Col: 331 Unexpected end tag (colgroup). Ignored. Line: 1 Col: 339 Unexpected end tag (tbody). Ignored. Line: 1 Col: 347 Unexpected end tag (tfoot). Ignored. Line: 1 Col: 355 Unexpected end tag (thead). Ignored. Line: 1 Col: 365 End tag (address) seen too early. Expected other end tag. Line: 1 Col: 378 End tag (blockquote) seen too early. Expected other end tag. Line: 1 Col: 387 End tag (center) seen too early. Expected other end tag. Line: 1 Col: 393 Unexpected end tag (dir). Ignored. Line: 1 Col: 399 End tag (div) seen too early. Expected other end tag. Line: 1 Col: 404 End tag (dl) seen too early. Expected other end tag. Line: 1 Col: 415 End tag (fieldset) seen too early. Expected other end tag. 
Line: 1 Col: 425 End tag (listing) seen too early. Expected other end tag. Line: 1 Col: 432 End tag (menu) seen too early. Expected other end tag. Line: 1 Col: 437 End tag (ol) seen too early. Expected other end tag. Line: 1 Col: 442 End tag (ul) seen too early. Expected other end tag. Line: 1 Col: 447 End tag (li) seen too early. Expected other end tag. Line: 1 Col: 454 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 460 This element (wbr) has no end tag. Line: 1 Col: 476 End tag (button) seen too early. Expected other end tag. Line: 1 Col: 486 End tag (marquee) seen too early. Expected other end tag. Line: 1 Col: 495 End tag (object) seen too early. Expected other end tag. Line: 1 Col: 513 Unexpected end tag (html). Ignored. Line: 1 Col: 513 Unexpected end tag (frameset). Ignored. Line: 1 Col: 520 Unexpected end tag (head). Ignored. Line: 1 Col: 529 Unexpected end tag (iframe). Ignored. Line: 1 Col: 537 This element (image) has no end tag. Line: 1 Col: 547 This element (isindex) has no end tag. Line: 1 Col: 557 Unexpected end tag (noembed). Ignored. Line: 1 Col: 568 Unexpected end tag (noframes). Ignored. Line: 1 Col: 579 Unexpected end tag (noscript). Ignored. Line: 1 Col: 590 Unexpected end tag (optgroup). Ignored. Line: 1 Col: 599 Unexpected end tag (option). Ignored. Line: 1 Col: 611 Unexpected end tag (plaintext). Ignored. Line: 1 Col: 622 Unexpected end tag (textarea). Ignored. #document | | | |
    |

    #data

    | abc #errors #document | | | | | | abc #errors #document | | | | |
    X
    #errors Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. Line: 1 Col: 33 Got table cell end tag (td) while required end tags are missing. Line: 1 Col: 48 Got table cell end tag (th) while required end tags are missing. #document | | | | | | | |
    | | | | | "X" #data <p>

    #errors Line: 1 Col: 6 Unexpected start tag (body). Expected DOCTYPE. Line: 1 Col: 12 Unexpected start tag (body). Line: 1 Col: 54 Unexpected start tag (body). Line: 1 Col: 64 Unexpected end tag (p). Missing end tag (body). #document | | | | | | | | "<p>" | <p> #data <textarea><p></textarea> #errors Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. #document | <html> | <head> | <body> | <textarea> | "<p>" #data <p><image></p> #errors Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE. Line: 1 Col: 10 Unexpected start tag (image). Treated as img. #document | <html> | <head> | <body> | <p> | <img> #data <a><table><a></table><p><a><div><a> #errors Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE. Line: 1 Col: 13 Unexpected start tag (a) in table context caused voodoo mode. Line: 1 Col: 13 Unexpected start tag (a) implies end tag (a). Line: 1 Col: 13 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 21 Unexpected end tag (table). Expected end tag (a). Line: 1 Col: 27 Unexpected start tag (a) implies end tag (a). Line: 1 Col: 27 End tag (a) violates step 1, paragraph 2 of the adoption agency algorithm. Line: 1 Col: 32 Unexpected end tag (p). Ignored. Line: 1 Col: 35 Unexpected start tag (a) implies end tag (a). Line: 1 Col: 35 End tag (a) violates step 1, paragraph 2 of the adoption agency algorithm. Line: 1 Col: 35 Expected closing tag. Unexpected end of file. #document | <html> | <head> | <body> | <a> | <a> | <table> | <p> | <a> | <div> | <a> #data <head></p><meta><p> #errors Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. Line: 1 Col: 10 Unexpected end tag (p). Ignored. #document | <html> | <head> | <meta> | <body> | <p> #data <head></html><meta><p> #errors Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. Line: 1 Col: 19 Unexpected start tag (meta). 
#document | <html> | <head> | <body> | <meta> | <p> #data <b><table><td><i></table> #errors Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE. Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase. Line: 1 Col: 25 Got table cell end tag (td) while required end tags are missing. Line: 1 Col: 25 Expected closing tag. Unexpected end of file. #document | <html> | <head> | <body> | <b> | <table> | <tbody> | <tr> | <td> | <i> #data <b><table><td></b><i></table> #errors Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE. Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase. Line: 1 Col: 18 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 29 Got table cell end tag (td) while required end tags are missing. Line: 1 Col: 29 Expected closing tag. Unexpected end of file. #document | <html> | <head> | <body> | <b> | <table> | <tbody> | <tr> | <td> | <i> #data <h1><h2> #errors 4: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>â€. 8: Heading cannot be a child of another heading. 8: End of file seen and there were open elements. #document | <html> | <head> | <body> | <h1> | <h2> #data <a><p><a></a></p></a> #errors Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE. Line: 1 Col: 9 Unexpected start tag (a) implies end tag (a). Line: 1 Col: 9 End tag (a) violates step 1, paragraph 3 of the adoption agency algorithm. Line: 1 Col: 21 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm. #document | <html> | <head> | <body> | <a> | <p> | <a> | <a> #data <b><button></b></button></b> #errors Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE. Line: 1 Col: 15 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm. #document | <html> | <head> | <body> | <b> | <button> | <b> #data <p><b><div><marquee></p></b></div> #errors Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE. 
Line: 1 Col: 11 Unexpected end tag (p). Ignored. Line: 1 Col: 24 Unexpected end tag (p). Ignored. Line: 1 Col: 28 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm. Line: 1 Col: 34 End tag (div) seen too early. Expected other end tag. Line: 1 Col: 34 Expected closing tag. Unexpected end of file. #document | <html> | <head> | <body> | <p> | <b> | <div> | <b> | <marquee> | <p> #data <script></script></div><title>

    #errors Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. Line: 1 Col: 23 Unexpected end tag (div). Ignored. #document | | |


    | | |

    #data #errors Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE. Line: 1 Col: 10 Expected closing tag. Unexpected end of file. #document | | | charm-2.1.1/src/golang.org/x/net/html/testdata/webkit/entities01.dat0000664000175000017500000001342112672604440024203 0ustar marcomarco#data FOO>BAR #errors #document | | | | "FOO>BAR" #data FOO>BAR #errors #document | | | | "FOO>BAR" #data FOO> BAR #errors #document | | | | "FOO> BAR" #data FOO>;;BAR #errors #document | | | | "FOO>;;BAR" #data I'm ¬it; I tell you #errors #document | | | | "I'm ¬it; I tell you" #data I'm ∉ I tell you #errors #document | | | | "I'm ∉ I tell you" #data FOO& BAR #errors #document | | | | "FOO& BAR" #data FOO& #errors #document | | | | "FOO&" | #data FOO&&&>BAR #errors #document | | | | "FOO&&&>BAR" #data FOO)BAR #errors #document | | | | "FOO)BAR" #data FOOABAR #errors #document | | | | "FOOABAR" #data FOOABAR #errors #document | | | | "FOOABAR" #data FOO&#BAR #errors #document | | | | "FOO&#BAR" #data FOO&#ZOO #errors #document | | | | "FOO&#ZOO" #data FOOºR #errors #document | | | | "FOOºR" #data FOO&#xZOO #errors #document | | | | "FOO&#xZOO" #data FOO&#XZOO #errors #document | | | | "FOO&#XZOO" #data FOO)BAR #errors #document | | | | "FOO)BAR" #data FOO䆺R #errors #document | | | | "FOO䆺R" #data FOOAZOO #errors #document | | | | "FOOAZOO" #data FOO�ZOO #errors #document | | | | "FOO�ZOO" #data FOOxZOO #errors #document | | | | "FOOxZOO" #data FOOyZOO #errors #document | | | | "FOOyZOO" #data FOO€ZOO #errors #document | | | | "FOO€ZOO" #data FOOZOO #errors #document | | | | "FOOÂZOO" #data FOO‚ZOO #errors #document | | | | "FOO‚ZOO" #data FOOƒZOO #errors #document | | | | "FOOÆ’ZOO" #data FOO„ZOO #errors #document | | | | "FOO„ZOO" #data FOO…ZOO #errors #document | | | | "FOO…ZOO" #data FOO†ZOO #errors #document | | | | "FOO†ZOO" #data FOO‡ZOO #errors #document | | | | "FOO‡ZOO" #data FOOˆZOO #errors #document | | | | "FOOˆZOO" #data FOO‰ZOO #errors #document | | | | 
"FOO‰ZOO" #data FOOŠZOO #errors #document | | | | "FOOÅ ZOO" #data FOO‹ZOO #errors #document | | | | "FOO‹ZOO" #data FOOŒZOO #errors #document | | | | "FOOÅ’ZOO" #data FOOZOO #errors #document | | | | "FOOÂZOO" #data FOOŽZOO #errors #document | | | | "FOOŽZOO" #data FOOZOO #errors #document | | | | "FOOÂZOO" #data FOOZOO #errors #document | | | | "FOOÂZOO" #data FOO‘ZOO #errors #document | | | | "FOO‘ZOO" #data FOO’ZOO #errors #document | | | | "FOO’ZOO" #data FOO“ZOO #errors #document | | | | "FOO“ZOO" #data FOO”ZOO #errors #document | | | | "FOOâ€ZOO" #data FOO•ZOO #errors #document | | | | "FOO•ZOO" #data FOO–ZOO #errors #document | | | | "FOO–ZOO" #data FOO—ZOO #errors #document | | | | "FOO—ZOO" #data FOO˜ZOO #errors #document | | | | "FOOËœZOO" #data FOO™ZOO #errors #document | | | | "FOOâ„¢ZOO" #data FOOšZOO #errors #document | | | | "FOOÅ¡ZOO" #data FOO›ZOO #errors #document | | | | "FOO›ZOO" #data FOOœZOO #errors #document | | | | "FOOÅ“ZOO" #data FOOZOO #errors #document | | | | "FOOÂZOO" #data FOOžZOO #errors #document | | | | "FOOžZOO" #data FOOŸZOO #errors #document | | | | "FOOŸZOO" #data FOO ZOO #errors #document | | | | "FOO ZOO" #data FOO퟿ZOO #errors #document | | | | "FOO퟿ZOO" #data FOO�ZOO #errors #document | | | | "FOO�ZOO" #data FOO�ZOO #errors #document | | | | "FOO�ZOO" #data FOO�ZOO #errors #document | | | | "FOO�ZOO" #data FOO�ZOO #errors #document | | | | "FOO�ZOO" #data FOOZOO #errors #document | | | | "FOOZOO" #data FOO􏿾ZOO #errors #document | | | | "FOOô¿¾ZOO" #data FOO􈟔ZOO #errors #document | | | | "FOOôˆŸ”ZOO" #data FOO􏿿ZOO #errors #document | | | | "FOOô¿¿ZOO" #data FOO�ZOO #errors #document | | | | "FOO�ZOO" #data FOO�ZOO #errors #document | | | | "FOO�ZOO" charm-2.1.1/src/golang.org/x/net/html/testdata/webkit/tests17.dat0000664000175000017500000000372112672604440023532 0ustar marcomarco#data

    #errors #document | | | | |
    | | #data
    #errors #document | | | | | | | |
    #data
    #errors #document | | | | | | | |
    | #data
    #errors #document | | | | | | | | #errors #document-fragment tbody #document | | #data | | |
    | #data #errors #document | | | | |
    | | #data #errors #document | | | | | #errors #document | | | | | #errors #document | | | | | #errors #document | | | | | #errors #document-fragment colgroup #document | #data #errors #document-fragment colgroup #document | #data #errors #document-fragment tbody #document | #data #errors #document-fragment tbody #document | #data #errors #document-fragment tbody #document | #data #errors #document-fragment tbody #document | #data #errors #document-fragment tbody #document | #data
    |
    #errors #document | | | | | #errors #document | | | | |
    #errors #document | | | | |
    a #errors #document | | | | | | | | "a" charm-2.1.1/src/golang.org/x/net/html/testdata/webkit/pending-spec-changes.dat0000664000175000017500000000245512672604440026205 0ustar marcomarco#data #errors 21: Start tag seen without seeing a doctype first. Expected “â€. 31: “frameset†start tag seen. 31: End of file seen and there were open elements. #document | | | #data
    foo
    bar #errors 47: End tag “table†did not match the name of the current open element (“svgâ€). 47: “table†closed but “caption†was still open. 47: End tag “table†seen, but there were open elements. 36: Unclosed element “svgâ€. #document | | | | | | #errors #document-fragment caption #document | #data #errors #document-fragment caption #document | | #data #errors #document-fragment caption #document | | #data #errors #document-fragment caption #document | | #data #errors #document-fragment caption #document | | #data #errors #document-fragment caption #document | | #data #errors #document-fragment caption #document | | #data #errors #document-fragment caption #document | | #data #errors #document-fragment caption #document | | #data
    | | "foo" | "bar" #data
    #errors 7: Start tag seen without seeing a doctype first. Expected “â€. 30: A table cell was implicitly closed, but there were open elements. 26: Unclosed element “descâ€. 20: Unclosed element “svgâ€. 37: Stray end tag “descâ€. 45: End of file seen and there were open elements. 45: Unclosed element “circleâ€. 7: Unclosed element “tableâ€. #document | | | | | | | #errors #document-fragment table #document | | #data #errors #document-fragment table #document | #data #errors #document-fragment table #document | #data #errors #document-fragment table #document | | | #data #errors #document-fragment table #document | | | #data #errors #document-fragment table #document | | | #data #errors #document-fragment table #document | | | #data #errors #document-fragment table #document | | | #data | | | | #errors #document-fragment caption #document |
    | | | | �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������charm-2.1.1/src/golang.org/x/net/html/testdata/webkit/isindex.dat�����������������������������������0000664�0001750�0001750�00000001074�12672604440�023662� 0����������������������������������������������������������������������������������������������������ustar �marco���������������������������marco������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������#data #errors #document | | | |
    |
    |
    a #errors #document-fragment table #document | | | "a" #data
    #errors #document-fragment table #document | |
    #data #errors #document-fragment table #document | |
    #data
    #data
    #errors #document-fragment caption #document | #data
    #errors #document-fragment caption #document | #data
    #errors #document-fragment caption #document | | #data
    #errors #document-fragment caption #document | | #data
    #errors #document-fragment caption #document | | #data
    #errors #document-fragment caption #document | | #data
    #errors #document-fragment tbody #document | #data
    #errors #document-fragment tbody #document | #data
    #errors #document-fragment tbody #document | |
    #data #errors #document-fragment tbody #document | |
    #data #errors #document-fragment tbody #document | |
    #data #errors #document-fragment tbody #document | |
    | | | | #data
    #errors #document-fragment tr #document | #data #errors #document-fragment tr #document |
    | | | | | #data
    #errors #document-fragment tr #document | #data
    #errors #document-fragment tr #document | #data
    #errors #document-fragment tr #document | #data
    #errors #document-fragment tr #document | #data
    #errors #document-fragment tr #document | #data
    #errors #document-fragment tr #document | #data
    #errors #document-fragment tr #document | #data
    #errors #document-fragment tr #document | #data
    #errors #document-fragment tr #document | | | #errors #document-fragment td #document | #data #errors #document-fragment td #document | #data #errors #document-fragment td #document | #data #errors #document-fragment td #document | #data #errors #document-fragment td #document | #data #errors #document-fragment td #document | #data
    #data
    #errors #document-fragment tr #document | | | #errors #document-fragment td #document | #data #errors #document-fragment td #document | #data #errors #document-fragment td #document | #data #errors #document-fragment td #document | #data #errors #document-fragment td #document | #data #errors #document-fragment td #document | #data
    #data
    #errors #document-fragment td #document | #data
    #errors #document-fragment td #document | #data
    #errors #document-fragment td #document | #data
    #errors #document-fragment td #document | | | |
    | #data
    ` var helloWorld = []byte(`package main import ( "math/rand" "os" "time" "github.com/ajstarks/svgo" ) func ri(n int) int { return rand.Intn(n) } func main() { canvas := svg.New(os.Stdout) width := 500 height := 500 nstars := 250 style := "font-size:48pt;fill:white;text-anchor:middle" rand.Seed(time.Now().Unix()) canvas.Start(width, height) canvas.Rect(0,0,width,height) for i := 0; i < nstars; i++ { canvas.Circle(ri(width), ri(height), ri(3), "fill:white") } canvas.Circle(width/2, height, width/2, "fill:rgb(77, 117, 232)") canvas.Text(width/2, height*4/5, "hello, world", style) canvas.End() }`) charm-2.1.1/src/github.com/ajstarks/svgo/html5logo/0000775000175000017500000000000012672604563021146 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/html5logo/html5logo.go0000664000175000017500000000307412672604563023413 0ustar marcomarco// html5logo draws the w3c HTML5 logo, with scripting added // +build !appengine package main import ( "github.com/ajstarks/svgo" "os" ) func main() { // HTML5 logo data from // "Understanding and Optimizing Web Graphics", Session 508, // Dean Jackson, Apple WWDC 2011 // // Draggable elements via Jeff Schiller's dragsvg Javascript library // shield var sx = []int{71, 30, 481, 440, 255} var sy = []int{460, 0, 0, 460, 512} // highlight var hx = []int{256, 405, 440, 256} var hy = []int{472, 431, 37, 37} // "five" var fx = []int{181, 176, 392, 393, 396, 397, 114, 115, 129, 325, 318, 256, 192, 188, 132, 139, 256, 371, 372, 385, 387, 371} var fy = []int{208, 150, 150, 138, 109, 94, 94, 109, 265, 265, 338, 355, 338, 293, 293, 382, 414, 382, 372, 223, 208, 208} canvas := svg.New(os.Stdout) width := 512 height := 512 // begin the document with the onload event, and namespace for dragging canvas.Start(width, height, `onload="initializeDraggableElements();"`, `xmlns:drag="http://www.codedread.com/dragsvg"`) canvas.Title("HTML5 Logo") canvas.Rect(0, 0, width, height) // black background canvas.Script("application/javascript", 
"http://www.codedread.com/dragsvg.js") // reference the drag script canvas.Polygon(sx, sy, `drag:enable="true"`, canvas.RGB(227, 79, 38)) // draggable shield canvas.Polygon(hx, hy, `drag:enable="true"`, canvas.RGBA(255, 255, 255, 0.3)) // draggable highlight canvas.Polygon(fx, fy, `drag:enable="true"`, canvas.RGB(219, 219, 219)) // draggable five canvas.End() } charm-2.1.1/src/github.com/ajstarks/svgo/barchart/0000775000175000017500000000000012672604563021022 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/barchart/barchart.go0000775000175000017500000002711112672604563023144 0ustar marcomarco// barchart - bar chart package main import ( "encoding/xml" "flag" "fmt" "github.com/ajstarks/svgo" "io" "math" "os" "strconv" "strings" ) var ( width, height, iscale, fontsize, barheight, gutter, cornerRadius, labelimit int bgcolor, barcolor, title, inbar, valformat string showtitle, showdata, showgrid, showscale, endtitle, trace bool ) const ( gstyle = "font-family:Calibri,sans-serif;font-size:%dpx" borderstyle = "stroke:lightgray;stroke-width:1px" scalestyle = "text-anchor:middle;font-size:75%" btitlestyle = "font-style:italic;font-size:150%;text-anchor:" notestyle = "font-style:italic;text-anchor:" datastyle = "text-anchor:end;fill:" titlestyle = "text-anchor:start;font-size:300%" labelstyle = "fill:black;baseline-shift:-25%" ) // a Barchart Defintion // // This is a note // More expository text // // // // // // type Barchart struct { Top int `xml:"top,attr"` Left int `xml:"left,attr"` Right int `xml:"right,attr"` Title string `xml:"title,attr"` Bdata []bdata `xml:"bdata"` Note []note `xml:"note"` } type bdata struct { Title string `xml:"title,attr"` Scale string `xml:"scale,attr"` Color string `xml:"color,attr"` Unit string `xml:"unit,attr"` Showdata bool `xml:"showdata,attr"` Showgrid bool `xml:"showgrid,attr"` Samebar bool `xml:"samebar,attr"` Bitem []bitem `xml:"bitem"` Bstack []bstack `xml:"bstack"` Note []note `xml:"note"` } type bitem struct { Name string 
`xml:"name,attr"` Value float64 `xml:"value,attr"` Color string `xml:"color,attr"` Samebar bool `xml:"samebar,attr"` } type bstack struct { Name string `xml:"name,attr"` Value string `xml:"value,attr"` Color string `xml:"color,attr"` } type note struct { Text string `xml:",chardata"` } // dobc does file i/o func dobc(location string, s *svg.SVG) { var f *os.File var err error if len(location) > 0 { f, err = os.Open(location) } else { f = os.Stdin } if err == nil { readbc(f, s) f.Close() } else { fmt.Fprintf(os.Stderr, "%v\n", err) } } // readbc reads and parses the XML specification func readbc(r io.Reader, s *svg.SVG) { var bc Barchart if err := xml.NewDecoder(r).Decode(&bc); err == nil { drawbc(bc, s) } else { fmt.Fprintf(os.Stderr, "%v\n", err) } } // drawbc draws the bar chart func drawbc(bg Barchart, canvas *svg.SVG) { if bg.Left == 0 { bg.Left = 250 } if bg.Right == 0 { bg.Right = 50 } if bg.Top == 0 { bg.Top = 50 } if len(title) > 0 { bg.Title = title } labelimit = bg.Left/8 cr := cornerRadius maxwidth := width - (bg.Left + bg.Right) x := bg.Left y := bg.Top sep := 10 color := barcolor scfmt := "%v" canvas.Title(bg.Title) // for each bdata element... 
for _, b := range bg.Bdata { if trace { fmt.Fprintf(os.Stderr, "# %s\n", b.Title) } // overide the color if specified if len(b.Color) > 0 { color = b.Color } else { color = barcolor } // extract the scale data from the XML attributes // if not specified, compute the scale factors sc := strings.Split(b.Scale, ",") var scalemin, scalemax, scaleincr float64 if len(sc) != 3 { if len(b.Bitem) > 0 { scalemin, scalemax, scaleincr = scalevalues(b.Bitem) } if len(b.Bstack) > 0 { scalemin, scalemax, scaleincr = scalestack(b.Bstack) } } else { scalemin, _ = strconv.ParseFloat(sc[0], 64) scalemax, _ = strconv.ParseFloat(sc[1], 64) scaleincr, _ = strconv.ParseFloat(sc[2], 64) } // label the graph canvas.Text(x, y, b.Title, btitlestyle+anchor()) y += sep * 2 chartop := y // draw the data items canvas.Gstyle(datastyle + color) // stacked bars for _, stack := range b.Bstack { if trace { fmt.Fprintf(os.Stderr, "%s~%s\n", stack.Value, stack.Name) } stackdata := stackvalues(stack.Value) if len(stackdata) < 1 { continue } sx := x canvas.Text(x-sep, y+barheight/2, textlimit(stack.Name, labelimit), labelstyle) barop := colorange(1.0, 0.3, len(stackdata)) for ns, sd := range stackdata { dw := vmap(sd, scalemin, scalemax, 0, float64(maxwidth)) if len(stack.Color) > 0 { canvas.Roundrect(sx, y, int(dw), barheight, cr, cr, fmt.Sprintf("fill:%s;fill-opacity:%.2f", stack.Color, barop[ns])) } else { canvas.Roundrect(sx, y, int(dw), barheight, cr, cr, fmt.Sprintf("fill-opacity:%.2f", barop[ns])) } if (showdata || b.Showdata) && sd > 0 { var valuestyle = "fill-opacity:1;font-style:italic;font-size:75%;text-anchor:middle;baseline-shift:-25%;" var ditem string var datax int if len(b.Unit) > 0 { ditem = fmt.Sprintf(valformat+"%s", sd, b.Unit) } else { ditem = fmt.Sprintf(valformat, sd) } if len(inbar) > 0 { valuestyle += inbar } else { valuestyle += "fill:black" } datax = sx + int(dw)/2 canvas.Text(datax, y+barheight/2, ditem, valuestyle) } sx += int(dw) } y += barheight + gutter } // plain bars for 
_, d := range b.Bitem { if trace { fmt.Fprintf(os.Stderr, "%.2f~%s\n", d.Value, d.Name) } canvas.Text(x-sep, y+barheight/2, textlimit(d.Name, labelimit), labelstyle) dw := vmap(d.Value, scalemin, scalemax, 0, float64(maxwidth)) var barop float64 if b.Samebar { barop = 0.3 } else { barop = 1.0 } if len(d.Color) > 0 { canvas.Roundrect(x, y, int(dw), barheight, cr, cr, fmt.Sprintf("fill:%s;fill-opacity:%.2f", d.Color, barop)) } else { canvas.Roundrect(x, y, int(dw), barheight, cr, cr, fmt.Sprintf("fill-opacity:%.2f", barop)) } if showdata || b.Showdata { var valuestyle = "fill-opacity:1;font-style:italic;font-size:75%;text-anchor:start;baseline-shift:-25%;" var ditem string var datax int if len(b.Unit) > 0 { ditem = fmt.Sprintf(valformat+"%s", d.Value, b.Unit) } else { ditem = fmt.Sprintf(valformat, d.Value) } if len(inbar) > 0 { valuestyle += inbar datax = x + fontsize/2 } else { valuestyle += "fill:black" datax = x + int(dw) + fontsize/2 } canvas.Text(datax, y+barheight/2, ditem, valuestyle) } if !d.Samebar { y += barheight + gutter } } canvas.Gend() // draw the scale and borders chartbot := y + gutter if showgrid || b.Showgrid { canvas.Line(x, chartop, x+maxwidth, chartop, borderstyle) // top border canvas.Line(x, chartbot-gutter, x+maxwidth, chartbot-gutter, borderstyle) // bottom border } if showscale { if scaleincr < 1 { scfmt = "%.1f" } else { scfmt = "%0.f" } canvas.Gstyle(scalestyle) for sc := scalemin; sc <= scalemax; sc += scaleincr { scx := vmap(sc, scalemin, scalemax, 0, float64(maxwidth)) canvas.Text(x+int(scx), chartbot+fontsize, fmt.Sprintf(scfmt, sc)) if showgrid || b.Showgrid { canvas.Line(x+int(scx), chartbot, x+int(scx), chartop, borderstyle) // grid line } } canvas.Gend() } // apply the note if present if len(b.Note) > 0 { canvas.Gstyle(notestyle + anchor()) y += fontsize * 2 leading := 3 for _, note := range b.Note { canvas.Text(bg.Left, y, note.Text) y += fontsize + leading } canvas.Gend() } y += sep * 7 // advance vertically for the next chart 
} // if requested, place the title below the last chart if showtitle && len(bg.Title) > 0 { y += fontsize * 2 canvas.Text(bg.Left, y, bg.Title, titlestyle) } // apply overall note if present if len(bg.Note) > 0 { canvas.Gstyle(notestyle + anchor()) y += fontsize * 2 leading := 3 for _, note := range bg.Note { canvas.Text(bg.Left, y, note.Text) y += fontsize + leading } canvas.Gend() } } func anchor() string { if endtitle { return "end" } return "start" } // vmap maps one interval to another func vmap(value float64, low1 float64, high1 float64, low2 float64, high2 float64) float64 { return low2 + (high2-low2)*(value-low1)/(high1-low1) } // maxitem finds the maxima is a collection of bar items func maxitem(data []bitem) float64 { max := -math.SmallestNonzeroFloat64 for _, d := range data { if d.Value > max { max = d.Value } } return max } // maxstack finds the maxima is a stack of bars func maxstack(stacks []bstack) float64 { max := -math.SmallestNonzeroFloat64 for _, s := range stacks { sv := stackvalues(s.Value) sum := 0.0 for _, d := range sv { sum += d } if sum > max { max = sum } } return max } // scale values returns the min, max, increment from a set of bar items func scalevalues(data []bitem) (float64, float64, float64) { var m, max, increment float64 rui := 5 m = maxitem(data) max = roundup(m, 100) if max > 2 { increment = roundup(max/float64(rui), 10) } else { increment = 0.4 } return 0, max, increment } // scalestack returns the min, max, increment from a stack of bars func scalestack(data []bstack) (float64, float64, float64) { var m, max, increment float64 rui := 5 m = maxstack(data) max = roundup(m, 100) if max > 2 { increment = roundup(max/float64(rui), 10) } else { increment = 0.4 } return 0, max, increment } // roundup rouds a floating point number up func roundup(n float64, m int) float64 { i := int(n) if i <= 2 { return 2 } for ; i%m != 0; i++ { } return float64(i) } // stack value returns the values from the value string of a stack func 
stackvalues(s string) []float64 { v := strings.Split(s, "/") if len(v) <= 0 { return nil } vals := make([]float64, len(v)) for i, x := range v { f, err := strconv.ParseFloat(x, 64) if err != nil { vals[i] = 0 } else { vals[i] = f } } return vals } // colorange evenly distributes opacity across a range of values func colorange(start, end float64, n int) []float64 { v := make([]float64, n) v[0] = start v[n-1] = end if n == 2 { return v } incr := (end-start)/float64(n-1) for i:=1; i < n-1; i++ { v[i] = v[i-1] + incr } return v } func textlimit(s string, n int) string { l := len(s) if l <= n { return s } return s[0:n-3]+"..." } // init sets up the command flags func init() { flag.StringVar(&bgcolor, "bg", "white", "background color") flag.StringVar(&barcolor, "bc", "rgb(200,200,200)", "bar color") flag.StringVar(&valformat, "vfmt", "%v", "value format") flag.IntVar(&width, "w", 1024, "width") flag.IntVar(&height, "h", 800, "height") flag.IntVar(&barheight, "bh", 20, "bar height") flag.IntVar(&gutter, "g", 5, "gutter") flag.IntVar(&cornerRadius, "cr", 0, "corner radius") flag.IntVar(&fontsize, "f", 18, "fontsize (px)") flag.BoolVar(&showscale, "showscale", true, "show scale") flag.BoolVar(&showgrid, "showgrid", false, "show grid") flag.BoolVar(&showdata, "showdata", false, "show data values") flag.BoolVar(&showtitle, "showtitle", false, "show title") flag.BoolVar(&endtitle, "endtitle", false, "align title to the end") flag.BoolVar(&trace, "trace", false, "show name/value pairs") flag.StringVar(&inbar, "inbar", "", "data in bar format") flag.StringVar(&title, "t", "", "title") } // for every input file (or stdin), draw a bar graph // as specified by command flags func main() { flag.Parse() canvas := svg.New(os.Stdout) canvas.Start(width, height) canvas.Rect(0, 0, width, height, "fill:"+bgcolor) canvas.Gstyle(fmt.Sprintf(gstyle, fontsize)) if len(flag.Args()) == 0 { dobc("", canvas) } else { for _, f := range flag.Args() { dobc(f, canvas) } } canvas.Gend() canvas.End() } 
charm-2.1.1/src/github.com/ajstarks/svgo/skewabc/0000775000175000017500000000000012672604563020653 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/skewabc/skewabc.go0000664000175000017500000000144412672604563022624 0ustar marcomarco// skewabc - exercise the skew functions // +build !appengine package main import ( "fmt" "os" "github.com/ajstarks/svgo" ) var ( g = svg.New(os.Stdout) width = 500 height = 500 ) func sky(x, y, w, h int, a float64, s string) { g.Gstyle(fmt.Sprintf("font-family:sans-serif;font-size:%dpx;text-anchor:middle", w/2)) g.SkewY(a) g.Rect(x, y, w, h, `fill:black; fill-opacity:0.3`) g.Text(x+w/2, y+h/2, s, `fill:white;baseline-shift:-33%`) g.Gend() g.Gend() } func main() { g.Start(width, height) g.Title("Skew") g.Rect(0, 0, width, height, "fill:white") g.Grid(0, 0, width, height, 50, "stroke:lightblue") sky(100, 100, 100, 100, 30, "A") sky(200, 332, 100, 100, -30, "B") sky(300, -15, 100, 100, 30, "C") g.End() } charm-2.1.1/src/github.com/ajstarks/svgo/ltr/0000775000175000017500000000000012672604563020035 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/ltr/ltr.go0000664000175000017500000000755412672604563021200 0ustar marcomarco// ltr: Layer Tennis remixes package main import ( "flag" "fmt" "os" "github.com/ajstarks/svgo" ) var ( canvas = svg.New(os.Stdout) poster, opacity, row, col, offset bool title string width, height int ) const ( stdwidth = 900 stdheight = 280 ni = 11 ) // imagefiles returns a list of files in the specifed directory // or nil on error. 
Each file includes the prepended directory name func imagefiles(directory string) []string { f, ferr := os.Open(directory) if ferr != nil { return nil } defer f.Close() files, derr := f.Readdir(-1) if derr != nil || len(files) == 0 { return nil } names := make([]string, len(files)) for i, v := range files { names[i] = directory + "/" + v.Name() } return names } // ltposter creates poster style: a title, followed by a list // of volleys func ltposter(x, y, w, h int, f []string) { canvas.Image(x, y, w*2, h*2, f[0]) // first file, assumed to be the banner y = y + (h * 2) for i := 1; i < len(f); i += 2 { canvas.Image(x, y, w, h, f[i]) canvas.Image(x+w, y, w, h, f[i+1]) if i%2 == 1 { y += h } } } // ltcol creates a single column of volley images func ltcol(x, y, w, h int, f []string) { for i := 0; i < len(f); i++ { canvas.Image(x, y, w, h, f[i]) y += h } } // ltop creates a view with each volley stacked together with // semi-transparent opacity func ltop(x, y, w, h int, f []string) { for i := 1; i < len(f); i++ { // skip the first file, assumed to be the banner canvas.Image(x, y, w, h, f[i], "opacity:0.2") } } // ltrow creates a row-wise view of volley images. func ltrow(x, y, w, h int, f []string) { for i := 0; i < len(f); i++ { canvas.Image(x, y, w, h, f[i]) x += w } } // ltoffset creates a view where each volley is offset from its opposing volley. func ltoffset(x, y, w, h int, f []string) { for i := 1; i < len(f); i++ { // skip the first file, assumed to be the banner if i%2 == 0 { x += w } else { x = 0 } canvas.Image(x, y, w, h, f[i]) y += h } } // dotitle creates the title func dotitle(s string) { if len(title) > 0 { canvas.Title(title) } else { canvas.Title(s) } } // init sets up the command line flags. 
func init() { flag.BoolVar(&poster, "poster", false, "poster style") flag.BoolVar(&opacity, "opacity", false, "opacity style") flag.BoolVar(&row, "row", false, "display is a single row") flag.BoolVar(&col, "col", false, "display in a single column") flag.BoolVar(&offset, "offset", false, "display in a row, even layers offset") flag.IntVar(&width, "width", stdwidth, "image width") flag.IntVar(&height, "height", stdheight, "image height") flag.StringVar(&title, "title", "", "title") flag.Parse() } func main() { x := 0 y := 0 nd := len(flag.Args()) for i, dir := range flag.Args() { filelist := imagefiles(dir) if len(filelist) != ni || filelist == nil { fmt.Fprintf(os.Stderr, "in the %s directory, need %d images, read %d\n", dir, ni, len(filelist)) continue } switch { case opacity: if i == 0 { canvas.Start(width*nd, height*nd) dotitle(dir) } ltop(x, y, width, height, filelist) y += height case poster: if i == 0 { canvas.Start(width, ((height*(ni-1)/4)+height)*nd) dotitle(dir) } ltposter(x, y, width/2, height/2, filelist) y += (height * 3) + (height / 2) case col: if i == 0 { canvas.Start(width*nd, height*ni) dotitle(dir) } ltcol(x, y, width, height, filelist) x += width case row: if i == 0 { canvas.Start(width*ni, height*nd) dotitle(dir) } ltrow(x, y, width, height, filelist) y += height case offset: n := ni - 1 pw := width * 2 ph := nd * (height * (n)) if i == 0 { canvas.Start(pw, ph) canvas.Rect(0, 0, pw, ph, "fill:white") dotitle(dir) } ltoffset(x, y, width, height, filelist) y += n * height } } canvas.End() } charm-2.1.1/src/github.com/ajstarks/svgo/benchviz/0000775000175000017500000000000012672604563021044 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/benchviz/benchviz.go0000664000175000017500000001412112672604563023202 0ustar marcomarco// benchviz: visualize benchmark data from benchcmp package main import ( "bufio" "bytes" "flag" "fmt" "io" "math" "os" "strconv" "strings" "github.com/ajstarks/svgo" ) // geometry defines the layout of the 
visualization type geometry struct { top, left, width, height, vwidth, vp, barHeight int dolines, coldata bool title, rcolor, scolor, style string deltamax, speedupmax float64 } // process reads the input and calls the visualization function func process(canvas *svg.SVG, filename string, g geometry) int { if filename == "" { return g.visualize(canvas, filename, os.Stdin) } f, err := os.Open(filename) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) return 0 } defer f.Close() return g.visualize(canvas, filename, f) } // vmap maps world to canvas coordinates func vmap(value, low1, high1, low2, high2 float64) float64 { return low2 + (high2-low2)*(value-low1)/(high1-low1) } // visualize performs the visualization of the input, reading a line a time func (g *geometry) visualize(canvas *svg.SVG, filename string, f io.Reader) int { var ( err error line, vs, bmtitle string dmin, dmax float64 ) bh := g.barHeight vizwidth := g.vwidth vspacing := g.barHeight + (g.barHeight / 3) // vertical spacing bmtype := "delta" in := bufio.NewReader(f) canvas.Gstyle(fmt.Sprintf("font-size:%dpx;font-family:sans-serif", bh)) if g.title == "" { bmtitle = filename } else { bmtitle = g.title } canvas.Text(g.left, g.top, bmtitle, "font-size:150%") height := 0 for x, y, nr := g.left+g.vp, g.top+vspacing, 0; err == nil; nr++ { line, err = in.ReadString('\n') fields := strings.Split(strings.TrimSpace(line), ` `) if len(fields) <= 1 || len(line) < 2 { continue } name := fields[0] value := fields[len(fields)-1] if len(value) > 2 { vs = value[:len(value)-1] } v, _ := strconv.ParseFloat(vs, 64) av := math.Abs(v) switch { case strings.HasPrefix(value, "delt"): bmtype = "delta" dmin = 0.0 dmax = g.deltamax // 100.0 y += vspacing * 2 continue case strings.HasPrefix(value, "speed"): bmtype = "speedup" dmin = 0.0 dmax = g.speedupmax // 10.0 y += vspacing * 2 continue case strings.HasPrefix(name, "#"): y += vspacing canvas.Text(g.left, y, line[1:], "font-style:italic;fill:gray") continue } bw := 
int(vmap(av, dmin, dmax, 0, float64(vizwidth))) switch g.style { case "bar": g.bars(canvas, x, y, bw, bh, vspacing/2, bmtype, name, value, v) case "inline": g.inline(canvas, g.left, y, bw, bh, bmtype, name, value, v) default: g.bars(canvas, x, y, bw, bh, vspacing/2, bmtype, name, value, v) } y += vspacing height = y } canvas.Gend() return height } // inline makes the inline style pf visualization func (g *geometry) inline(canvas *svg.SVG, x, y, w, h int, bmtype, name, value string, v float64) { var color string switch bmtype { case "delta": if v > 0 { color = g.rcolor } else { color = g.scolor } case "speedup": if v < 1.0 { color = g.rcolor } else { color = g.scolor } } canvas.Text(x-10, y, value, "text-anchor:end") canvas.Text(x, y, name) canvas.Rect(x, y-h, w, h, "fill-opacity:0.3;fill:"+color) } // bars creates barchart style visualization func (g *geometry) bars(canvas *svg.SVG, x, y, w, h, vs int, bmtype, name, value string, v float64) { canvas.Gstyle("font-style:italic;font-size:75%") toffset := h / 4 var tx int var tstyle string switch bmtype { case "delta": if v > 0 { canvas.Rect(x-w, y-h/2, w, h, "fill-opacity:0.3;fill:"+g.rcolor) tx = x - w - toffset tstyle = "text-anchor:end" } else { canvas.Rect(x, y-h/2, w, h, "fill-opacity:0.3;fill:"+g.scolor) tx = x + w + toffset tstyle = "text-anchor:start" } case "speedup": if v < 1.0 { canvas.Rect(x-w, y-h/2, w, h, "fill-opacity:0.3;fill:"+g.rcolor) tx = x - w - toffset tstyle = "text-anchor:end" } else { canvas.Rect(x, y-h/2, w, h, "fill-opacity:0.3;fill:"+g.scolor) tx = x + w + toffset tstyle = "text-anchor:start" } } if g.coldata { canvas.Text(x-toffset, y+toffset, value, "text-anchor:end") } else { canvas.Text(tx, y+toffset, value, tstyle) } canvas.Gend() canvas.Text(g.left, y+(h/2), name, "text-anchor:start") if g.dolines { canvas.Line(g.left, y+vs, g.left+(g.width-g.left), y+vs, "stroke:lightgray;stroke-width:1") } } func main() { var ( width = flag.Int("w", 1024, "width") top = flag.Int("top", 50, "top") 
left = flag.Int("left", 100, "left margin") vp = flag.Int("vp", 512, "visualization point") vw = flag.Int("vw", 300, "visual area width") bh = flag.Int("bh", 20, "bar height") smax = flag.Float64("sm", 10, "maximum speedup") dmax = flag.Float64("dm", 100, "maximum delta") title = flag.String("title", "", "title") speedcolor = flag.String("scolor", "green", "speedup color") regresscolor = flag.String("rcolor", "red", "regression color") style = flag.String("style", "bar", "set the style (bar or inline)") lines = flag.Bool("line", false, "show lines between entries") coldata = flag.Bool("col", false, "show data in a single column") ) flag.Parse() g := geometry{ width: *width, top: *top, left: *left, vp: *vp, vwidth: *vw, barHeight: *bh, title: *title, scolor: *speedcolor, rcolor: *regresscolor, style: *style, dolines: *lines, coldata: *coldata, speedupmax: *smax, deltamax: *dmax, } // For every named file or stdin, render the SVG in memory, accumulating the height. var b bytes.Buffer canvas := svg.New(&b) height := 0 if len(flag.Args()) > 0 { for _, f := range flag.Args() { height = process(canvas, f, g) g.top = height + 50 } } else { height = process(canvas, "", g) } g.height = height + 15 // Write the rendered SVG to stdout out := svg.New(os.Stdout) out.Start(g.width, g.height) out.Rect(0, 0, g.width, g.height, "fill:white;stroke-width:2px;stroke:lightgray") b.WriteTo(os.Stdout) out.End() } charm-2.1.1/src/github.com/ajstarks/svgo/stockproduct/0000775000175000017500000000000012672604563021760 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/stockproduct/stockproduct.go0000664000175000017500000001305212672604563025034 0ustar marcomarco// stockproduct draws a bar chart comparing stock price to products // +build !appengine package main import ( "encoding/xml" "flag" "fmt" "os" "github.com/ajstarks/svgo" ) // Parameters defines options type Parameters struct { showline, showimage, showproduct, showprice, showdate, showgrid bool x, y, w, h, width, height, 
spacing, fontsize, dot int minvalue, maxvalue, ginterval, opacity, rotatetext float64 barcolor string } // // // // // // // // // StockProduct is the top-level drawing type StockProduct struct { Title string `xml:"title,attr"` Sdata []Sdata `xml:"sdata"` } // Sdata defines stock data type Sdata struct { Price float64 `xml:"price,attr"` Date string `xml:"date,attr"` Product string `xml:"product,attr"` Image string `xml:"image,attr"` } // vmap maps ranges func vmap(value float64, low1 float64, high1 float64, low2 float64, high2 float64) float64 { return low2 + (high2-low2)*(value-low1)/(high1-low1) } // barchart draws a chart from data read at location, on a SVG canvas // if the location is the empty string, read from standard input. // Data items are scaled according to the width, with parameters controlling the visibility // of lines, products, images, and dates func (p *Parameters) barchart(location string, canvas *svg.SVG) { var ( f *os.File err error sp StockProduct ) if len(location) > 0 { f, err = os.Open(location) } else { f = os.Stdin } if err != nil { fmt.Fprintln(os.Stderr, err) return } defer f.Close() if err := xml.NewDecoder(f).Decode(&sp); err != nil { fmt.Fprintln(os.Stderr, err) return } bottom := p.y + p.h interval := p.w / (len(sp.Sdata) - 1) bw := interval - p.spacing offset := 120 halfoffset := offset / 2 if bw < 2 { bw = 2 } canvas.Text(p.x, p.y-halfoffset, sp.Title, "font-size:400%") if p.showgrid { canvas.Gstyle("stroke:lightgray;stroke-width:1px") gx := p.x - (bw / 2) for i := p.maxvalue; i >= p.minvalue; i -= p.ginterval { yp := int(vmap(i, p.minvalue, p.maxvalue, float64(p.y), float64(bottom))) by := p.y + (bottom - yp) canvas.Line(gx, by, p.x+p.w+(bw/2), by) canvas.Text(gx-halfoffset, by, fmt.Sprintf("%.0f", i), "fill:black;stroke:none") } canvas.Gend() } canvas.Gstyle(fmt.Sprintf("stroke-opacity:%.2f;stroke:%s;stroke-width:%d;text-anchor:middle", p.opacity, p.barcolor, bw)) for _, d := range sp.Sdata { yp := int(vmap(d.Price, p.minvalue, 
p.maxvalue, float64(p.y), float64(bottom))) by := p.y + (bottom - yp) if p.showline { canvas.Line(p.x, bottom, p.x, by) } if p.dot > 0 { canvas.Circle(p.x, by, p.dot, fmt.Sprintf("stroke:none;fill-opacity:%.2f;fill:%s", p.opacity, p.barcolor)) } if p.showimage { if len(d.Image) > 0 { canvas.Image(p.x-bw/2, by-offset-2, bw, offset, d.Image) } } canvas.Gstyle("stroke:none;fill:black") if p.showproduct { if p.rotatetext != 0 { canvas.TranslateRotate(p.x, bottom+40, p.rotatetext) canvas.Text(0, 0, d.Product) canvas.Gend() } else { canvas.Text(p.x, bottom+40, d.Product) } } if p.showprice { canvas.Text(p.x, by, fmt.Sprintf("%.2f", d.Price), "font-weight:bold") } if p.showdate { canvas.Text(p.x, bottom+20, d.Date) } canvas.Gend() p.x += interval } canvas.Gend() } var param Parameters // set parameters according to command flags func init() { flag.BoolVar(¶m.showline, "line", true, "show lines") flag.BoolVar(¶m.showimage, "image", true, "show images") flag.BoolVar(¶m.showproduct, "product", true, "show products") flag.BoolVar(¶m.showprice, "price", true, "show prices") flag.BoolVar(¶m.showdate, "date", true, "show dates") flag.BoolVar(¶m.showgrid, "grid", true, "show grid") flag.IntVar(¶m.width, "w", 1600, "overall width") flag.IntVar(¶m.height, "h", 900, "overall height") flag.IntVar(¶m.x, "left", 150, "left") flag.IntVar(¶m.y, "top", 120, "top") flag.IntVar(¶m.w, "gw", 1400, "graph width") flag.IntVar(¶m.h, "gh", 700, "graph height") flag.IntVar(¶m.dot, "dot", 0, "dotsize") flag.IntVar(¶m.fontsize, "fs", 14, "font size (px)") flag.IntVar(¶m.spacing, "spacing", 15, "bar spacing") flag.Float64Var(¶m.maxvalue, "max", 400, "max value") flag.Float64Var(¶m.minvalue, "min", 0, "max value") flag.Float64Var(¶m.ginterval, "ginterval", 50, "max value") flag.Float64Var(¶m.opacity, "opacity", 0.5, "bar opacity") flag.Float64Var(¶m.rotatetext, "rt", 0, "rotate text") flag.StringVar(¶m.barcolor, "color", "lightgray", "bar color") flag.Parse() } func main() { width := 1600 height := 
900 canvas := svg.New(os.Stdout) canvas.Start(param.width, param.height) canvas.Rect(0, 0, width, height, canvas.RGB(255, 255, 255)) canvas.Gstyle(fmt.Sprintf("font-family:Calibri;font-size:%dpx", param.fontsize)) if len(flag.Args()) == 0 { param.barchart("", canvas) } else { for _, f := range flag.Args() { param.barchart(f, canvas) } } canvas.Gend() canvas.End() } charm-2.1.1/src/github.com/ajstarks/svgo/lewitt/0000775000175000017500000000000012672604563020544 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/lewitt/lewitt.go0000664000175000017500000000411012672604563022377 0ustar marcomarco// lewitt: inspired by by Sol LeWitt's Wall Drawing 91: // +build !appengine package main // // A six-inch (15 cm) grid covering the wall. // Within each square, not straight lines from side to side, using // red, yellow and blue pencils. Each square contains at least // one line of each color. // // This version violates the original instructions in that straight lines // as well as arcs are used import ( "flag" "fmt" "math/rand" "os" "time" "github.com/ajstarks/svgo" ) var canvas = svg.New(os.Stdout) const tilestyle = `stroke-width:1; stroke:rgb(128,128,128); stroke-opacity:0.5; fill:white` const penstyle = `stroke:rgb%s; fill:none; stroke-opacity:%.2f; stroke-width:%d` var width = 720 var height = 720 var nlines = flag.Int("n", 20, "number of lines/square") var nw = flag.Int("w", 3, "maximum pencil width") var pencils = []string{"(250, 13, 44)", "(247, 212, 70)", "(52, 114, 245)"} func background(v int) { canvas.Rect(0, 0, width, height, canvas.RGB(v, v, v)) } func lewitt(x int, y int, gsize int, n int, w int) { var x1, x2, y1, y2 int var op float64 canvas.Rect(x, y, gsize, gsize, tilestyle) for i := 0; i < n; i++ { choice := rand.Intn(len(pencils)) op = float64(random(1, 10)) / 10.0 x1 = random(x, x+gsize) y1 = random(y, y+gsize) x2 = random(x, x+gsize) y2 = random(y, y+gsize) if random(0, 100) > 50 { canvas.Line(x1, y1, x2, y2, fmt.Sprintf(penstyle, 
pencils[choice], op, random(1, w))) } else { canvas.Arc(x1, y1, gsize, gsize, 0, false, true, x2, y2, fmt.Sprintf(penstyle, pencils[choice], op, random(1, w))) } } } func random(howsmall, howbig int) int { if howsmall >= howbig { return howsmall } return rand.Intn(howbig-howsmall) + howsmall } func init() { flag.Parse() rand.Seed(int64(time.Now().Nanosecond()) % 1e9) } func main() { canvas.Start(width, height) canvas.Title("Sol Lewitt's Wall Drawing 91") background(255) gsize := 120 nc := width / gsize nr := height / gsize for cols := 0; cols < nc; cols++ { for rows := 0; rows < nr; rows++ { lewitt(cols*gsize, rows*gsize, gsize, *nlines, *nw) } } canvas.End() } charm-2.1.1/src/github.com/ajstarks/svgo/turbulence/0000775000175000017500000000000012672604563021404 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/turbulence/turbulence.go0000664000175000017500000000334212672604563024105 0ustar marcomarco// turbulence example from http://www.w3.org/TR/2003/REC-SVG11-20030114/filters.html#feTurbulence // +build !appengine package main import ( "fmt" "github.com/ajstarks/svgo" "os" ) var ( canvas = svg.New(os.Stdout) width = 500 height = 500 ) type perlin struct { id string ftype string basefreqx float64 basefreqy float64 octave int seed int64 tile bool } func (p perlin) defturbulence() { x := svg.Filterspec{} canvas.Filter(p.id) canvas.FeTurbulence(x, p.ftype, p.basefreqx, p.basefreqy, p.octave, p.seed, p.tile) canvas.Fend() } func (p perlin) frect(x, y, w, h int) { bot := y + h canvas.Rect(x, y, w, h, fmt.Sprintf(`filter="url(#%s)"`, p.id)) canvas.Text(x+w/2, bot+25, fmt.Sprintf("type=%s", p.ftype)) canvas.Text(x+w/2, bot+40, fmt.Sprintf("baseFrequency=%.2f", p.basefreqx)) canvas.Text(x+w/2, bot+55, fmt.Sprintf("numOctaves=%d", p.octave)) } func main() { var t1, t2, t3, t4, t5, t6 perlin t1 = perlin{"Turb1", "t", 0.05, 0.05, 2, 0, false} t2 = perlin{"Turb2", "t", 0.10, 0.10, 2, 0, false} t3 = perlin{"Turb3", "t", 0.05, 0.05, 8, 0, false} t4 = perlin{"Turb4", 
"f", 0.10, 0.10, 4, 0, false} t5 = perlin{"Turb5", "f", 0.40, 0.40, 4, 0, false} t6 = perlin{"Turb6", "f", 0.10, 0.10, 1, 0, false} canvas.Start(width, height) canvas.Title("Example of feTurbulence") canvas.Def() t1.defturbulence() t2.defturbulence() t3.defturbulence() t4.defturbulence() t5.defturbulence() t6.defturbulence() canvas.DefEnd() canvas.Gstyle("font-size:10;font-family:Verdana;text-anchor:middle") t1.frect(25, 25, 100, 75) t2.frect(175, 25, 100, 75) t3.frect(325, 25, 100, 75) t4.frect(25, 180, 100, 75) t5.frect(175, 180, 100, 75) t6.frect(325, 180, 100, 75) canvas.Gend() canvas.End() } charm-2.1.1/src/github.com/ajstarks/svgo/svgdef.png0000664000175000017500000135761112672604563021236 0ustar marcomarco‰PNG  IHDRB ±†T cHRMz&€„ú€èu0ê`:˜pœºQ<gAMA±Ž|ûQ“sRGB®ÎébKGDÿÿÿ ½§“ pHYsÄÄ•+ IDATxÚìÝÈWu‚/ðsç+Üà #\ÿpY‡¼LCã0FÆ:Œ‘‘‘‘2†††ŠFIEIJJ…I†E†EFEEJÆ*))cä2ã0-9ŒÃ:ŒË×e½\¼Ð‚º0—½÷}ö>­9êó9Ïóýq¾ÏózÁw-ç<Ï÷|?ŸÏ9ßÃvÞçýªªú×  …®Éÿ™3gNýh‹Ÿÿüçÿ„Ü´i“Zã_ÿõ_«o ­!€Ö„ZKh-AH µ!€Ö„ZKh-AH µ!€Ö„ZKh-AH µ!€Ö„ZKh-AH µ!€Ö„ZKh-AH µ!€Ö„ZKh-AH µ!€Ö„ZKh-AH µ!€Ö„ZKh-AH µ!€Ö„ZKh-AH µ!€Ö„ZKh-AH µ!€Ö„ZKh-AH µ!€Ö„ZKh-AH µ!€Ö„ZKh-AH µ!€Ö„ZKh-AH µ!€Ö„ZKh-AH µ!€ÖºÆÐÄo¼QøÀ€0jß2”JÀ±D‘Ð ‚@k]c˾úê«o4W]{íµÕ¤I“ ÃJ“Ù… ¾ñw“'O®&L˜`p`®rœçx‡ñr ¼òÊ+ÕáÇ«3gÎ|ÝĘsÖ”)SªŸþô§Õƒ>XMœ8Ñ`0!ü8vìXõ»ßý®:qâD~<{ölõå—_þÙ¶×\sM ÉkæÌ™Õ~ô£jÆŒÕõ×__ÿo–€ÖÉ“'ë}L0sHBZS§N­¦M›6Ðá”?ýéOÕéÓ§ëñÎëb ád\ó9Û&kchÝ|ñÅÕ©S§êÏß_ž½Úúɺ¹îºëªÙ³g×ÿÜõÿC™›Ï>û¬ú‡ø‡z®²æò÷™«¬Á+É©CW C],Û%T5‚’ðËüùó«;ï¼³š7o^Gš#ׯ__íØ±ãÈK%h³aÆêÙgŸ¸98tèPµråʯÛÇ®dëÖ­õgì·„ç²Ï{÷îm´fJÖOæ1Á©¬Ÿ¹sçV³fͶ…ŒoÚí2W @^íºš¡€îÅs™«ÌSæ+s¥ésü­¯… Ö!È«›~øájûöí­þ,›7o¾jx{HÎÓO?ýtõî»ïZ ÿõ€“àÊsÏ=W8p`DA¶+IØž={êWBQK–,©}ôÑ:05Rù]øò¶lÙR-]º´nO$k×®6ì[2û·{÷îêÍ7߬›Óº%ó˜pe^ &%H›`íOúÓ:p'hW~ ¾óÎ;uø±Û瑼¶mÛöuúî»ï®ÿÌ¿3¶åœpµäÐ1ýòË/·>yäÈ‘âmóðAH‘ $# ^7n¬ƒ @^N‹;wî¬Ã2Ë–-«6mÚTM:µñï™6mZÝFWâý÷ߨVÈ„ˆÒ°XböìÙ=ß¿„_yå•z°ëµ0³†òJ°nõêÕu°v$ëh¬Ë1òÒK/ÕsU¬í´KCÐ9æüñ &S®Ûß!½8p :tèPµaÆê‰'ž£•ç¾eh«4³-\¸°ï!È‹½öÚk¶Ÿ4iR5wîÜâí÷îÝ;sóÙgŸØ/^Üõ°ß‰'ªo¼±/­¡#]Ûk×®­n¹å–ºmÓPûØc9IÓ 
9GÍž=»8œžï#D‚´N‚Š<òH㟛0aBµdÉ’jéÒ¥up¸Ö«¼OùÞÿý:DÒ­V¾É“'×­ˆi{,±k×®V!"-m_\µjU£¶²RçΫ›GÒÐy©„53Þ7ÜpC5}úô:<;uêÔ+îw´yÿ4&÷›ßü¦^?ùç‘úÅ/~1fƒ ¬.\¸°#-i¿Ë\ýà?¨çièu¥¹ ¯fÍf~~÷»ßÕÇ|þ}¤Ç{æJ’6Ù´iS½®‡[ÓùN|öÙg I€ÖI{â—_~Ùèg6lØP=þøãÖ w5 ÀåµbÅŠ:¬•fÁ×^{­#áºK-_¾¼8™í^}õÕ®;¡Ic‚©–`Û-·ÜRYG*ë$ÍhC¡Ù&cÀm‚’y],áº={öT,ŠI¨r,Êq|ÇwÔ­—#•ðVB¢™«9sæÔã_*óz¹¹ÊÊ\íß¿¿ñ¾Õ¹bp司ío[mÞ¼¹n—½´í8âyóæUO<ñDýÏ0ˆ!h•4ë%ÈQjâĉՇ~XAF#a«"óÊ>$P’?/5ÒÉP€ëÒ€Êå¤m0ï=wîÜVÎQ4K̘1ãÏh£•Ûüã·/fÒž–äp¡M¥™ð™gž©_ B¾ùæ›ÕîÝ»ëípºÕFÚO AÞvÛm#'¬š¹Z¶lY}œwRŽãuëÖÕ¯j€N02ÇÞxœ+_Úl?øàƒúŸsÎ9þü×ÇQ§ÏuЂ´Ê+¯¼R¼mÂQŸ~úi5sæÌŽîCB‹y%¹~ýúºéïâ÷‰üÜüùó‹Û6lc2¡¶‹Çãj/^ÜÑ÷N¸'M# A…êtíE((!Ð×_½zñÅ«7Þx£^× qŽ'wÝu׈B™Ÿ4×=üðÃ@^NZa·oß^mݺµ®>÷Üs#ÚBä~`¬„ 5Ò ×¤ òoþæo:‚¼X‚‹óæÍ«¶lÙRÙ²K—.ñï[µjUqòÀu.-’m²ÿþ¢í®¹æšºÉ¯“V®\Y·÷5µdÉ’z,{ª»TÂHi|ì±Çª;wV7nüº©íb#mm«ˆ;Öøç@~ï½÷ú29ÖV¯^]‡eÓ¹víÚq1Wƒ@€ÖHòOúSѶC!ÅnK ï™gž©ƒliBœ5kÖˆW¢*iLèòðáÃÕ‚ Z5G¥á¶¡ÏÚ)/¿ürqˆôbiùK1óØOyÿ„ì-ZT½ð նmÛ¾±Ö¯»îº1sgž2_Må{öÙgûÞd7âÍ9fÇŽÕæÍ›¿1Wi ·¾eh‹_üâÅÛ>úè£=Ý·´ Ž& X¥°Tiûb¯œ9s¦úì³Ïж½ýöÛ;ö¾'Nœ¨››Žõ‡~Xmذ¡ï!ÈK×Q™Ÿþù7ÚL›¬‹6;wî\µfÍšâ@ó4vnß¾½ï!ÈKçê©§žªþð‡?ÔÁÞ!‹/v²è1´ÆéÓ§‹¶KXjΜ9ùo½õÖº °Äª .´&–ý) ¸u*Ø—÷[¹reõÕW_ÿÌP2~m5cÆŒê׿þuݰ™ù½þúëÇÄ1|ÿý÷WçÏŸoô3o½õVÝ–ÙVi€üôÓOëpÖá ž{™FHZ£´m0MzmjùkbÞ¼yÕÔ©S‹¶MòСC­Ù÷÷ß¿h»'OžÜ‘÷|ã7êFÈ&¬ksrHÖpBu7C²¬Õ¦ëuÓ¦M­A^,°Bý! @+|ùå—Å­ $ ²åË—o»ÿþVìó©S§ªãÇm»téÒŽ¼ç¹sçª76ú™­[·V+V¬p@õXŽÝ5kÖ4ú™|°zæ™g Ä „,õñýY-ZT¼möÒ Ùoûöí+Ún„ Õ‚ :òžO?ýt£Ïž¶¾uëÖ9˜ú`Û¶mÕÙ³g‹·O˜yûöí€"×Íÿþßÿ{ ÷úôéÕõ×__·,'M{ !ö»åpïÞ½EÛ%Œ˜0äh9s¦Ú¹sgñö“&MªÞ{ï½êškü§^KXõ¥—^*Þ>ëãÝwßíÈ:áÏçâüùóõëâñĉë×”)S# ”¬å„¬‡–õ›u}út} cwêÔ©õŸý–¦æ¡sÌÅ2'ùnÃ>v[æ$×(‹˜6mZ½[ ûüã8  º¥K—Ö‡%BìgòäÉ“õ«ÄªU«:òž/¼ðBõ§?ý©xû_|±bÐ{;vìhÔèúÄOÔA`F'!œ4Æþþ÷¿¯Ž;V‡ÅNöÚkëñÏëÖ[o­fÏž]y|9°ÏZ Â&,˜sãPX«íNœ8QçeMçŸ¯Ö œ5¿ä ý f^òÊq‘ócö%û˜‡R”9¶Þxãêã?®>ûì³Ëžgóûòys^Íü\)¨›óôÐz¹ø÷dý«mUfߎ?^ïgŸC†‚¹ù¼ÂŸôšÿÏ4­Ð¤E(7fºeË–U›7o. 
û%‘ç¬è‡!Jdÿróÿh%²{÷îâí<ÈxÒ{ G¼öÚkÅÛ'<±aÃ7B ïÙ³§zÿý÷‹e/'A¦œCó:ÎZš7o^õÀ©¨Gy¤j]é;%¥{Ï>ûl+Ï#Y×i–- ÝÇP8/ëxýúõÕ’%KªÇ¼ëÁ‹¥±yÍš5Ö8$¡¶O?ý´>÷]IŸk×®ö»6Çn®òʃV¯^]½õÖ[û,ù ¨>ùä“:ˆz¹0fÉ\>\ÿ]† ,¨–/_^W{žË¾ÜvÛmõg¹Ò1±}ûöêᇾâïÈñ´mÛ¶úá Ãþ‡‘ã0Æ>úèÏÖâ–-[ªçž{îÁÂË]K¥5¹×S#•1[¸pá×u$™ýÌœ@¯BÐ B¦íh¸¶¡È ö ô2ìÐi Eäæñ4 '7â'œÐC?ŸÃqÖS‚± §÷ëÚ&û~µu }fÍBÐKî  5Ò SÚröæ›oÖA‡Av÷Ýw!# pýBæfþÒFÈ|žNØ¿ñ¶ mÍ™3ÇÁÓ'Mæ*Íhil£Ùñ·qãÆ:pÚ€Òå¤mî‡?üazòÉ'{&côºí¦|÷%Ü{µ ÚHåÁù½ ܵ¹-8ÇöóÏ??ªß‘k¦‘!Ón˜àópAÎN˼ÜxãÕºu몭[·¶ö! ùÝu×]õƒ7Fêr?›ÆÝ„c‡“mr.N²×\-½îË÷:ôÒ· mÑäfþ4!¤¥§MÐ( ä¦ôÜœÞkiŠ* ØLš4©#7ìgN›ÌëC=äÀé“„gÒÕd®4w–Køñûßÿ~Ýp׫äÅvìØQ}ï{ßk4ÇP"Í€·ÜrKWBŸŸî»ï¾:lÙ¶hŽç4 Ž69šñÿÎw¾SÔú×-iÂ\¸pa_ÎmÃIñæ›oU26½T“FìÝ»·/Ÿ?Aâ’c&ßçD@¯ BÐ?ùÉOŠ·ÍMÚiëéG8°SÒ€™FÃR¹9½×Jÿ-ZTM˜0aÔïwøðáâm§NZÍŸ?ßÓ' ‹”Œ®½öÚ¾4š¢ƒî¹çžúu¹ M/åýsžMsŒÖ… êu½fÍšžà¸»ÿþû[†Ì~d ò~[2þý>·DBÖ9¿´) ™‡N$ ™µÚ ¹f)} @“ïØNJãz‰\¿æ:zI€ÖhÚ,söìÙêŽ;îè[£Q',_¾¼xÛ^·e\?ÞñÏq5ï¿ÿ~ñ¶M5é¼&sµ`Á‚: ÉÕ%œ”¦¼´A¶ÉÚµk«§Ÿ~ºuÍz ŽË0ëÇÚÞ¹sgÝ ÙÙ~µ¬&Ú¶sKÂ~<òH+ö% Šv+“'O®–,YR|›ñé¥Ó§OW'Ožìéu4! @k¤á¯ICbœ8q¢ºí¶Û¶2±Ò&ÅcÇŽÕ7Æ÷J%Á§ÌÛ¬Y³Fý~™ÃS§NoÿÓŸþÔAÓ'YY檳cšv´Ï>û¬•û·eË–jãÆ&ŠÛüã÷<Ôu±´š&ÙO»wï®vìØÑ·÷ïçø_ÍÛo¿Ý·pè¡ n'¬q¥ðÿÒ¥K‹Ç®]»z:i/¹î›8qb}ý ½æ1É´Ê<Ðøfý„!øÃVo½õV5þüú¼ Aæfò={ö »mnNObÆ =Ù·ýû÷mWÚn4œëJ羜1c†¦OrÌ¥½°tÏ›7Ï  #MdMÂ¥Kà&aä™3gV×]w]5eÊ”o¬ þ‡ø‡ºá5s7Òàø¶mÛªn¸¡Z±b… £HÎ øfÝ•JkÞôéÓ«iÓ¦Uÿå¿ü—úïþøÇ?Öë6×#mì[³fM}œ\ýõ}9g¦‘±_­ª_}õUãã>ËiêΘýà?¨ç#s“×Å¿70È<'ÄýñÇè<–±ùÃþPìú!ïßd–˜4iÒeÿ>cšÏYòš`bƸô£õÎ;ï_÷õjŸàb‚´Ê¢E‹êVȦaÈÜàŸ°Åƒ>XmÚ´é7ê·ÝòåË‹‚qðàÁž!Ïœ9S=z´xÿ;áW¿úUñ¶³gÏv°ôQ“ KÂyW—¦Å^šHH)!êU«VÕÁšÒ1N¨æðáÃÕ+¯¼R|Œ_,¡„‘)Q.Ëz^¶lYõè£Ö!Èüûå$H˜u›õÛ´E0k?aÈŸÿüç=ƒ7ï»ï¾úý‡“Ps‚ã?úѾ¾ŽÉõÍÿïÿ½þ¼ùn¾X‰¥×HMÎÙ9¯äü2ܵTÎ;C炜‡r}’9_»vm£óKö/zxøá‡{¾Fßxãâk°ŒM>ç_ýÕ_Õs•9ÍÜ$džëÖ‹ƒ®W ufÌ&OKiÉšÍwC§8q5'Ož,nåîÔu4% @ë¤Ùñ{ßû^QhàR¹¡}ß¾}ÕÖ­[ëͯ¦h“&í@i\J"ˆÝ”@BIsUšµ:Õ®uiÀãjnºé&JýÓ?ýSñ¶iãêÇtš›HçÝwß­cM µÐæ•se‚açÏŸ/þùœyä‘ê—¿ü¥É£#Òä¼}ûö¢P_¾Óó°„¼vîÜY¯Å& ‘ ç%X–õß+Ï=÷\Q1rȵ˕t£îò@„|†ÿ¥ç×á®…†ÂyiåmÈ9?Ÿ°i®Ç2?¥-˜o¾ùfσ¹îZ¿~ý°Ûåšëõ×_¿j»qæ8!ó]»vÕ×3=ôзM°$ù}½B–†ñ3¾×èAHZ'7Y' F¡‘H¨'­e/½ôRý{zx‰r“{B%Ò\ÔíVÈýû÷m×ÉV &-ƒÚèú«É\ýà?0`W°÷Ê•+‹ƒB‘°RÂây§7Aì{ï½·Q oæ?!´ì ŒTõîéC òs ÞqǼ/¼ðBO¯ † A&øøÑG Ûtœ1J-¯\Û41eÊ”z¼/ æúã±Ç«C{Ù¦“ìÌïÏ9®D {ñ 
‡‹•<€"ëìÕW_­ÇïjÒž™mK΋¹†Ið÷ôéÓÃn›ðnöóJÙNyçwжëE(®ä[†€6ÊùWkÞ)qêÔ©jáÂ…Õý¯ÿµzûí·GÔ0Ù+M…iƒê¦H(Ñ©âÎhdéT %#_#æjôvìØQŸ§J%Ý©äI“&U?ûÙÏË›7onà„‹%t—ÖÀÕ«Wj=§õÓO?mK k“0w·Ç!û?\².¾¦J@ñ©§žªþÛûou¨²Ó!È! 69·$ô×¶kÑœs‡ AŽÄªU«Š¶Ëµk€ÑMi-ý^ïä0 )AHZëƒ>¨C£•Æ4D~ç;ß©¶lÙÒ(p×+ix*mAJˆ£I­©œšìópΞ=[¼m‚[yÑ e­Ž^¾ÒLWjݺuu`©“!È!ùï½÷^£0VÎAÝç06¥ ï·¿ýmG¾ß‡~_š%›xå•Wú>iL²Sã0œ´>üðÃuòÿñ«gŸ}¶Þ‡nËÜ” ñ‹_´f&™}ïÆ97–-[Vü»wíÚÕÕÏZú€\÷ùN Ÿ!h­Ü8ßÉÀ¹sça°³¯ IDATª§Ÿ~ºúË¿üËjÍš5u@²Mš´&u3€´ÿþ¢í:Ù ”¹)å&üþj2WiëF“ÖXðòË/JôJh©›ÈIø¼IÈø¥—^2‘4’µœïõN‡ÙÓ<Ø$È›À“sY·®ozù}–ÐcÂ9—t«òJï»`Á‚¢m›4ävSÖSÓpmS™ƒùóçmÛí`ìÛ·¯ç×}0×ÚlâĉÕÏþóºÑ±Sá¿4Ú½ñÆõ+áÃ_|±§¡€+É æÏ?ÿ|ѶiÚ°aCÇ÷!ÁÜp?œ &4 n§IÃ`§Z(¯$Ÿ?k¤Û„éUX'ǹJH0MXl{çwŠ·Mx)Ç\·%°”ÖÉœoKœ8q¢~%ÜÙ>}zWBCò]~óÍ7m›ÖãÇ×·~xýõ×ëñ//^\íÞ½{ØíNž<ÙŠušsn/Üyçu(·D®»qÝwìØ±¢j§¯û`$!h½Æþæoþ¦ºá†ªÍ›7׆NÉåiÂY½zuõä“Oö5™f¨¼JnHÏ6yuºM*7ä—Œïœ9sêj§\¸p¡xÛN†Ñ.uß}÷…5:%AÁ¬í„ìE[æ*í®[¶léÙçNHðÝwß­æÍ›7êßuôèÑêìÙ³EÛÎ;·QÓÝh%öÊ+¯’Ž9"IÑñóÑGu-9æxÉš,ñÉ'Ÿô%™÷\¶lÙ¸šÿÒsD¾_¾üòËŽ^_41Ԍ۫&ãE‹Uëׯ¯?ópºõŒÒðN_÷ÀH|Ë0(žzê©êóÏ?ïxè&Á¿´C~ï{ß«KnHï–´B–êTCæÅ<Øñý,Q ‹niΜ9Ó•1î=o¹å–º‰sP´a®Ò¹mÛ¶ž~îÌÑÂ… ë9­7ß|³xÛǼ§Ÿ3A G}´xû½{÷úrâª*K²Û ±ñÀo›FÈN>X¡Ä´iÓªíÛ·»5‡L”†èúy –&ÈN?`âj2& ,(Ú6¿(iìnzý[ÚHÙéë> AHJB CnÚ´©ã=i"Ú¸qcˆ,½1¼Ó–,YR¼m§H¹É¾¤Ik„ Å7î—jFùOÿé?ueìpëu(&ê+m0kƒ&cÔ­V­„3ný˜«Ñ6†æw$€U"¡©4ÜõZZÊJCKÇoÔÊø“pÙÌ™3{ò^óçϯ¿£J$p׉`si•¯­z¥Áø~= m¿iïµ&áÝÒ‡U”ʵGÉ1Ð$° Ý$ ÀÀIcÙ3Ï}Ú= sõÅ_ŒêçÓèUÚx–`tÎw½–ÐK“f§[Ê;ò}¶bÅŠž½_¾›š¬Ý^~ǯ[·®š={ö¸] i…,Ñ{Bûo½õV_η9F&Ož\´m‚ø|`Ci°2ßEcñº€Á# ÀÀJX'7®ÿýßÿ}µlÙ²ŽßÀž¦³›o¾¹Ú¹sgO?×òåË‹·ÝµkWÇÞ÷ã?îøþ ’~´A2þæê“O>)ÞvñâÅ}ÛÏÛo¿½x[AH®¤á©[o½µxÛN>Làjòƒ4YÓN/¾øbqP³JꑇtªA:ߣyFÛ¿‹àb‚ ¼éÓ§Wï½÷^õùçŸW‹-êèïN3ÑÊ•+«7öìóÌŸ?¿8Ô¹gÏžŽ¼gn®Oðs8 ŸÎ™3§¯óýþÏÿéÊï4i’ƒ©ÃºXä¹:yòdÑvi»þúëû¶ŸMŽóßýîw;­1sæÌâmGÛðZêñǯ[iŸ|Ÿt£]¼‰¥K—o»wïÞŽ¼çáÇ«óçÏ»]¢ã¹É€v„`̘1cFõÁTÿøÿX=øàƒ <ÿüóÕ}÷Ýד&ºÜt>wîÜ¢mÏœ9Ó‘6¶*K>Û’%KºÒðÕäw&´Ù ý ’6ÌÕ´iÓ:ÞÛ 9ÆŽ=Z´m‚'ýüŒãÒ¹îU«”ÈÃJ;w®'û$Ù^™›~Ÿäú£4|~àÀú!£U¨ìFã:Œ” $cN<¯¿þzõÏÿüÏÕ³Ï>[7«uÂîÝ»«5kÖôä3,^¼¸xÛƒŽúýJG“ýj¢Éu"p¥uÓï¶ËAІ¹J@/áŒA“ÐÕ… ж½é¦›ú¾¿¥²Ó§O;0h4çUBˆ—¶X¾|yÑv_~ùe†|7:t¨£û½àÑ}ŒY B<õÔSÕºuëª;wV/½ôÒ¨;o¿ývuà 
7T=öXW÷}Á‚uè²$H–6Ç­[·Žø½ÎŸ?_Ô*™¦Ê´ÔuCZãÓO?-nÇ,±råÊ1w\µe®Þ}÷ÝêÎ;ï,gýúõõ±ÐMMÚçÌí·©S§VÇv»œ§òêF[,Œtíž8qbØí*ƒ6HãöÚµk‹Öd^‘íGêÈ‘#Eï3kÖ,mÙ´Š $c^Â9>ø`µzõêºÕñ¹çžU rãÆÕܹs‹ÛÒF"!΄!ÊΙ3gê cnX‰Òà_ø®¹¦;ÿi¡IË`7Ãuù|l‹AÈ&s•à_ÖV·ÖÍ¢E‹:ö»6oÞÜõ d“óN7Ï/¥~n2× ŸALš4©h;AȪxœNž¿ç;¹ä¢I|¼^«Î›7¯èº/™“ÒæÓK½ùæ›EÛiƒ m!7ÆZ±bEÝ¢“0äóÏ??¢æ¿4ŸÝÿýÕ/ùË®¼âî»ï.º!>Ò4Ò dé ñÙŸnir3‚ŸÚçú§É\åøJøO£Ô¿i„lBì–ÿüŸÿsñ¶ G BÒMŽŸ´Ê^{íµí"i‚Mkào~ó›úŸKÃŽŒÎ 's5’ d” C'ÍçmäÀže¯¦OŸ^ýú׿Ñäñ /tµ!)A¨´W–Hh!!†¦öíÛW´]B£Ý”ÐJ“îD¥¿ÇN©¿û»¿3`ÿ_ixtòäÉ­Øß^a5i3ï:TÝxãÕüãjÇŽB}¶jÕª¢írÍ7’¹Ú¿ÑvË—/7´Ž $ãZš×^ýõêÝwßmܦ˜&ÉnòšÓÔÔÇ\´]i s4š´þêW¿²xû¨É\$ ë¼tAº*×/ @Þu×]#jƦ;æÏŸ_´]Ä‘6ð&ÒÜ{äÈ‘a·ËÃ)ÒJm# ÿÏŠ+ê@dÓÒH‡MÌž=»¸)1­NM*Ó$T~˜5kV£àÛH5yýõÝï~·xÛ'NT_}õ•Ah‰-[¶Ô í3cÆŒúگĮ]»ýî\'–|çái%€¶„€ÿoõêÕÕ† ýLn*ï¦3—-[V´m‚MÚ÷>\œ\¾|yOÆÿ¯ÿú¯‹·=}út°£?JC‘ª¬5ú+!¸{î¹§zúé§=8ÞZºtiÑv§Nª_¥Jƒ“MÚÈ —®1ðï6mÚT‡KCvgΜ©_S§NíÚ>Ý}÷ÝÕóÏ?_´m*çÌ™S´íûï¿?ì6 bΟ?¿'cŸp]Þ¯4œ‘Ïšæ$z/ã>qâÄêË/¿,ž« ¸BçÏŸoÅ~4iòœ4i’‰£5þå_þ¥xÛÒÖåA—ãù¶ÛnkôÀ„Kå¼?mÚ´jòäÉõ1Ÿ±ûÿñ?ýlBx¹^bxi)_»vmÑ98ãºuëÖa·ËC Ž=Zt<4yØô’ $\$A¼Ç¼ºï¾ûŠ&m<Ý BΚ5«ºþúë‹Zâ|õÕWëÏq5 Z•„!æÎÛ³H‚Ó§O/¡8p èæºsœ$p›9(‘FÈ:&L˜0®Ç-!¢ ¬$<ÜqÜmçÎëøgƒ^( iÏÆƒ•+W6AflæÍ›WÝ~ûíõŸ AŽÔ/~ñ AÈBù®ÌõWIëøž={Š®…vîÜY¬L ùx9&<ß2ðM‹-jêéÅý‹/.ÚîìÙ³Õ‘#G†Ý.¶’æÅÒ÷í”Üø_*ÁÐÑ4[1:·Þzkñ¶ %íÞ½{ÜÙ·¿ýíVW†ó?ÿçÿ,Ú.¡™¡-JƒãeÝnÛ¶­Ì•JîÁ¬þÇÿøÕG}T=üðã AÒÜ­% —H¨'Í„¥š´¦Ô’%KŠ·Ý»wï°Ûìß¿¿h,XÐÓ±o®‹—^zɂ퓄V›´F½òÊ+EáÛ±¬Ièª AÈ«KLš4Iƒ­rúô鎓ƒ*ç’Í›7oŸè_ÿú×Õ믿^ÛôG8K×ç®]»†½N-ypÄP9´• $\ÆŒ3Zµ?¹1=7¨—HÛãW_}uÅÿ½´52!È&͘ÿÂ(•ÏZÒ„D×dœ.2ÉôéÓ«Ù³gm{¥k»´€g gΜ9æ€Ö„€ËøãÿX¼m¯Ú“8Ëê%¼\ø!wüøña~Á‚ˆ4uêÔúý›xä‘GŠÃtN³«W¯nô3i…“cÇŽU( Ÿ´1Ø´hÑ¢jݺu.ȧŸ~ºúñ\¯+ºoæÌ™ÕöíÛGô³Ï?ÿ|õÃþ°:~üø˜§4gΟ?¿xû´Ô¦í®›$K µ48¼zõêjêÔ©=¤ù9-°¥Í¥Nà¿TÂÈãAZžKä:áìÙ³ÿ,}wƬY³ª3fm{ðàÁúºo8iÏ÷6 AHú*á¼ËÝ|ŸV²¿üË¿¬ƒlL\*ÁŸ´R¾üòËÅ?3a„êá‡îÛ¸Í;·nK,ñþûïׯá$0QÚÕk[·nm»X­iM;d“ Ìh×Ôž={Æå1ã"!¹‘Hh&AèÝ>î/–æÔ^­!O<ñD£íôzã7º²/ ßqÇÅA¨œÿšî? 
9räHuË-·ÔáÛNX³fM} —Jd`ãÁu×]WüõÊ+¯tô½ÓJ˜ä¹sç,ú)moܶm[QËg®«ÆC3*c‡ $}uµäLJx-È´5ž¯Žuiíž8qbÑ5O‰¥K—TŠÇûÐz¹¡;៼„{衇êÑh‚ˆ W½öÚkõÿ¥7ŒIsâ“O>Ù÷q¹óÎ;‹ ¥Ÿ/7Ø·Yæûg?ûY®i“XšïˆÜ¼ysÝ„t÷Ýw׎f-%L{øðáêoÿöoë??þ-lôá‡V·ÝvÛˆƒ5Y·Ï?ÿ|ýJêâÅ‹«yóæªµ´ms•¦Ó4Ù59%þñÇW/¾øb5sæÌQŸWŸ{î¹FïŸg‚àãI§š ›È:ëMm9sŽH‹ì¦M›Š^ÿlÂæMƒ”>øà¸ ñæxÕïÊq;Ôjzýõ×=W ë ÍÕÅÇæ#s44_Cs•óG›æ*ûŸ@c‚CM䳤É2ç€D—,YRšKdÖý›o¾YŽºtNÓÌ:Öz—úÎw¾Óó÷\°`A&di..d;Ô›Á„—/_^ãW’c:!°<È i9ë÷‰'žwçâG}´n×-‘¶äœ[Þ{ï½úüR*ó¼oß¾ú}ºÕ¸Ì¿ÉC0:„Ìw AHú*ỼFÒÌ–pÕÅ«„ó»ŒJcH~w§ÂjÛ·o¯CXm‘ ÂŽ;Fý{V­Z50k&·Ï?ÿ¼ºë®»ªcÇŽuäw&–W?ƒ±(Á£´‹Ýwß}uH¦r,ç5Væ*mx¿ùÍoê XSi“Ì+AÊ„Áóú«¿ú«oxÓòöOÿôOu 4çË‘†”rNM0ïj!5:'aÕÌWBêƒ*Í¡ù %­ij€M=Ÿ;þÅ_üEõ¿þ×ÿª¿Ç󃄙G*á¾Ñ4ʪL_xá…zŒKd»|¿Î™3§¦æšçrã–kª|oîß¿¿>5 V32iDÏ9´Æ«W¯6˜ AHúnöìÙ_7óF?yu¢!ñržzê©:´Ô&K—.u2ÒÜX?H°K3äÚµkë6±Aצpm§%@÷ÁTÛ¶m«Û½=óÖ[oíøï|ë­·êpçH›3\J(¸SÁà+íc‚QôNBƒ„Lˆñ£>ª.\øu£k‰|‡wú{|ݺuÇ’|_nݺµnÒn"Ö¡kÂÕ Cµa'P9jr>ÏwÁŠ+ª7ÞxÃÁ= iäÍ8æ;u4׿`P|ËÐoO>ùä7Û(!ÈgŸ}¶uû•ÌhÚrC|‚ƒ( Ù¥9lP¥5m<´ì%ˆôé§ŸV3fÌØÏÀp7B« ¶üìg?ke 6ûöî»ïVË–-óeÕcc¡a/Á¹¬í ôm²vÏÒ 9š sÖb9‡Â‘§OŸn‚j¾é¦›ØðÀÔçæ‘Z¼x±A` BÐw3gάoŸ4iRëö-7š'hׯäѶ\-_¾| ×OÂiÿ÷ߺ¶Îá$ü›€í‹/¾8nŽõq>ÿüójÓ¦M£ qôã<õõÞ{ïum¿úðÃ[ÕΚ}J£_ÚÇè½AoO½øøÉÚÎqßkK–,©ÛLé|Ó-™ƒ~Ñs—óþ ·›¶Í´iÓF<ž¹öl`Pù/>´BnèN˜íþûï¯:ÔŠ}š={võꫯ¶¾Á.Í@/¿üòˆB3iëm² ò92W u>ýôÓÕáÇ[»¯“'O®Ã9>úè@7YŽTIÏ<óLÄØ¼ysµgÏžÖ¾š«^´vf'x½cÇŽjýúõÕW_}Õ·ÏžóBŽ©±ºF3Öm×ö¦ä¦rÜç»þÞ{ï­¾üòË®¿ß† ê‡Aþûù,íœwÝuWÝîØ v'ˆ:¨­ÓmvçwVÇŽküsóæÍsçÆÿÕ€ÖH@,ícG­ž{î¹êÈ‘#}Ù)S¦TO>ùdµzõêP¤(!­“'O6þÙ Œ©Hš§ôHÈ#k(¡Ú¶„ìÒ†˜ f‚uBÿ¶nÓ°˜c-suàÀêÂ… ­YG™«„5ûàIûäܹsë`øH‚.£=ÿ¥¥4¦± Ë·]æb¬I+=¸çž{ªÏ>û¬+ï‘c6á»6µ«¶é:ë—¿üe´Nàº[ÌA÷å5h˜ôpÆ7AHZ'±¼ì{çwªÝ»wWçÏŸïúû&“æ·Õ-”ýNhª©±zC|æòÃ?¬Îœ9SíÛ·¯Ú»woϰ†d ¥MtéÒ¥ud,†š:!!Þ"Ï;÷õ\õ:üÓ§O¯~úÓŸÖáÇ„4Û0. 
,%ž–ÓnIšsI¸fÚ;ñ=“Ö˶4_é<6å\˜µýÆoÔk»Sí ˜çøÝºuk5iÒ$'׫ŒSÚ^Ó(˜ñïäw£9èŒo®-rÜägÆB 8ã— $­•`ÒöíÛëêVùøãëPPÂm|4TÝ}÷ÝÕìÙ³v¬^úýï_MXá‰'ž¨[çÆ²„»Ö­[W¿¬=|øpõ·û·õ:jÚ¢T"Á¥¬~ò“ŸÔއ@Y§¤©,Mˆy:uªn„Í1ŸÖ¸N¥.=ö/ž«¶†v²o eýîÚµ«—'Ntäwç3çü—°nÎc¹òr–Þ²eK¸?{öl«ömÅŠu¶Ôƒ>X½ýöÛWlÀÍÜnذ¡cû—õ’ó땾vLãð•dr¬ŽÒå* IDAT'ÈõÚk¯Õí„#=Î\Î V­ZUïS/=þøãõù*Aî+I ¾á³ûyåœòÒK/ÕŽ´A9sóHæôjçÒY³fÕçú+W¾C›¬û«É{ vÎwtæpP½þúëõ1X”OHu¼ç[þÃÿ{ýë3Ïv^‚9™ŸöÍúš«áB¿™§¡¹Ê}÷»ß­CIùçAn+ËçN¨÷7¿ùÍ×çÀ¬Û«G‚i9îó¹ÿú¯ÿº²e,;²²rl\¸p¡þ»„އ澭´9.s<­½¬¯|×fm5 \å8O/Aõ„ºò;/–úË1uœ _νý we߇Æýâ}Î8dÌs¾„ðYÖL®­>ù䓯¯#ò™.ŽºžÊ$ò ‰&sߗߟóúÐ ²N‡®É:=VY›CßCï—ï…¼W>ào8p Z¸paѶ¿ýío÷XÉ? B0¦\®*7ìç&{(•°Ä¥!œ„3²–´)µKB5—kóKi<ÎUÖíÅm§CaPDñ€6ï½ ‡‚µ¾ÛçŽ;ñÃI ÷øƒ``%ÿè¿P0¦ŒÅ†Gz/ÁYki0$c®þ]BJ0Vt£Å’f†š¤iŸ<ü#Mª%-ZdÀxß2ƒcçÎ_7¦gÕªU €' 0 €|çwж3gŽædÆAH€qèСêìÙ³EÛÞ}÷Ý þ/{÷$eyæ ü†›"rÔQ‘»BŒ J¼£M4šl.$¦Œ›[Yµ[ÙlíÖ&eN“JªÎnÕ©$kÌšx)51]"&^p‰!ʪÀ (¨(¹  pÎóêÙîža¦g¦{è߯ª«çûº¿Ûó¾ß×UTýy€£‚ $@ñ³Ÿý¬¤ïõéÓ'}á _P0Ž ‚]ÀêÕ«Ó“O>YÒw¯¿þú4dÈEਠ ÐÜ}÷Ý©¡¡¡¤ï~âŸP0Ž‚U.¿øÅ/Júnt‚œ5k–¢pÔ„¨r?üpZ»vmIßý¾úôé£h5!ªÜ¯~õ«’¿ûÕ¯~UÁ8ªBT±èùÈ#”ôÝáǧñãÇ+GAH€*ö¯ÿú¯©¡¡¡¤ïÞxã ÀQ§F €–üáH+W®LÝ»wO£FJW\q…¢`îB'úío[òw¿ô¥/)GAHà°Ö­[—ž{î¹Æåž={* æ.t¢'Ÿ|2­Zµª¤ïNŸ>=?^Ñ8êB”hëÖ­éž{îI;wîL=zôHçw^š1c†k⨶téÒ‚åI“&)Šç›¹ è_þå_Jþî7¿ùMà¨$ tiëׯO›7oN½zõJgžyfêÝ»wÅŽµ}ûö´cÇŽì†ìØ]ÝÑxM”ÏÁƒS]]]ãr·nÝŽH˜¬¾¾>½ùæ›iÿþýièСéä“O68ž]bîBG­^½:ëYª™3g*G%AH ËZ¼xqš;wnãò‰'ž˜n¹å–¬›Ðq+W®L»wïn\ްñ 'œÐ©ç¡¼Ûn»-ëT˜ó‰O|"M™2ÅQÕsÊá—¿üeö,ŬY³R¿~ý €£Rw%ºªU«V,GG³­[·* ”ÉÒ¥K –'OžÜéç°iÓ¦‚dˆîPís:*÷ÜsOÉ߿馛 €£–Ž@—uèСvm·cÇŽ´k×®TSS“† ’ºw÷GB±úúúôꫯ6.÷ìÙ3?¾ËÜçåæ¹aîBg‹ dñúp8Ñ%7:BÀÑJè²FŒ‘^{íµÆåO<1 4¨ÅmÞzë­t÷Ýw7.Oš4)]wÝuŠ E–/_ž8и|ÖYg¥^½zuúy :4õíÛ7íÙ³§qÝÈ‘#;õ<7Ì]8úôé“~úÓŸ¦ï~÷»ióæÍÍ~gÀ€éÆoLßúÖ·² 6­ü+8Ðe]pÁéä“ON[¶lÉB.£GN=zôhq›è–/?\ü·¥K—,ŸsÎ9Gä<âÞþÆ7¾‘Þxã´ÿþ4lذT[[Û©çà¹aî‘òµ¯}-}å+_I6lH«W¯ÎºD†èLÏÂBÀ±@èÒ¢+d¼€òyï½÷ÒÛo¿Ý¸| 'tzÆ|ýúõf£KÎ](‡èô¡ÇÎ@5é®@¾âŽz“&MJݺuSÌ]ŽAH @q˜L7FÌ]ޤ%:SCCCÚ´iSz÷ÝwÓæÍ›ÓîÝ»SïÞ½Ó€ÒI'”zöìÙìv'žxbêׯ_“õk×®M;wîL555éÔSOMÇw\“ïlÙ²%Õ××gÇ1óÅúuëÖ5Ù¦oß¾iРAm¾¾7ß|3{mß¾=;ßSN9%{ 8°Mû9tèPzë­·²úDMN?ýô¬N¥8pà@¶m\[lsÆgdõi¯·ß~;½þúëiÇŽéàÁƒ©ÿþiذaiüøñÚo{mÛ¶-mܸ1{½÷Þ{©OŸ>Ùü9óÌ3³9ÔQç•+Wfsåý÷ßO{÷îÍö{üñǧáÇgûŽå¶ˆ1ˆ}Åv±}~Wº£+V¤õë×gó7æJ\ÃØ±c³÷–Äœs:œp 
ټWŒK9Äþ·nÝÚ¸×ã]ª¨]Ì™wÞy'»þ¨mÔ1îx :´Í÷Cì'îÓ¨[Ôçä“O.i»˜¯qqßÇ+j÷Db|bÜâ7£µû¹Ü¿¥õ\h/AH Ó¼ôÒKéÉ'ŸÌmáoûÛë"€s×]w5.92Íž=»à;{öìI·Ýv[piNg~þóŸ7Y–¿ÿû¿/9|øê«¯¦?þñYp¦9Žú«¿ú«,àTŠeË–¥‡z¨q9ºš]{íµ%m»hÑ¢¬Î9^xaúØÇ>ÖæšGxêøC‚iNÔæòË/Oüà;eþDppîܹYXèp"€tñŧI“&•´Ï¬Î™3' ¯Føôp"œ4eÊ”4sæÌÔ«W¯V÷sóî»ïn\þÔ§>•&Nœ˜ýýâ‹/¦yóæ5{<öØcéì³ÏNW^ye“€Ñ† ²s Vs"¨s¬¹0p[,Y²¤`¹”ŽzQ»×^{-=÷Üs­Ö2ÄøÄ܉ g)î»ï¾,4šó×ý×­†!cþF=#(v8‹ûcêÔ©nT²F‡›oØüÍo~“/ßóÏ?Ÿ>úÑf÷J{EHîž{îÉÂx9Àûú׿^‘1©ÔÜíè¹Å3äÎ;ï,xV|ãßhS2ž÷¿ûÝï—G>ÿùÏwÚoig=:B¨¸k™Ú*B7ûöí+[Dw¸|Ñ!«¹c.ÌÔ’Ø&ÂP­!ã{)à‹.eœŠ€RtýkM)×v8Åßm˶¹kzôÑGÓý×µø½èöÈ#dcsÑEUlþD˜+™K—.mõ»Ñ-EL½îºëZì,÷òË/§ßÿþ÷]ÿZa 0®^½: ¤Fè°-ãËû÷ïÏ­¯¼òJ‹µPT\G„z£Ã`X¸pa|ji.GG¹;î¸# Pµ§#]î<—/_Þ¸A§ÖB¥Ñ /Âv,U\c[cŒ¢³h¹çtÌËÖæoˆPm˜œ…Ï*õܨtš›oq_FP¼¸›eþ5¶WÌåx¦ç‡ Ã%—\R±1©ÄÜ-ǹE‡ÅxEx0÷¬ˆ±‹ i[Æ:_t„ìÌßÒÎz.t„ $PqÌnD@åŒ3ÎÈ:ºõìÙ3 ê¼ñÆ-/†Þ®ŽSÌBIÚk‹ E¸¥%&‰PJtËÜrA¤âï?ðÀé–[nÉΫEÀ)ÎqåÊ•M®©GÍv¼|ê©§²ð×Ç?þñ²ŸO"Ü— QtÚ|ï½÷šÔ:Qѯ¹Y„Õ¢³ä /¼Ð䳘›r:þøã³Î‹1fùâX.»âŠ+Ò‡>ô¡’¯#7W"0”¬¤;v4 ²Å±üñô‰O|" @þùÏn21VÅ÷LŒO'æXì¿­bÜc,s¢ËjtÁkÉ3Ï<Ó$à÷uÌñÜýç÷yþ8ÅùÇ8Ä1ÊÙM.³Å¡¶SO=5ÕÖÖf×õŽs‰±Èu»‹u•|ntvbŽÿû¿ÿûaC¡¥pkûþõ¯Ö­[W°þšk®I&L¨Ø˜Tbî–ëÜ"p¹`Á‚Æåººº’ƒqÿ†.ÀYÉßÒ#ñ\h AH ¢"`òÚk¯5.G€ëúë¯O§Ÿ~zÁ÷¢ÓV„<òx3gÎLçw^öw{C;ÞûÛ¿ýÛÆ€LN¢ûWN„H>ùÉO6Ù.M­É6lØÐøwœß?øÁ¬3b\cˆpH„#“Ašèîwå•WVåxEÈ%'®?®é#ùHØË]SŒç¼yó²VN\Ó9眓…lÊ):(‡ ãœ.¸à‚,˜“á±èLöì³Ï6É¢ _s"èSö:thâ‰T.$cõî»ïfçÏŸ_0†NŒÐWƒJßÏ…‡jjj²šN›6-;V„ #¼ךäŠ.£Qãü.wguVºüò˃´q~qßä‡ìrc4nܸ6×»¸³éäÉ“[Ý&?¸7bĈl|"€×™/®3O?ýtcð3îûX¾ì²ËÊ2_¢Sd„¹òçpta=ûì³›|7B¶ùË_²Wîž­Ôs£³kûزeKãrëÎ=÷Ül¾Fǘl‡~¸Ip/BøÀ*:&•˜»å:·â dtŽgi[Ï—üMs]+ý[z$ž‹m! 
TTäò]{íµM‚!:oE¨ã'?ùIc€-BÑÕ¯£¡Šè¶¯ÜqòE­\ȯ#û¿á†Ò¨Q£ ÖGX-B5vz饗×ÇßÑ-,BSÕ*®éºë®KcÇŽmrMS§NÍj]árÒÂóÏ?Ÿ®¾úê²C„~ŠW—^zi6-vÜqÇeá˜ýþ÷¿O/¿ür³]7#Xø§?ý©`]Œ[Œ_nŽäD(ꤓNÊ^Ñy2`¹ÀR„c?¥Zs!ÈóÏ}îsY·¹œÝ3&ÝxãéßþíßpQÛüd #<™/‚J_üâÓwÞYÌ]´hQ›ƒúËOE𩹎šÅbŽÄõŹEÈïpâ:/¾øâl òCcË–-+[rãÆÝê"œÛ\¨-Äs%7Ÿâ9PÉçFg×(?9kÖ¬ì¾ÈŸ3‡«Ik{ì±ì\òµ‚,÷˜”{î–ëÜâ1lذl9€/%™çΡ³KÔs -º+P)®ˆÎX9§vZ:óÌ3ûý¯|èCj\®¯¯O¯¾újU_c„D"ÀV‚Ì¡Çü.\Q—·ß~»j¯)B>®+Aæ‹]q 1R{÷î-Ûy¢ÆÍ… óõéÓ' ï|ýë_O£GnvŸÑ1-'ÂlL,û‹ŽoÿøÇ ÖE‡¶è¾Xª8·˜+ù!È|,:\x몫®j‚̯è™ï­·Þ*è2WŠdåB˜!ºO–F‹àVÔ°¥€_¾¬æßÖÊ?nGäw4 ¥ÑÚ¸k¯#U£xöä‡ ;"ºLFÀ6_k!ÈΓöÎÝrž[q€±8àØœèúæ›o6.GÈ0‹ý[z$Ÿ‹¥„*&ºcåw ,%dÝÊòmÞ¼¹j¯/º·}æ3ŸIÇoñ{ýû÷o”ÌT“âDð”SNiõ»Å¨ëD´rˆà×êÕ« Ö}øÃ.yû‹æ³Ï>[°îüóÏ/œµdÊ”)!¨¾ð mš+ÍWñ1ŠEøó¼óÎkq» &dÇȉû®­slÉ’%Ë“'O®È‹@hÜùçº}ûö²ì;`ùÖ­[×%Ÿå¨Q„.£»d9DÛ'Ÿ|²`])!ÈΓöÎÝrž[„ãù™ãµvíÚ·yå•W Ë#GŽlÒ}´Ò¿¥Gò¹Ð‚@ÅwŽ?þøV·‰P¾×U«SO=µÅ®\ù T°A¿jÝ ÇŒSÒw£kXq´\ÁÕ⎙'žxbÉçu8ï¾ûnAÇÊèvVJ+ÿû8Ì·eË–’çJ)Ý‹çIh­ f¨©©I'œpBÁº;v”|mÑm.?´!¼R»¶GŒgKÏŠö*ˆ­X±"ëŽÙu¤F1W?ùÉO–å<^{íµ4gΜ‚u¥† ;cL:2wËynqÿ·µ`xq×Èâ®’ñ[z$Ÿ‹m! TÌÀ –#°ÒšâEkôºŠâ._Õ„ÌïhVŠâ h[Âw-)î¤6lذ6Ÿ[±âù!¨âŽp­‰ŽmùÞ{ï½²Ö¿8ÌØ‘9Ö–ŽK—.-X.înWnùäÂʲß!C†ûÙÏVŘ”cî–ûÜ"И„ŒÎñ[–†N‘ˆÍ¯õÈ‘#Èo©ç"ÐUB¡Šè’ݵrr¡’–D0¥½¡:OtýÊמnbÍ)½ÿþûÞg„:ºÏ0åëÑ£GYºU)HË—/o\ް֤I“Ú´èzÏ=÷¤íÛ·¬Aƒ¥nݺuÙy¡¶úúúÆåèJ×–@`t§+øE¸/: Fˆîp"ÀÖY!¿÷3f¤Ë.», ·Eˆ/Ân9;wîÌžSŸÿüç+rü®P£–\|ñÅiÍš5»§žz*ÕÖÖ¦#F±1éèÜ­Ô¹ÅïÜÙgŸ]Е±®®® Ëù¢‹ä‘ú-õ\ºŠîJTJUòÃ3ŸûÜçÒ7ޘƗú÷ï߸>:¡E æüóÏO·ÜrK¼©©9¶ÿ?ÇâŽˆí §t†mÛ¶,—«#dq 2ÂGUŽpeqG¿üteôÊ×Ö.¬ëׯO[·nm\Ž Ú¾ð…~GJ„ÇÎ=÷ÜôÍo~³IçÀxNmÞ¼¹"ÇíJ5:œë®»®à™Ý ûÛßvø¾ìȘttîVòÜŠ¿¿bÅŠÆàb} –#°ÛšuwI<ŠÃŒ-;òÜèÊ5*6uêÔ4a„&÷eq—ÃJI9æn%çK(BÆ35žÑùÁØ—wdììßRÏE «¨Q R"±råÊÆåçŸ>{Eð#‚~={öL½zõÊÞãaè8xðà,dR *‡þýû,×××§U«VeçZMºwïžu9Ë-\¸0;Ï“N:©É÷#ô÷ë_ÿ:»žŽŠcìkÆŒYð©9–,^xá…‡ÝgCCCºýöÛÓÎ;×}øÃNW\qÅa·ùèG?šî¾ûîÆåèð;YvÅa²0µG®›^t+͉àØüã¬]t“‹YŒY®žQۜŽ]nÔŠP^óé§ŸÎ^‹û8ºÕmÛ¶­É¼;î¸ãÒ 7ÜP±çFµÕ¨¢Ëà•W^YÐá0î©èÆzíµ×VtLÊ5w+=_Âĉ³es¡ìÑ£Ggûª–ßRÏE ÚuW R"üñÛßþ¶ ¸á·EWà }ä#Iþð‡³ÐOt,†DHå¾ûî˺!–Kt;\H._qg³#᳟ýl?~|Iß@ÐìÙ³SŸ>}Ú}¼¾}û¦/~ñ‹%3êxóÍ7·T=þøã›¬+¥Ógì÷k_ûZ6oJᦠ3æ°×ø¥/})]~ùå©GÍ~§¹°Ot»þúë³}WÃÜh¯èš¯œ\gÍöºì²ËÒˆ#šÔ/‚}ø‹n¹zFhöË_þrš:ujE®-ÂXÅ"¯8âP[Ì˵ÅyUò¹QM5*g­ã¹/BŠÅ] Ë9&åž»•œ/¹g\„Ù›Ý"«é·ôX.ÕOGH 
bžzê©ôòË/7.Gw¬è]¥'‚¤™;wncèbûöí©®®®I0(‚ùªJ Ö…èôA£gžy&ëÖ\¸#B*ýúõ+ËñBñ¾Š—'‚%ŸùÌg²ó|ñÅ B@9Îºè¢‹ÒØ±ckÛE §µcEh2êA›x O ã˜jŠãnÙ²¥Ùí"xÙRh&ßÉ'ŸœÁV¯^-Ç6çŸ~I5ˆ@Ïç?ÿùôì³Ïfç”ß9/߀²°Zì·µNkq­Ó¦MK£FJ?þxÖíÀ‡­Q„*/½ôÒì¥èè\‰Nl¹k/¥¾ÅÇÈes–,YÒd®wD\g¨–/_žæÍ›—Þ{ï½fçñÙgŸf̘‘Õ3®/®+jçZjg¼ÖD0,Æê¹çžKo¼ñFjhhhö{Q›ŽwÞy%‡·ÚûÜèŒud¾udÑAvëÖ­Y@1'‚ùÉrŽI¹çn%çKNTrp#œqÆËjN„J:Ò*?¢‹W):r¼2µfjM¢FŽ™½Zµ.õ<[;¯SN9%{•C9:‡Å˜E4^åã+Ë©½s%ÂFŒkÖV¯½öZc,Œ=ºlA²üsWks4‚šÍ…5›SÜ•®”®|1ON;í´ìUnm}ntF:ú,ìÈ>b•2:2&•ž»•œ/Œð`5ÿ–vÖs ½ü@ED§´|ÑU«-¢ V¾áÇ+*tÐ’%K –Ï9çœ.qÞÑ¥0_GŘ»~KºAH "ŠƒJëÖ­+yÛè~õâ‹/¬«­­UTè€Ý»w§U«VÜ£ãÆ«úóÞ¸qcA'ÀèÜ×ÞŽ™˜»~Ký–]“ $P§vZÁòÂ… ÓÊ•+[ÝnÏž=iΜ9iõêÕëŽ;î¸4fÌE…¨««Kl\ž0aBª©©©êsŽóýÓŸþT°îÔSOMݺu3 æ®ßR¿¥À1¤F €J5jTêׯ_ÚµkW¶¼oß¾tß}÷eݨÆŸœŽ?þøÔ³gÏ´cÇŽìß–,Y’öïß߸ŸîÝ»§OúÓÙ¾€ö[¶lYÁòäÉ“«ò<çÍ›—=/8Ö¬Y“6mÚTðùùçŸo0Í]¿¥~K€cŒ $P̘={vºë®»R}}}ãúµk×f¯RDǯ+¯¼2qÆ tæ™gfÁ¨è¬7räȪ¼¯"ü¸`Á‚Ã~¡¯aîú-õ[ [!€Š9餓ÒM7Ý”.\˜êêê²NV¥ˆÎVçw^úÈG>’…@€Ž»ôÒK³WWuúé§§n¸!uëÖÍ`š»~Ký–ÇAH ¢† ’®¾úê4cÆŒ´|ùò´yóæ´sçÎÆWtø:ñÄÓ€_cÆŒIÇwœâÁ1¦GécûXzùå—Óû￟ú÷ –N>ùäô| uïÞ]‘ð[ê·8 B¢W¯^Y  %^xaöü–äø/S€ª% T-AH j BUK¨Z‚@Õ„ª– $Pµ!€ª% T-AH j BUK¨Z‚@Õ„ª– $Pµ!€ª% T-AH j BUK¨Z‚@Õ„ª– $Pµ!€ª% T-AH j BUK¨Z‚tàÀ´aÆ´}ûö&Ÿ:t(mܸ1íܹ³ÍûݳgO¶ßýû÷7ûù¾}ûZüŽ‚´mÛ¶tÇw¤ÿøÇiëÖ­ŸÍ›7/Ý~ûíiñâÅmÞïsÏ=—~ö³Ÿ¥Ç¼ÙÏçΛ}¾bÅ ƒÀQK ƒœ¦M›–u†|ôÑG×oÚ´)-\¸0 0 ]tÑEmÞïĉ³÷åË—gûÎ×ÐÐ kjjÒØ±c G-AH€2¸ä’KÒÀÓ믿žÃ#<’<˜fÍš•Û*–µµµiÏž=iÕªUŸ­\¹2íÛ·/ AöîÝÛpÔ„(ƒ:^uÕUÙß=öXzöÙgÓš5kÒ¤I“Ò¨Q£Ú½ß)S¦dïË–-+X_WW—½Ož}ú¤3fthŸgŸ}v²|õÕWÓÞ½{³uû÷ïÏ:BöíÛ7=Zá9ª B”Q!sjkkÓñÇß¡ýE˜rܸq©¡¡!­X±"[¡ÈCNœ81uï.&ÆÑÍ (“¤Ç< 'öë×/­Zµ*ëÜØQS¦LÉÞ—.]š½×ÕÕeïçœsŽ¢sÔ„(“ ¤Í›7§©S§¦Y³feë}ôѬ{cGŒ5* V¾ùæ›Ùþ#`9hР¬ã$í!Ê ŠÏ<óLXœ>}z7n\3fLÚ¶m[š?~ÁwŸþù4wîÜ´sçÎ’öÝ­[·¬ûã¡C‡ÒC=”užœ4i’¢sL„(ƒ9sædÅ3f¤^½ze뮸âŠTSS“-Z”6nÜØøÝL.^¼8­Y³¦äýO™2%{_¿~}öÁH8BtP]]]j9rdš8qbãú¦‹/¾8¼MÇ2dH¶½nK!Ê ¹dN¯^½ÿ~úé§SÏž=ÓÕW_ݦýïÝ»7ë<Ù½{÷‚®“p´ë®ãwÞI«V­J—_~y0`@›¶]¶lYÚ¿Öy²oß¾ŠÉ1CGH€NÒ£Gtà 7¤ &´yÛ_|1{Ÿ|¸BrL„è 9VuW Z BUK¨Z‚@Õ„ª– $Pµ!€ª% T-AH j BUK¨Z‚@Õ„ª– $Pµj” óíÝ»7mݺ5{?tèPêÝ»wþ› $@'Û·o_ãß:ìg€ $Àõõ¯ÏO?úÑR…€Ã„èd½zõÊÞþó—ÓSO­M¿ùÍÊôî»{ >þ?AH€N6xðà4þúô¿ÿ÷ 
Ùò¿üË…ièо©[·nÙgÀ«Q€Îµ·ôƒ¼8”¾õ­sÓœœu‚ŒdŸ>}òBt²›ož“^ý½4sæèôÃÎJ55ÝÃÝЉ~ò“çÒÔ¥ÚÚþé§?‚„Ö¸C:Éüù«Ó·¿ýx~|ðÁO§#( ´B lذ+Ížý»T_ßþá.LçŸ_«(PAH€ kh8˜… ׮ݑfÍ›n½uº¢@‰!*ì'?y.=ùäiøð~éÞ{¯K55¢]P*w @½ôÒ†ôÝï>•…ï¿ÿú4`@E6„¨ v¥+®øeÚµk_úŸÿóÒ4}úE6„¨/ùá, È¿ù›óÚA ¾÷½ùé\•jkû§ütêÓ§FQ !ÊlþüÕéûß:ÕÔtOwÜqM2ä8Ev„(£mÛêÓÍ7ÏI Ó?ýÓÅiæÌÑŠ  PF³gÿ.­Zµ5MŸ>"ýÝßMSè AH€2ùç~6=òÈk©¶¶ºÿþëS¿~½:H  -Z›¾ûݧ²¿ï¸ãš4|x?E2„è mÛêÓìÙ¿Kõõ é;ß¹8Íœ9ZQ L!: ¡á`ºâŠ_¦U«¶¦ /<=ÝzëtE2„è€þpAZ´hm>¼_º÷ÞëRMØ”“;  /^Ÿ¾ÿý§³¿ï¿ÿú4bÄE2„h‡]»ö¥Ù³—êëÒßýÝ´4}úE „h£††ƒYrÅŠÍé²ËF¦üà2E „h£ÿñ?槇^‘ è“î¸ãšTS#ª•âîhƒ?þqUúádáÇûï¿>1@Q ‚!è286lضoßÞä³C‡¥7¦;w¶y¿{öìÉö»ÿþf?ß·o_‹ŸsìØ¼ywºùæ9©¡á`ú‡¸0Íœ9ZQ Â!è2¶mÛ–î¸ãŽôãÿ8mݺµà³yóæ¥Ûo¿=-^¼¸Íû}î¹çÒÏ~ö³ôøã7ûùܹs³ÏW¬XaŽq·ÜòhZ»vGš>}Dú§ºXA BÐe <8M›6-ë ùè£6®ß´iSZ¸pa0`@ºè¢‹Ú¼ß‰'fïË—/Ïö¯¡¡! @ÖÔÔ¤±cÇ„cØ÷¾7?=ð@]2ä¸tï½×¥>}j: $]Ê%—\’˜^ýõ,¸yä‘tðàÁ4kÖ¬,°ØV°¬­­M{öìI«V­*ølåÊ•iß¾}Y²wïÞà5þêôýï?ýýÓŸÎú󥿢@'„ K‰ ãUW]•ýýØc¥gŸ}6­Y³&Mš4)5ªÝû2eJö¾lÙ²‚õuuuÙûäÉ“ÿµm[}úò—N Ó­·NO×_?AQ  BÐåDà1‚;wîLO<ñDêÓ§Oš1cF‡öyöÙgg!ËW_}5íÝ»7[·ÿþ¬#dß¾}ÓèÑ£þ!ÈÕ«·¥éÓG¤ï|çbN& @—AÈœÚÚÚtüñÇwh¦7n\jhhH+V¬ÈÖE(2Â'NLÝ»‹â‹þÏÿY”~xE>¼_ºÿþëSMyÍ]@—sàÀôøãgáÄ~ýú¥U«Ve;jÊ”)ÙûÒ¥K³÷ºººìýœsÎQôcЂo§üÇ'³¿ïºë“Yè|‚t9 ,H›7oNS§NM³fÍÊÖ=úè£Y÷ÆŽ5jT¬|óÍ7³ýGÀrРAYÇIŽ-›7ïN7Üð›T_ßþæoÎO3gŽV8B!èR" øÌ3ÏdÅéÓ§§qãÆ¥1cƤmÛ¶¥ùóç|÷ùçŸOsçÎM;wî,ißݺu˺?:t(=ôÐCYçÉI“&)ú1èæ›ç¤ v¥óϯM?øÁe G $]Êœ9s²€âŒ3R¯^½²uW\qEª©©I‹-J7nlün&/^œÖ¬YSòþ§L™’½¯_¿>{`$Ç–Ûo_œ~xE0 Oº÷ÞëRŸ>5ŠG $]F]]]j9rdš8qbãú¦‹/¾8¼Ÿ¢@„à¨sÒI'e¯ö¸úꫳ÷áÇ+ä1`óæÝ鳟ýmª¯oHßùÎÅé²ËF* TAHÈ#yl‰äªU[Óôéÿ—½ûΪ>óþ! ˜ÕQS&:(«A²Sª¢éŒSPC‡qdÆÎ)çÔ=:§ÚÝζ=§L§G=9ã;ÇîèupÇîÐY<­j•­€T)¢ÒÊHt¢Fec #D’å÷ë& ÊŸ É›Ïçœë½ï½yßÜünîë“ûò½Ï¸¸é¦ZýÐPC F·Þº*V®|5*+ËbÑ¢«£¨HÜ ú#g&0è¤ä-·Ôçåûî›UU'è§!€A¥µuGÌŸÿ`tttÆ_ýÕ¥1{öƒý˜ $Ð'öîÝ---ñÎ;ï¼o[wwwlÞ¼9¶oß~į»sçÎüº{öì9èöÝ»wàö÷ºá†‡ö}}{\~ùÙñÍo^êÀA?' 
ô‰¶¶¶X¸paÜqDZuëÖ¶=öØcq÷ÝwÇÚµkøuŸ~úé¸çž{bùòåÝþÈ#äíúZßùÎêXº´1*+Ëâ¾ûæFii‘ýœ $Ð'ÆŒ]tQî ¹lÙ²Þõ[¶l‰'Ÿ|2ÊËËcæÌ™GüºÕÕÕyþüóÏç×Þ_ggg@ŤI“>ðuV®|5,X™—.üLTUì Á  ô™Ë.»,F¯¼òJ.&?üptuuE]]],©°¬ªªŠ;wFSSÓÛ^~ù娽{wA–””ò5ÚÚ:âºë–FggW|á çïÛ—I ‚@ŸIAÇ«®º*/?ú裱zõêØ¸qcL:5ÆÔ¯[SS“çëׯ?`}CCCžO›6ퟟB›6m‹K.93n¿}¶ˆ $ЧRà1·oß+V¬ˆÒÒÒ˜5kÖ1½æ¹çž›C–/¾øbìÚµ+¯Û³gOî9bĈ˜0aÂ!Ÿ{ë­«béÒÆ¨¨ ~fßþ9H0€B}.!{TUUÅI'tL¯—”“'OŽÎÎÎhllÌëR(2…!«««cèЃG¥V­z=n¹¥>/ßwßܘ2¥ÂÁFèS{÷îåË—çpbYYY455åÎǪ¦¦&ÏŸ{î¹}zÔÕÕåuË–-ËÝÅøñãs°òµ×^˯Ÿ–£GÎ'æºë–FKK{\rÉ™qÓMµ P‚@ŸIÅ'žx"kkkcòäÉ1qâÄhkk‹úúú¾ö™gž‰Gy$¶oß~X¯=dÈÜý±»»;–,Y’;ON:õ _{ë­«båÊW÷íGq,^|M”–980@ B}桇ÊÅY³fEqqq^wÅWDQQQ¬Y³&6oÞÜûµ)0¹víÚØ¸qãa¿~MMMž777çy F¾×ÚµÍqË-õyù‡?ü㨪:ÙL€‚¶k×®xóÍ7cÆ ñÚk¯å;Çl{ IDAT]ZGßkhhȡƳÏ>;ª««{×5*.½ôÒèêêŠ+Vô®O“Ñ£Gö÷8õÔSãŒ3ÎÈËUUUï{n[[G|ö³DGGg|íkÅìÙàôs `¥Àc >öî’;wÆo¼cÇŽÒÒRƒÔ‡RøqâĉQRRò¾m3gÎŒéӧǰaÃòã˜|÷Ýwsh²²²òˆ¾OEEE~þ{»AvvvÅœ9ÿ;šš¶Æùç¿ýÛË(:BP°Þzë­B=º»»ó6úÞÁB=Š‹‹{ƒ?þx ><æÌ™sD¯ŸÂ­©óäСCè:™|ë[?‹U«^ŠŠ‘±dɵQT$>…@GH ÖîÝ»{—ß~{WüÏÿùBLœX55qÖYBr'Ê›o¾MMMqå•WFyyù=wýúõ±gÏž˜ûlžO›6­w]KK{ÌŸÿ`tvvÅW¾2#®¹æƒ D€‚U\\·ßþ̾i]ìÝÛ×]|ñÇâÙg;D>üðKyêQQ12"Ç+O|blÔÖŽËËô­ÓN;-OGcΜ9y^YYÙ»nÞ¼ä0d:v·Ýöi F€‚ÔÖÖ7Üð³X¶¬)?þÚ×þã¾ÇçÆ°aC¢«+bÛ¶âxùåwâ—¿lŽ•+_ÆÆÖhmÝq@02)--Ê»Ë.S¦Tô%91ö@&ßùÎêXµêõ(//E‹®ÖÑ $§¾~CÜpÃCÑÔ´5ÆŒÿý¿×ÆÅŸÝÝݹKä˜1c¢´´4>ñ‰3âÚk«{Ÿ—¾¾¡aK<òÈK±n]K^îèè̯—¦••e9)yb¥Ðê‚+ór AN˜0Ú @„ `tvvåßúÖÏòrmí¸«ª:ù°žŸ‚tiš;wJ~œB6´Åš5›â©§6宑i¹¥¥=Oû‡#S2…"§O¯ÊÁÈ´,yü¤ŽŸ×]·4ç›nªººI ” $¡½}wÌ›÷ƒøéO›¢¬¬8¾ùÍKã+_™¥¥G¡IÏMÆ4}þó5yݡ‘i]šÒ÷ï±82…2Órê&ɱIáÇ‚lmÝ—_~v|ã—(`‚ x«V½óç?˜ƒˆ)|¸dɵQSSy\¾×ц#o¹å7ÏOû—:FNž\!y”n¹¥>–.mŒŠŠ‘qß}s)ì ôÎp´ï~wM|ë[?Ë!¯½¶:î¸ãÊû(}X8òÅŒÜ?™ìŽL¡ÈŽL!É4 ÷\Ã[o]•—/¾&ªªN6(Pà¼0 mÚ´-®»ni¬\ùj ÛoŸ7Þxa^îöGî/… ëë7¼/ù½ï­;àë„#߯¥¥=þðïÎή¸é¦Ú¸üò³0B0à¤ðàüùFSÓÖ\´èê¸ä’3ľ§ýíéÙãp‘)™ž›‘Ó§Wå€å` G¦ðãg?û@C¦Ÿû߸ĉƒ„ $Ê_ÿõãqË-õ97{ö„‚¬¨9 ¦Ã G66¶æ©' ¹8ò²ËÆåpdMMeA†#¿õ­ŸåñHÇzñâk}wLLœí ©`ê˜ÂpEECãöÛgÇ7^˜— Ñ{Ñ9™‘O=õ›Pä¡Â‘) ™B‘…ŽL?çw¿»&//\ø™<6Àà! 
@¿—Â)™Â••e¹#`mí¸A5)ȘBiê HîŽüùÏ7ä@äºu-½$÷G¦®‘é¹Ó§Wåå&áÈM›¶Åœ9ÿ;ÿ¬ßøÆ%1wî' 2‚ôk©à‚+s.o»íÓQQ1ÒÀġÑ) ™B‘û‡#S˜4Mwß½¶÷¹û‡#S°´?vZ¼îº¥ÑÚº#ïß·¿ý»: B‚ôK©ûcê™Â{eeŹ äµ×V˜ÑpLÓþáÈÔ!2…"Ÿzê7Ý"S'ÉC…#Ó4yrÅ ïù­oý,V®|5w]´èê(*êà $ @¿“Âoóç?˜Ã©cá’%׿9G'SGÅ4}á ççuï G¦@äþáÈ)„ZSS™C‘çž{ZžO˜0ú¸‡Ó>Üz몼œBUU';0H BЯ,X°2¾ûÝ59¨wýõ¿·ß>;‡ñè[ŽLÓ‹/¶övŽ\µêõ<õ(//ÍáÈ4}âcãüóÇöi8²­­#n¸á¡èì슿ú«KãòËÏvÀ`„ _Hݯ»niüô§M9PwÇWÆ7^h`>Bû‡#{´·ï޵k›cݺ–øå/›s8²©iëû:G¦pdêÚ™ž›:GVWŸ–ƒ’f×®]±uëÖ<ïî’’˜7oYþ)yÓMµ r‚œpK—6怭­;r˜nÑ¢«s—AN¼Ôó½áÈZmhØ’’=áÈM›¶õv“ìQQ12ψœ>½*ÏÓãžÎ‘)üØÜÜ]]]½Ï¹ãŽ_ÆêÕ›âôÓOŠÅ‹¯é³.“ÀÀ% À ÓÙÙ ¬Œï~wM^ž;wJÜwßÜÜ]þ«²²,O©ccžpäªU¯Ç¿þkKî ¹aC[~œ¦;ï|:] G¦n‘i:ï¼Sâ·{DŒJÞöoÿövüýßÿk^þ»¿»4-€ $'D ÎÍ›÷ƒ’+--Š… ?ŸûÜy:P‡ G¦®‘)ù‹_¼ž—S×Ïúú yêQR2,&M*õëßÊ¿ô¥©qÑE§T „à#wÿý ñå//Ë¡¸qãÊcñâkbÆŒ*S`R0²®nRžz¤.‘)ùË_6Ç¿üËËñ쳿Ž;:{CÓ§Ÿÿù?O3x@/AHàCuvvƦM›â”SN‰Q£F úý€BÒÕÕ•Ï«mÛ¶ÅÈ‘#ã´ÓN‹²²²A»ñõ¯/»ï^»¯¾ëÊ ﺫnßû~±Á$Rð5MsçN‰ææ)±sçÎ}u@{¬[×oïû˜Æ ‰âb¿ÀoBhÆ ñ“Ÿü$¶nÝ“&MŠÏ~ö³ƒz? P¤àáã?O=õTtttô®2dH\xá…ñû¿ÿû1lذA³|4ZZÚcþücåÊW£´´(n»íÓqãFQÑPƒ3H3&𛛣ªª,Ouuãzk´ „*’V¬X¿úÕ¯bèСƒ~? Ð¼ð ñóŸÿ< >ñ‰Oä¿þzüÛ¿ý[%><~ï÷~oÐìÇßÒ¥qà Ek뎘2¥"–,¹6ÏÜJJJbìØ±ñÖ[oÅîÝ»£»»;w‚LµAii©2AHà î½÷Þxûí·cÆŒQYYK—.Ôû…&… þàþ jjjz×¥ólùòåñä“OÆÓO?ý‘ûË~pü´·ïŽ VÆÝw¯Îή¸æšs⮻ꢢb¤Á!ë CŠÛ"QÐöîÝ---ñÎ;ï¼o[ºCÀæÍ›cûöíGüº;wî̯»gÏžƒnOw ø íœ8阤c³cÇŽCnÛµk×û¶uvvæm騠¾€¾âúê+P_©1€þ^/uÖYñ…/|!fÍš•ÿar_8šúêxì8Ï#>ö±>ìqÞyçõžíííÇýJ6´ÅÌ™ÿwÞùt~¼págâ‡?üc!HŽÈ°}Ó͵µµ‘&(4é`÷ÜsO¾PuuuŒ1¢wÛc=K–,‰áÇç‹dGbõêÕñ£ý(Œ“&Mzßö‡z(–-[§žzjœ~úéD?òꫯÆ÷¾÷½hnn~ßE“ŸÿüçñàƒƶmÛâãÿøÛV­Z<ð@þ:óÌ3 $€ú ú„ëW¨¯@}¥Æú{½”ꟓN:)/·¶¶ÆóÏ?Ÿ;·M:õ¨÷ãhê«ã±àååå1sæÌ#~ÝTt&ébZzíý¥»¢666FQQÑA‹HN¬qãÆåcóú믿﮶éऩ©)ßQe/½ôRž;¦ê+èK®_¡¾õ•(„zi°ÕW0Îóô÷C’3ÅÅÅ'ì=êêêòºeË–å»[‹Ô’<ž¯½öZ~ýT€Ž=:ß‘ƒþ-Ýí$y饗òn“÷~È›þ˜èùƒÂÅõO®_¡¾õ•(„zi°ÕWP(çùŽ;rø0Ý0åSŸúT\xá…'äy½$™ôéF÷Í7ß7Ýt“Ñ  íÚµ+JJJºm÷îÝùîAiJå?þã?æ¢rþüùGô=Ò]9ž}öÙ¸òÊ+ã‚ .0èHgggþ¡çn*‡» õôׯP_€úJôÇzéhmåú8ϼO¿ÿ}º?]¿jllÍȆ†-QZZ·ÝöéøÂ΢"½¹øh¤üc‘a ÐªHLöÿðîñÇáÇǜ9sޏMwæ:tèwå``(**:ªm¨¯ ¯¸~…ú ÔWj  ?ÖKGãhj,ׯÀyà}úàïÓýåúÕ÷¾·.¾üåeÑÞ¾;&L‹]3fT9|ä|‚û¼ùæ›ÑÔÔ”ïxQ^^~DÏ]¿~}ìÙ³'&Ož#FŒ0˜ô9ׯÔW…Zc©¯Ày@ß½çöåûn >.X°2î¼óéüøškΉ… ?³oJ NAHØ'µŸ7o^œsÎ9GüÜÔ2<™6mšà¸pý @}P¨5–ú œçôÝ{n_½ï64lÙ÷ý­QZZ·Ýöé¸ñÆ 
N¨!û¦î›o¾9nºé&£G¡¥¥%Ï+++ ýŽëWê+õà<ð¾{¸î½÷WññÓܲºú´øáÿ8¦L©0°œP)ÿ¨#$#˜П¹~ ¾P_Îsﻦ³³+¾üåeq÷ÝkóãÔòÛßþÝ(//5¨ô ‚ƒÔÚµÍñÅ/>œç¥¥EqÇWÆõ×ÿŽ _„„î¼óéøú×—GGGgTWŸ?ùɟƸqå€~G`éììŠÏ~öxàòãϾ&w‚,++68ôK‚ƒDSÓÖ‚\»¶9ﺫ.>÷¹ó ýš $À ð﬎[n©ööÝqþùccñâkb„Ñ€~O €µµuÄüùÆÃ¿”þó5¹di©X ƒÿc¨ÆÆÖ˜7ïÑа%ÊËKcÑ¢«£®n’`@„(@ßûÞºøò——E{û ~&&Lm`p! HKK{\wÝÒøéO›òãë¯ÿ¸ýöÙQVVlp! ĺu-qÅßÏaÈ|\¼øš¨«›d`Ð! À½÷þ*þâ/~íí»£¶v\ÜwßÜ7®ÜÀ0à B `)øøÅ/>ßÿþsùñ×¾vQ|ûÛ¿¥¥b#ÿG êë7ÄüùƦMÛ¢²²,-º:.¿ül@A„€î¼óéøú×—GGGg̘QK–\›ÃPh!á=víÚ[·nÍóîîî())‰1cÆä9œh®_¨¯ÔW€óÀûîž=Câºë–Ƽ¿î߸$¾ýíߢ¢¡ €‚$ ï)›››£«««wÝÎ;ã7Þˆ±cÇFii©Aà„qý @} ¾œçÞwüãgã/ÿò©xå•·£ªêä¸ï¾¹qùåg0 š¨?ìç­·Þ: Hì‘¶À‰äú€ú @}8Ï÷ûî½÷¾ú§æä%—œO<ñŸ„ t„„ýìÞ½»wù©§6ç©Ç!C¢¼üeƒèSŸ—]6Î@gõõò¨¯8zû_¿:’m‡«½}w46¶Æ¦MÛ¢µuG´´´Ggg—‡QYY–§ŠŠ‘Q]}Z”—»ë=À@¬¯|>8x¹–ǧ¾zo¥¾‚ÂÐÖÖ–C=fÌ8=.¼ðôƒ¾Ð÷õÕŸýÙ¿ÄÏ~¶)/ÿ×ÿúã¿ý·9QT¤?ƒƒ $Bº÷÷ÿ¯!Cj}¸ ðH!È[n©7 ¾ Öiizä‘—¢¡aKttt¾÷+öMµ Ž@MMe¾óòïýÞÙ1{ö0@ø|pðr-ÔXÀ±8¯7 Àñ7mZE¼ðÂÖøîwgî{ÿ­‚`P„„ýÇÎ;X7}úéy***Š“O>Ù Aù—Ù?ÿùð«­—'@}À‘;Øõ«ý·® Úâž{ÖÆý÷7äåýM˜0:O=în½õ–¸é¦›i¿‡ y7º»Kâ7—¤wì›Ú÷­+Û·®xß¼cß|ä¾uCò¼½û¶íÜ7¹oîC\ú¯7ÞØ–;©¦.ª)P¼n]Kž¾óÕQUur\sÍ9ñ¥/]˜Ï-ú}åóÁÁõ 8þõUê—BR‰ú ÃöíÛcÏž=û畼p|ë«/}ijÌŸ?y_]Uì}€AGö3f̘hnnŽ®®®ÞuéCÎÿò_jbìØ±QZZj €tw×ûpàH!È›nª5 ¾à(ìúU2dȼíôµuÄßüÍãqçO÷v~7®<æÎ³fMˆ3ª¢¼üÀk`·ÞqóÍG_¿¥ý]¸pa\rÉÅQVV>ú󼾤¤$>ùÉOF}ý/₠Ϋ®ºê}Ï]²dI<÷ÜsqõÕWÇÔ©Sý0 ´·ïÎ!ÈÔeõ^ˆ¦¦tWæ5ù¼ûÂÎÏUTŒ4Pý¸¾òù k@ßÕW©K\šÒõ+õ†]»vÓujŽ­¾JRÒû.ƒ‘ $ì'ýã«tÁí­·ÞÊÅa>IŠŠ\„ _ØÿúÕîÝ»£»»;ßé5}Èùaׯî¾{m,X°2‡!“ººIñÕ¯^—\rfÿn‹MMM±yóæ8묳âÌ3ÏÜ·mQ]]õõõñüóÏÇìÙ³cذa½_ßÙÙùúܤI“|Œ²²â|^¥éoÿöòX³fSüÝß­Ž¥Ksòûß.¯¿þúßùHÎ=¿¾òù @ßÖWGzý pžà}>ˆ $¢X,//ÏO>ùdE"ýFÏõ«ÃµiÓ¶¸îº¥±rå«ùñµ×VÇ7¿yiTWŸö‘îw A¦®Ž©»ãþªªªöí㦔œ„ $@zàbæÌŒ––ö¨­ë×ÿy|îsç°ý©¨¨ˆâââ÷­?÷Üs£¨¨(^|ñÅØµkW^·gÏžÜrĈ1a“‚’º²¦óqöì ùüLçiê š $@I!Èë®[íí»søñÿüŸÏEeeY¿Ü×ÒÒÒ˜×ÔÔäùsÏ=—ç ¿ „wÞy(«¨hh,\ø™¹BöÁ IDAT|žvvvåðr:€÷„(›6m‹+®ø~´µuäN·Ýöé¶êïÆeeeñÚk¯Ekkk455ÅèÑ££ªªÊA¥àÝ~ûìÞÎéümjÚjPà=! Dê(×ÒÒµµãr§¹‚L† ’»?vwwÇ’%KbïÞ½1uêT”Aã®»êböì ùü?ÿAï! 
Pî¾{m¬\ùjTTŒŒE‹®ŽÒÒ¢µÿ555yÞÜÜœç) ƒE -/^|MTV–Åš5›òù ü‚\[[G,X°2/ß~û쨪:¹ßìÛ°aØʩ§žgœqF^®ªªŠÑ£G;° *åå¥ùüMÒùÜںàÀÿ# 0ÀýÍß<žÃ×^[Ÿû\ÿê¤xúé§Ç‚ âOþäO>ôk+**ò\7H«t§)Ï·ÜRo@àÿ„À6lh‹;ï|:/ó›—öË},..Ž¡C?ørô®]»¢¡¡!]uuµË •Î㢢¡q÷Ýk£©i«€„ÐþÇÿx::::£®nRTWŸ6`Žõë×Çž={bâĉ1bÄ–A+ÇsçN‰Îή|~‚Ú¼ç_ýêEúçxöÙgó|Ú´i*ƒ^ÏùœÎÁ®È Lõõbƶ7®<.¹äÌý³Ì™3'Ï+++X½3ªb„ÑÑÔ´5V®|5fÏž`PÔt„ {ìÕ<¿æšs¢¨h`_îMH!Høÿ®½¶ú€ó3AH€ê§?mÊóY³t‹ƒBÓs^§Ž0Ø B @íí»cݺ–¼|þùc ˜ššÊÜé5ç­­; ƒš $ÀÔа%:;»bÊ”Š(//5 P`ÊÊŠ£ºú´¼ÜØØj@Ô! ––ö<7®Ü`@ª¬,Ës!ì! ž`TOP (ùd”——ÇÌ™3øu«««óüùçŸÏ¯½¿ÎÎ΀,**ŠI“&9ƒœ $è²Ë.‹Q£FÅ+¯¼’ƒ‹ÉÃ?]]]QWW—‹G*,«ªªbçÎÑÔÔtÀ¶—_~9vïÞC%%%À ' ÀJAÇ«®º*/?ú裱zõêØ¸qcL:5ÆÔ¯[SS“çëׯ?`}CCCžO›6Íà  À‡KÇ|ܾ}{¬X±"JKKcÖ¬YÇôšçž{nY¾øâ‹±k×®¼nÏž=¹#äˆ#b„ AHO Bö¨ªªŠ“N:é˜^/…)'OžÑØØ˜×¥Pd CVWWÇСþd@€Ã°wïÞX¾|y'–••ESSSîÜx¬jjjòü¹çžË󆆆JE†€CIálj'FIIÉû¶Íœ93¦OŸÆ ËS`òÝwßÍ¡ÉÊÊÊ#ú>ùùºAÀ±yóÍ7cáÂ…qñÅGYYY<úè£y}úù 7Ü· 2$þüÏÿü€›<öØcñ‹_ü"ßèàSŸú” _Ñ€t°dâââÞ äã?Ç9sæÑëïÚµ+wª:tè]'€£×ÔÔ”CguV\vÙe1eÊ”3fL\tÑE¹3ä²eËz¿vË–-ñä“OFyyy¾Ñô7:BpÌR÷©¸¸òÊ+sˆâH¬_¿>öìÙ“'OŽ#FLè›7oŽ©S§ÆÕW_}ÀúŠ|þùçã•W^ÉósÏ=7~øáèêꊺºº(*rÉ€þGGHŽYê 9oÞ¼¸à‚ Žø¹Ï>ûlžO›6Í@@9å”Sr°ñ½RÐñª«®ÊË©cäêÕ«cãÆ949~üx@¿äöÞ³ÓN;-OGcΜ9y^YYi  TTTDqqñA·¥Àc >¦®Ì+V¬ˆÒÒÒ˜5k–A ßÒ€* … ࣕ‚=ªªªâ¤“N2(ô[‚ƒÈÞ½{cùòå1tèÐ(++‹¦¦¦xùå— ý– $JúGZ/½ôRlÙ²Å`¨¯ÔWGaÕªUÑÚÚÓ§Oººº¼nÙ²e±gÏ€~I’eóæÍ±xñâ|ÇzÔWê«#“O<ñDîY[[“'Oމ'F[[[Ô××;€ôK‚ƒÄC=”»VΚ5+Š‹‹óº+®¸"ŠŠŠbÍš59È ý $@6lØóý544ÄÆãì³ÏŽêêêÞõ£FŠK/½4ºººbÅŠ€~§È†ÓO?=,X;<¾W ?Nœ81JJJÞ·mæÌ™1}úôƒ(àD„d@Kw©óÍ7cçÎñ±},N:é$ƒ ¾ÔõUqqñ!·,y8Ï€I’kÆ ñ£ý(ÚÛÛ{×ÕÖÖÆe—]fpÔWê+(C QsssüÓ?ýSŒ3&êêêâÒK/aÆE}}}¼üòË@} ¾€¡#$ÒÎ;cúôé1{öìÞu%%%±bÅŠxöÙgcâĉ @} ¾€ #$RUUÕÿˆ,™4iRž¿óÎ;@} ¾€!É€”îžÿ^'tRžïرè¯ÔWP !)œ_æ¡~ÔWê+(4þå ÐoúÒ®]»bëÖ­yÞÝÝ%%%1f̘<€#¥#$}&…›››cÇŽ±wïÞèêêŠ;wÆo¼€#& @Ÿyë­·røñ½RgÈ´ Ž” $ʰaØðË
    SRRcÇŽ#FİaÃr(²´´4¯Ks8RE†€¾Ô†€¾ #$Ðo Bý– $Ðo Bý– $Ðo Bý– $Ðo Bý– $Ðo Bý– $Ðo Bý– $Ðo Bý– $Ðo Bý– $Ðo BÀÿeïN äªÊ<€_1"KDD–È"AA²o¢eGAAadvdßWdGQeß‘€À  D`X#$°˜ù®S}šN/µ¼ªzÕõûÓ§“ú½zïû÷}ﻀÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!€ÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!€ÒÒ ÐFŒøçðî{ï½oc0¬i„è@£FÌŸ'N|ÕÆ€ajÒ¤W?t¼@·Ò ÐfŸ}¦üyòä)6 S•Fgt;¨Ò5aÂË6 S•FçJã3t+h̘9ÓˆÓ¥‡~>½üò[6 3S¦¼“Æ.ÿy‘Ef·Aèj!:Ðȑӧ%—•ÿ|÷ÝÏØ 0ÌÜwßä4uêûù8·"$ÝN#$@‡Zk­…òçë®{ÌÆ€a¦r\¯±Æ‚6]O#$@‡Z}õ6H]|ñƒyå8`ø¸ðÂñ:Π›i„èPcÇŽN£GÏš&Lx9ÝvÛS6 wÞ91=öØ‹iÞyg±"$$mãÍŸ;ÊñÇ÷ˆ.倫çl§–O3Ì0"]yå#iüøçlèpq_zéù2Žo@#$@G=zÖ´óÎÿl–:ì°[mèpqOú~Ú~ûeÓB Ífƒ@Ò Ðñöß•4ë¬3¤ /ŸÎ?ÿ~:TÃñÇó޵Aàÿi„èpÑ4uÄkä?ï¾ûµiòä)6 t˜—_~+í·ßùÏq<Ï>ûL6 ü?ÃÀöÛ/›ÆŽžþ´Ùf§·Þšj£@‡˜:õý|ÜN˜ðrZqÅyÓ6Û,m£@/!†‰_þr£4jÔÈ4nÜ„´í¶—çæ* üvØáÊtíµåã7Žã#\¾€Þ\I&æw–tÍ5[¤‘#§OçŸÚ{ïë5CBÉí¾ûµéÌ3ïI3Ì0"]qÅæi¡…f³Q ÃÈ’KŽJçœóÍÜ yâ‰w¦­·¾4½õÖTJ&š”cåÖ8NcÈ8n—]vnú1Â&^6ÞxÑü9š ceȉ_M\°q5j¤%ðòËo¥Í6»8]{ícy%Èh‚üÎwÆØ00ÃP4C.´ÐliíµÏOãÆMH‹/þ³t k¥-¶ø¢mtá…ãÓÞ{_Ÿ”£9ùŠ+6·$ A#$À0µä’£Ò]w}?m¹åos3d|¾êªGÒþû¯’ÆŒ™Ó‚?þ¹tØa·æFȰâŠó¦_þr£Ü° N#$À06ï¼³¤›oÞ*vÚÝi¿ýnÌMXñ±ÞzŸO{îù¥ôå/61 MrçÓqÇÝ‘.½ôá4uêûiÖYgHG±FÚf›¥{P%]`ûí—MßùΘtðÁãÒ©§þ)]yå#ùcôèYÓÆ/šÖ\s¡¼BÝÈ‘ÓÛXЀ)SÞÉ«?Æê«_ü`zøáçó×£é1ŽÃ›FiC@ 4BÂn¾yBúàƒq6 CãÆM°ä+@¾è*±Ý '¬•vÝuÅôÓŸþ)7iM˜ðr:öØ;òGXd‘Ùssd4jÅj’ý¨ë`(Ï>;%K“'OÉo½5µç{qE³ñvÛ-›/ÊÏøU÷0–C#$ á–[&ää+€á"9æëùãÆŸÈùìÚkK÷Ý797qUV°ê3fÌœi­µJ«®::ŽÕ èƯÊG#$ `µÕF§|d¬ ]`ìØÑ6€|ÈW]k5̇òÕ4eÊ;iüøçòªvÏ?ÿFš8ñUª0ûì3å•ãs¬úŸè<Ưº—± (?0€˜©;>¯ºÅȑӧWœ×†º’ñ+€òšÎ&ÊJ#$PZ!€ÒÒ ”ÖGÿïã ±cǦøšc¿ýöK÷ÜsOZyå•mŒ*½ýöÛéšk®Iï¼óNšk®¹lÛ³£]}õÕé˜cŽIË/¿|9r¤  ãÉ#òˆí)ãÃÐ|n»í¶ôú믧9æ˜C®Q‹ÁûG–äÔ]5WÍÑUF(U)sê¤yß}÷¥+®¸"¿Þ{ï½7Í7ß|iæ™gN¿ùÍoÒwÜ‘¿þÄO¤Å[Ì›V’‡ @ãÆ³"$ç…^H>ø`þx衇ң>š^|ñÅRhGydzê©§ì¼<ðÀiƒ 6H{íµWC:*lÛ³ñíÙéÚõ~xï½÷Ògœ‘Î>ûl70¤gŸ}6ÿ¢íµ×¦§Ÿ~Z¾“ñäO¾“ñ@½ï'NL«¬²JÚtÓM;êuË5jq7Ômïï YŽábêÔ©ùš‹.º(]rÉ%éÉ'Ÿìêí!Ǩ»ênùßj.@±bl-j[™3@Ñí­·ÞÊùøã·uÛ·ûu¼ûî»y¶ .¸ ÝxãiòäÉmÛ¼UF•QnðÒK/õ\¸.©–¥Žô÷ÿËP£;m¬o=öHßÿþ÷óǶÛn›î¿ÿþüõÝwß½çëßþö·§¹ÿO’…Êð~‡èt#l:͘1cú@Yf™eÒž{î™6Ûl³–‚~ô£~ÿ„NÈŸ¿óïØympå•W¦}÷Ý7Í?ÿüi„ Mßßíxu£z·IÑï‡j­¹æšiÖYgÍ1f¬èÏyç—ößÿ4iÒ¤}=f݉_v[½‚{+ëO§å;OÆ“ïd<:¿Þ;¿Ê5 'ËÉr²Ý$&ÍÚe—]òS½ÅD 
1ËøœsÎÙuÛDŽií9Ö5²Î®¹íª»j.@±–Xb‰<9F4å}üã/ÕïYÍÈh7ß|s¾ü±ÇK믿~ºüòËÛ²ÝÛù:¢éðÐCM'Ÿ|rzùå—{¾y`§vJG}ô€ï…2¾UN•Qn°øâ‹Osÿ×G>ò‘´ÒJ+¥ÿøÇùö±åJS\4ÁÊC²PÙŽ=yˆN§’ޱ2‹T 2ÅŒS1SF|ì¸ãŽ-™íê7ÞHï¿ÿ~¾y¯?qñ9¾K[÷ 2´ÆB -”.½ôÒ–ìïv¼ÇºQ#Û¤È÷C­b–šŒ1{†Àôvúé§ç&Șµ?²Cß_jÿú׿æ lÓM7ݰ¯?ïd<O¾“ñd<èüzïü*×0zê©é°ÃkÉë*âµÈ¨rªŒ t“Þ×EÃA”ÇööØcô­o}«eâ—=C™#»-¯ËC²fÌ.qà 7¤Ë/¿^ÍR!2ÜÍ7ßœþë¿þ+Ïýü£áLTíùo8ä˜fg™"αͪ¹eª»Ýr¬¨1µzꮚ ÐZEÿžÕŽŒ¶Új«åÿÍuŸüä' Ù.õdˆf¼ŽZ·ÉÒK/¶Þzëi¶Øb‹ž×=yòä–l“"^‹Œjl¨š±!®öÛo¿Üã@1ÞÖW5×D›5vÕŒ Ùc}õæu÷µwï˜ëdP+B2lŒ921"ŸègžyæiöÙgŸtÒI'}èÆ´•VZ)“Ï~ö³=_{饗òMx¿úÕ¯z¾8›nºi:묳ò û￞Ūbƒ 6èùs\LŽ®øxž¸ù),ºè¢=ߟ0aBZ~ùåóªN<ð@úÜç>×ó½þð‡éè£Î³€rÈ!5ýü§œrJž-dûí·OÿùŸÿ9Í÷wÚi§ôË_þ2þùé»ßýnÕ°×[o½<¸÷ûßÿþCß;üðÃóL^1 ÝÛ1Ç“g&>ꨣÒ~ðƒ†öí¸qãÒf›mÖ3XûãÀÌC¹çž{ÒrË-—Ö\sÍ\ü{-^×Zk­•g4¨„‚þ»šý]‹xî°ÃùýÔ;pöÞŽCÏ+¦… IDAT=gì—"öw-ÇF³ µ]ªÙ½÷í¨Q£ò¬sñsÅ€t„ÁþÞõ¼'Âo~ó›|®è>+bFÀ ï)¸ÿw~Zxá…Ówޙϖ„Â\Ï7ÑìØû¼6˜ZÎÝͪyõ¾†þÎÍ­ÈwÕd¼jv¯˜Œ×H¾ëԌת|W¦ŒWDî•ñ »Çsâ†ü˜5êλï¾Ûóõgœ1×Ö87Vnº‰sÆk¬‘6Úh£|¢·¸0²øâ‹ç¸ˆÐÈy"¼ð iã7Îõ¬"šN8á„kÖPY¨ÙF®)ÇØÕPy§¿÷N\äjÆ>êy£6º‹ªÛ²œ,'ËÑm*Ç~ܼyg(‘W⦡8?UD½†Å8¦â憨!½ÙZ²Td‘]wÝ5Ÿ—zç´é§Ÿ>í»ï¾Zµ²ÚLTËùo¸ä˜fg™"αE×Ü2ÕÝn»FVÔ˜Z=uWÍh­"ÏjWFë¯Æ7ªžL׌×QTnÿÛ{ŒµUÛ¤Ñ×"£ªflHF†«h‚\pÁÓC=”Ç×*j¹&Ú¬š^d†ìæ±¾zóz·Ý×5ؘëdÐ Qdbö‹/|á iþùçÿÐ÷âæ´+®¸"/%6]bíc=6™ÿùŸÿéY~<.6ÞqÇù‚ã÷¾÷½¼*ÁUW]•/o¾ùæiýõ×O[n¹ešo¾ùÒÏþóó™Ïä°õÅ/~1?N|= Ô,³Ì’>ñ‰Oô¼–({íµW:âˆ#ÒÎ;»îºüõiQ\X`Ðj.ÄE]”‹\l®ˆYac¶ŒXI*~¾ZÄvˆ0}Ûm·å™ â穈BÞ(ð½¶b›…u×]·¡ýz÷Ýwç‹ï_þò—óÏ7iÒ¤öãÏË.»lÕßßÊ ñc›¯¼òÊy5®»šý]‹O<1qÆùý7Ä{,.n÷¾À=ÔsÎ5×\…ìïZŽfj»Ô²⸊2b¶Šxï<ùä“C¾jyOĬ%ñ N„Ò¸ñ$öGÈ“O>9ß@ÿ7ž·¯yç·çü 0!fø Që°jôÜ]tÍ«ç5 unnf¾«&ãUû³Ëxg¼¢ò]§e¼Vå»2e¼"r¯ŒÝ=ž7ïÇù/&Øn»íò÷cÀ?c?f•Œ›¿©µœ'*u,f«ŒÕ¼#WÄ,¤çw^Zgu|î¡ÎaíÈ6rM¹Æ®jyï4k õ¼EîÃFGYN–“åè6ñþ>÷Üsó$qsB4ÁÍ1Çžc¿öµ¯åã¢o^ùÊW¾’o$‰ã¢V½Ó8¯Å17aÌ9çœyûýèGùx[a…úÍFƒ j9ÿ —Óª,ÓÈ9¶èš[¦ºÛ­×ÈS«·îª¹­WÄXX™3Z™2]­ŠØ&·ß~{þͽ·w;¶I=¯EF56TÍØŒ 7qM4VŒ cÆŒéùz£×DËtý¦ÛÇúÍëÝr_{ßÕ&Ù¹NµÒIÇ:çœsòÅÞ˜ã¿ÿû¿süô§?/÷n$¸é¦›rAŒ ±±toÅŠ+®˜‹áe—]–Î>ûìܱ7‹ÅMdqaøúë¯Ï¢sÿûßÿ~Zd‘Eòßc†‰ø¸æškò‰ë­·ž¦`=òÈ#*½Å…àx®xŽ(øb£XǬ1 B\ð¬UtäÇÏ]ùqA´÷¬W_}u^":ž'f¨E¬€7sÅ6Œ×#Äv½B,Ó3ÑÇóWþË*ÿË¿üK戳 G˜¨ˆ°»÷Þ{ç÷@#7“ŲÒÕªæy›±e9YN–ƒ¡E­‹~âÆžhhŒsĶÛn›gÆŽz‹¼7=ÔšWjÇYœ«"sTD“eÜàõèòË/ï·r sA­ç¿á’cÚeª9Ç]sËRw]#kì÷¢zꮚ 
PÕžóËžÑÊ”éêÑÈ6‰•iN;í´üçÈíÜ&E½ÕØŒ tƒhf|ã7Ò’K.™æ™gžüµ"®‰¶"çÔrNîæ±¾Veu÷µ³sŒn7M@§Š"ycÉßèô‹·1Ûl”Þ*K2Çl}E¡ qóXˆvÙe—ž›È*bFÖÞt‡7A Ì"xU–}Že†;î¸ü¢ÀÅÒÆõÚj«­ò瘸·X :ÄÌõ¨ÄÊì²!–°ŽÙg+d£ˆ÷.üñ½ÊÿkD„—ÞÅ;Tf٨̌Ҍǎ%ž›%nð 1ÓB#Ýßµ­PÔv 8c†ZÂb-ï‰JãRßåÙãf…ÁÞ?•óA5« Ý¡r¾è{ÃZÑçî"k^½¯¡šss3òŒW¾Œ×Ì|×鯈}]¦ŒWd¾“ñ ûÆsbfŰÇ{L3‹xÌl¸úê«§)S¦ä›û[Y;*\âçè}a-.<^rÉ%uÃÚ•mäšò] öÞiÖ>êy›µe9YN–ƒ¡ÅŒÓ·ÜrK¾yáõ×_ÏÇF4ÂűUD^©Ål³ÍÖo戛@B¬@Y˹ ÖóßpÊ1e£îu×5²ÆÞõÔ]5 <ª=çwBF+S¦«U#Û$V؉•bRŒ#lç6)òµ26$£ÃMLðÓŸþ4pÀy%¶£>:¯ˆ÷‹_ü¢çßuM´L×oº}¬¯[³PÛÕ˜]ýï ×Éè6V„¤cE¨‹‹|1ãé‘G™»ÕcJ­xì±Çòç˜!7f ííÿøÇ‡NÞ•"Ð{ÉñzÅ2Ýa ›Ï¾þõ¯ç öë_ÿ:wåDzñŒy#6ÝtÓ¼¬r̆ðꫯæŽÿ¸N£WB@­¢`Æ,¾q‘5f Ž"|à 7ä¯Å¶ÙHb¶†ƒ>8ÿûøsøæ7¿ÙðvŒŸ¡¯9çœ3~á…Jû؃‰0ËNÇ’í±­bæõ×_?/™ÝÊý]豄 Em—«_ÄrèÍzOÌ?ÿüùslûÊJ±!ÎGýÉŠ¹çž»é¿¥rŽ™êy›µe9YN–ƒêĪ‹·ß~{7n\Úo¿ýòìÚ1zÔ¼˜l¢‘¼RW^y%7g>óÌ3y•¿ÿýïùëï¼óNMç‚ZÏÃ)Ç ç1š2×]×È{?ÔSwÕ\€ò¨öœß ­L™®õl“?üáùwh 8÷ÜsÓôÓOß¶mRôk16dlHF†›¨ ÿøÇóXαÇ›'7«(êšh™®ßtûX_·f¡"¶«1»úß®“Ñm4BÒ±bvŒ†_ûÚ×ÒÊ+¯œWˆ³Î:ëôœàÃĉóçXÚ·¿—Å_<­½öÚùÏ•j+üˆ×Ö; õ§ÌBtì÷~Ýõˆe¥cö×XûìùëM9qýÿjq1¸YÝL±ÊEÜœ«_ÄvŒ›>Zh¡<(ïåVíïZŽV(j»´â=±ãŽ;æ™z;ì°|ÞXe•Uò/›±âçˆã¹?•óA½ƒ‚Àðu4ÎwÕ®Sä¹»Þš×ÌúÑŒ|'ãuFÆkf¾ë¤ŒWľ.SÆ+k¾“ñ ³Æs¢öô'.p…Ê ¡­ª•›ûãœ]”ve¹¦3Ç®š½Úµe9YN–ƒÚŒ;6Ï|M§Ÿ~zžU¾ÒÙŒ¼ÒWÔ¸¸ù#fcóÍ7óLöñ|S§N­«ÖzþN9¦¬c4ýîºFÖØû¡žº«æ”[çüNÈh0®QäØNÜ¾á†æÜ«êDnl×6iÆk16dlHF†››nº)›E£Q¬“^ tîoôšh™®ßtûX_·f¡"¶«1»úß®“Ñuã6ÃÁª«®š gœ¬ã$^™5Ì3Ï<¹¹àüóÏrÐ%‚ÑsÏ=—?FÕpH O?ýt¿ßÙpcfŠ(FqÑ2fyÙâÆ¿FDhˆ?oü¹¨eºcûÆؘ•!w hí³Ï>ù{ñšo½õÖ¼‚Ãç?ÿù<Ã@Eq`K-µT?üp:üðÃórÞë®»nþ{-ï½Föw-ÇF§m—f‹ã |îsŸK§žzjþˆš¿õ­o¥“N:iÈ_ðšy ÐYâ<âæ¥Êlïe?w·ê5•ïd<¯Ó²L£ûºl¯SòŒåωsDÔî˜Ù1Îm}UVÔ^`òçÊ…ÃV*Êl³Í–?ـٮl#×t~®iÖ>j×>”åd9Yjïñ¸©áŒ3Îȹ  cÖôzòJ­YêÐCMÇ|žÔ"r\e†é¸Ae‰%–¨yÅÉZÏÃ-Çtc–iwÝu¬õuWÍè<’Ñ:y\£–mµ4&—‹,xÊ)§ä&Ävm“f¾cCƆdT`8‰F©J“ß@j½&Ú 9ÇX_÷f¡F·«1»Öf!yˆN6MÀp«„¸à[™q6T–à†|ŒJP¬,<”Ê…ÜX¶¹¿pÚ»@ôuÄG¤‡z(í²Ë.yÆÜ°ÓN;åYK€Ç…î›o¾9Ù¸(³ ÔÉ_­ 6Ø §PFAk¬±Fþ\ “,ã#ÄîÃÍ`û»^qsÀ/~ñ‹´É&›¤W^y%ÝrË-5=g#û»–c£ÕÛ.ÍØµ:묳òÌ"q®ˆ›KbN™2%]|ñÅýþ2Ú70–m0hŸ˜­jÆgÌ™à’K.)͹{°sm+ëGù®ÖŒ7T‘ñd¼fg™F÷uY3^£¹Wƃîω•oúSùú¢‹.š?ÇE¯P¹ØÛ@Íê³µ†˜ùq óA­Ú•mäšÎÏ5ÍÚGíÚ‡²œ,'ËAí*3hÇìØ•ÈJ^ùãÿXu^©5K]zé¥ùsÔŸJd#j=ÿ ·Ó Y¦YçúvÕ]×ÈZ_wÕ\€ÎÓ)­“Ç5ªÝ&ч&LH‡rHÚyçÛ¶MšýZº9£’QîÎ\Õ^턜c¬¯³óûڋψeÍBòL#$ÃÆØ±csŒ“÷e—]Öóõh0ÇsLz饗¦ùo¿ývÏ 
·o¼qþ|ÜqÇ}èß¾ÿþûùk}‹yeií¿üå/Ó<î¿øÅ\Øb(ž£·d1+@úƒ:(Ïò³Ä Qü½·˜Iw‡vH“&Mªj;Äì¯[l±Ezï½÷òì ñÜßýîwûý·µ<ö¾ð…~3÷Q‘ûP–“åd9h\¼Ÿcõ¿X!°¿ã=²Ç +¬Ð3»|=y¥Ö,U9õ~Mqþìg?«ëg¬õüWTŽ©µN6+Ç•e:¡æ–¥î9ÆÙʺ[†š[oÝUs:O;3Z3Ç\š9®QËë¨v›ÄMÖq3yäô}÷Ý7pÀ…AU»Mêy-2ª±!`pµ^mWÎé„ÙÌk–eå¾öæe¡"3bY³øàƒ¹+¾·/}éKyùæüày'|vß}÷Êf™e–|#_\~ê©§òÅËŠ·ÝvÛ\ˆãæ´O|âùë'Ÿ|rºé¦›Ò 'œ_G¥ D€‹p»úê«÷\¼J,%! pˆ@ÑŸZ;dlŸ¸¡..°ÆEØŠµ×^;~úééå—_γÖV }#ÏW6ƒíïj¾aK/½t¾Ñ3¶g̲³2ÇRÙ .¸`~ŽZž³–ýÝW-ÇÆ@yä‘´ß~ûõ¼–_~ùA¿>Ð÷jÙ.Õl“f‹•JâøÉO~2Í÷F÷GÛ-~ù ñ³TÄ91mb&ý8wÄy/α"Rürüä“O¦Yg59²sw£5¯U¯¡¨|jÉxCÕ™²g¼zWÆkmÆkf¾kWÆk4ßÉx`<§¿zõ:.èýþ÷¿Ï畸?nþŽscLÙ(êlÔôJŽsÞ9眓V[mµ|.ýÛßþ–›+—[n¹Âf–×±Øb‹å‹+­´RÚh£rŒçÔëyžvecW­Ï5E=~+öQQûP–“åd9h\“çw^žmú+_ùJαdÌç†ÈEQãz畘!>rAß¼2÷Üs÷ûµf©8W?>¯î™$jÑ\DzâµÕ:1D­ç¿¢rL=u²Y9¦ˆ,Söš[¦º[Ôg«ënjn½uWÍ(V4£UÆÅ*b…’Ê 9EhgF;ôÐCÓO<‘ÿî»ï¾ôoÿöoùÏ /¼pÚgŸ}šžéšõ:jÙ&qƒùÉOæ¯ÿö·¿MG}t:þøãóÙž¢hUfTYâÂqÌ~ÿ'þ}˜k®¹r·ÎÞ"`ÅsÅ…à“N:)϶»×^{õ|? J³¸m×]wÍ_»ð Óí·ßžÖXc´Ùf›õüÛ(Š1SV|D!¼îºëò×£C?D!¯VÜEõŽ;îÈKIôk}ì¸1¶I¬0°ÞzëMÆ+Ú¿ýío7ü|q¡¼²¿ûŠýý}¯šÇ©÷±‡ÚßÕü|ñâæÎ˜ecܸqz¯œrÊ)ù—–Zž³Úý=ÐÏ\í±1X ;þm¼'bÆè¡¾>Ð÷jÙ.ƒm“Áöí`߯õ=5”~üã§UVY%½ùæ›ùæƒÛn»-//Çv¬ÞÖ;ÄFXŽòø%mþùçw2zÄ Íï~÷»ôóŸÿ<ÄMj• j1[Pä’øe»Ö\ÓÌšWÔkhE¾«5ãUS{ËœñêyÜVe¼¢ò]§g¼fç»vd¼FóŒÆsª÷×^{mž±0$ã|âfð¸Ø_‹c²·8¿ÄMúÑxÔQGåsMœ_âfò¸`Øèy"¦Ž‘oL‹.ñzãf¨˜´"~†¸7œU[Ÿê=w™mŒ]µv쪨Çoæ>ªö=[í>lVÝ–åd9YŽn2Ï<óä±£È7ñž¾õÖ[{Ž…¨o‡vXÎ }óJÌž}óÍ7ç¼ÇTܬ½Ûn»å•#—õUK–Š c†öhœŒ•`"ãŹ/nF‰c°–cºÞó_9¦ž¼Ñ¬SD–ifÝ-¢æ–­î6ZsÛQw©¹EŽ©ÕZwÕ\€âÄ„÷ßO&ì­²bJ‘ ]-þOLDÖÛÓO?ógXvÙe?ԀجL׬×QË6‰Ü¢àšk®é÷ñúN\Ò¬ñºz^‹Œjl¨ž±!ètƒ]íO­×D;OÖ[‡Š¾~ÓŽÙÌk–Er_{ù³P+Çì\'ƒúÄU®âbUPèÑa³<ôžµï÷£p4j\˜A©X’8NÚÍ(q‚B¸¿‹Â/¾øbž1!:æ{¶(j1‹ÂÊ+¯œ Iˆ×þꫯæ¢ÙßcƬQ£8Eˆÿ!î†n¨i;Åj gŸ}v.À;î¸ã4߯÷±ßzë­¼ôsefÞ¢hƶïzê}¾)S¦äýíû{®jo–ïïqyìþöw­?_l§í±=ãq*7yÖú«võ3×rlôw¬Äûºº†úú`ß«e» ´M†ú9ú~µï‰XêX©æÜ]tÍ+ú5´2ßÕ’ñûÙËšñyÜVe¼¢òÝpÈx­Èw­ÌxEä;Œç Vïÿþ÷¿çúœcw0q|GžŠ[9þã<çÀ¾u ‘Úß•’ây*ç×ø9" ôþ?Õœ¯Û•mŒ]µnìªÈÇoö>ªö=[M^iFÝ–åd9YŽn“FĬèqž_`úï-nB~ôÑGó9$&oˆsJÜÌ1f̘¼’cY*fŒŽÇŒºTù·q|ö}mµd¢jÎæ˜FòF³rL#Y¦Uu·ˆš[ƺ[oÍmWÝ­·æ1¦VOÝUsZ¯¿{·*£ÕªY™®™¯£™Û¤™ãue}׌ÚícC2*ÐɆº.:˜j¯‰vž¬·5#C¶*G¶#E³YL^’ÄØk_±Š`¬`f1–[díkFjõ}í…Z5fç:Ô&ú­IljÐÒÈ÷ciâøJ˜I­³Í6[þèkÕUWMsÏ=w“&MʳöF¬ö¾°fæ™gî™áµZü¢³? 
ò¦›nÚï¿©÷±£ ÎgœqÆÿ_=Ï7Ø…ÜÁž«šÇiä±ûÛßµþ|>^x᪆ÞcÕîï¡~æZŽþŽ•Z¾>Ø÷jÙ.m“¡~ξ_í{âñÇïù%k ýúÎŽqÑEåϽg¦è³ìT£šswÑ5¯è×ÐÊ|WKÆìg/kÆ«÷q[™ñŠÊwÃ!ãµ"ßµ2ã‘ïd<0ž3˜Ï|æ3ù£q|÷­÷Õ¹FjG|¿÷êKýµÜ¨ÞêlcìªucWE>~³÷Q5ïÙjóJ3ê¶,'ËÉrt³8ÿ/¾øâUÿû˜db‰%–¨é9jÍRÑTÙhîªçü×hŽ©·N63Ç4’eZUw‹¨¹e¬»õÖÜvÕÝzkncjõÔ]5 õšñ»w«2Z­š•éšù:š¹Mš9^WÖ÷çpͨÝ>6$£l¨ëžƒ©öšè@çÉFêP32d«rd'd "k_3òP«ïkï„,TTFt Š7MÍÅo·ÝvË…åâ‹/®éÿÞsÏ=éšk®IGuÔ43A å׿þuzýõ×Ó:묓>ýéOúØõhõóµZ»¾¡ö7ÅZa…òÍ#W_}uÚf›mÒyç—g ‰åØ7ß|ó —[n¹´æšköüŸ‡~8Ï®3«ô^V¯¨šßŽ<"ãÉw2žŒÈ5Æ®Ê+šù¼eÏ+²œ,'ËÓ¬ZQ¶ÓÍ5WÝ팺«æÈheÌ"Egˆ²dÀfŽ×ɨ2ªŒ Ð:µ•õše5¶Új«´É&›¤|0ÿ}‡vȿ뮻ä!y¨”YHb8°"$4ÙvÛm—ƒÄW\‘vÝuתÿ_,U…gã7®ù9c)éð¯ÿú¯…?v=Zý|­ÖîŸo¨ýM±æ›o¾¼tx,~ÖYg劘¥9– ¥Â{ÏêÇ,ɾÏ>ûØ€2^S2^;òˆŒ'ßÉx2 ×»*w®hæó–=¯Èr²œ,rL³jEÙrL7×\u·3ꮚ £•1‹!Ê’›9^'£Ê¨2*@gèä:TÆk–ƒùæ7¿™8_yå•ü÷©S§æÏñ÷çŸ>¯"ÍgÕ¬ð)ÑÊ,$1|äÿ>>8è ƒÒhk@“¼÷Þ{ùsžV¸÷Þ{ó祖ZÊÆïöwû<÷ÜsiÒ¤Iéµ×^KóÎ;o“±”{_1Kͻロ¹@ÆSó±¯e<¹F­“Wld9h‡xïßqÇyîEYDŽqžWwizÝUsºS«3š a›Ø–Ȩãëó>¤ùYH¢ÓEÿ£FH ”¢ÿq:›(+@ii„JK#$PZ!€ÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!€ÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!š‰×ü IDAT€ÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!€ÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!€ÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!€ÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!€ÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!€ÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!€ÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!€ÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!€ÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!€ÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!€ÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!€ÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!€ÒÒ ”–FH ´4B¥¥(-@ii„JK#$PZ!€ÒÒ ”–FHþ—½»®ª¼óÅÿBi„¨)F1@n‹JÀ´ƒ X´X©NVo­×®™®±·íj]â²®™þ;s§³ê¬êµ3ÕA[±#u‚ M‘"­¢"F¡…""PäEy ðçy¼É€¢Hð$ù|ÖÚkï³ÏË><çðž³wïÞX·n]sÌ1qì±Ç¶Éößxãxíµ×¢o߾ѽ{÷·=oçαqãÆw¼84‚@AÚ¾}{Ìœ93–.]zÀúêêê¸ð cóæÍqÇwD—.]âÚk¯>}ú4?æ‘G‰'žx"ÆŒûØÇÚdû .Œººº1bDL˜0ámÏ衇bÉ’%qñÅÇСC} p˜ºj ýøÇ?Î!Īªª˜ùd”••ÅèÑ£Ûlûi9yæ™gò{Ø_ccc,[¶,ŠŠŠbàÀ>L8*BÆÖ­;cýú×ó”–›n'{òòöí}nEEi””¼y*Eyù1QZZ••½£¨¨kómàèIáÂU«VÅÉ'ŸW\qEtíúfí‡aÆÅG>ò‘}ûçò|û¼óÎË}ñÅóüÌ3ÏŒ3fÄž={bâĉ9ˆØVÛ/--Ý7^¨ŒÕ«WGCCC 4¨ùù/¼ðBìܹ3† =zôðÀ„Ú…d\¹rS¬]»5OõõëâÅ7ÆêÕ››ÃiJk+ô‰ÁƒËs02…$ *êêòº¦%Ð:R5ÅäœsÎi!6I›¤ ã„ âî»ïŽÙ³gÇk¯½–ŒC‡ÓN;­Í·_SS“ƒ©räþAÈT52>|¸Ž¿ÀgÓ¦íÑа1-Z¿üåÊzL·ß©šc“T½1SuÇTÁ1…Sh1­oªìØ£ÇÁO—øÃ67‡(SÐ2U“L!˦ÛiÛé=¤é`ªªÊšC‘gžyBM¦Ûee%>P8 7¾¹Ï=á„Þó±)ð˜‚)Œ8gΜ}c€’7nÜQÙ~ª@9kÖ¬xþùçcÇŽ¹úã®]»rEÈž={î 
ðaÀ„Þw)\XW·2~ýë51kVC ¬²c 2¦ a ¦ùi§õÉô>­K!Èxl )¹lÙú<¥dªFÙt;½ÿT­2M3f,?àyé=Û?þäON‰‘#+s8xo©²c’„‡¢)ùæþ·2zõêuT¶ŸB—©ä3Ï<“«H¦ )™Â©Zä[«I-' u)è˜<òR,X°:÷—ÂŒ)è˜BƒçW•çiJÇ÷Kª.YSS‘§·jªÙŒüío×6/§ëw.ÎS’ªUÖÖVå`äøñrx»dܶm[žJKKßõ±»w~8‡9æ˜}ûå†\‘ñôÓO?*ÛOÇ„\²dIBÖ××çõÆ óA@+„Ú\ª ¸hÑšøÙÏ–Åܹ/å€àþSÀ1…?ò‘~yžÂ†)xØ^¤÷ÚÖÜ_ú7¦÷¼y¿ÏáÈôo_»vkÜ{o}ž’øLÿæO}jp®™*[±oŸXëÖ­‹5kÖĉ'žø®7o^¬_¿>>úÑÆ‡>ô¡}ûÙ{cæÌ™qíµ×F÷îÝÛ|û§vZK®X±"¿ÄìÓ§O®L 9AH M¤*‰Ó§/‹ûî«ÏÕÓí&¥¥Å1jÔ)1aÂÀ|k€°£H•-S¸1MMR4UÁüéOŸºº•Í•#o»mQ~üرý㳟“'ÎíÕ!Cbùòå1þü8ãŒ3¢¤äÍ‹ìÝ»wß¾tAôë×/‡SððñÇÏAÄÚÚÚ(..Ε SEȺºº8ÿüó›_ó©§žÊáÆ1cÆÄ±ÇÛ*ÛOºté’«?¦Ç>ðÀ¹BåСC}ˆÐJ!€V“ÂŽ)ôØ~Ü´i{ó}©òa ù5…ÛSÅÇÖ”Ú!MW^Y“o§*‘³g7äöª¯_—çiJí3qâÀøô§‡ä¹P$MS°0o¿ýöFLÃp|õÕWã3ŸùL~܃>˜ƒ‡ãÆË!Èä‚ .ÈÕS`1½NSEǘܲeKœzê©9èØÛoRSS“Ÿ*H6=h‚ÀKÁ½ŸýlY®¹víÖæõ)ð˜‚)YSS¡¡"µMš¾ûÝOäÊ©-zhy®yÿýÏæ)… §L’C‘ãÇÈ•# £K¡ÃÏþó1cÆŒxî¹çâÉ'ŸÌë{õêµoß96õõõ±jÕªèß¿TWW7?÷¸ãŽËU}ôј3gN\~ùåyýž={ò¼OŸ>­²ýýüñqòÉ'ç÷SYYyHÛ $pXÖ¯=î¾{IÜu×âX¼xmóúxüÜçjâ²Ëª£¢¢TCµ@SµÈ¿þë‘9Pš‚¥©},XwÞ¹8OéþÔ¾©¢¤ö £ëÑ£G|úÓŸÎÆ 6D·nݦðãé§Ÿž÷V£GŽsÎ9'?'IÅmÛ¶åÐdEEE«lÿ­ÊËËóvTƒ€Ö% ´È¼y¿{îY’+¦0dRYÙ;ó>ûÙa9¨Ç‘K!Çk®‘§•+75‡NSÕÈo|cnÜtS]›¦Pdª¼ Y×®]sÅŃ9X²Iqqqóòc=Ý»wI“&µêö›ìر#W¨LÝ¿:%pä!€C2cÆò¸å–ÇruÂ&cÇö/|áÃ1qâÀ(--ÖHm¤ªª,¾õ­1ñõ¯j®™>¦*‘#Fô‹¯}mTLž<8ŠŠºj0x‹—_~9ⓟüd”••µÉ6–.]»víŠAƒEÏž=5:´"AHàmßÞ÷Þ[ßùμ\‰0)))ÊU S%šš t¥ã”)Cò”>ˆ¼õÖ…±hÑš¸ä’ÏÕ8ÿæoÎË/–?'àMݺuÛ·¯¼$† ÒfÛxúé§ó|øðáZY—}ÓÞ©S§Æ7Þ¨5€f)ùÕ¯>«Woη++{çêý×#£¬¬DˆVMaÈÛo_ 󺊊ҸùæÇUW}XÀQ²víÚÿ·v¡hM)ÿèÒÀRò†mÕUWŸ« ¦*„¥¥Å¨À¤Ê×_n¨Þÿ³qË-E}ýº¸úêóò7¿9F ŽHh;‚@6oÞïã+_™‹­É· èû·cs’ÂWTÔ5.»¬:O³f5Ä—¿<3‡YS òŸÿya|÷»Ÿˆ±cûk(Únû¦©µµµ‘& óIa¹/~ñ¿âߘ«WoÎÈú§ â¶Û&æj´?é3¼öÚ³bذãé§_ŽgŸ}5¦Mûmüö·k£¦¦"ÊËÑH´ uuu*B@gµiÓö¸á†Gã¶ÛEcãž(--Žo¬¿þ둹º í[ú S5ÏÉ“Çø›t>}Y̘±<®¼²&Wûˆ =„€N&…Søñ–[‹µk·æÀÜ5׌ˆo~sLTVöÖ@LÓç›B‘ßùμøÞ÷ä`äý÷?›ƒ¯é¾’§P¸ü:‘ºº•ñå/ÏŒúúuùö¨Q§Ä÷¿ÿɨ©©Ð8\ªþøÝï~"¾ð…çïÀܹ/ÅW¾2+þéŸÄ~01Æ ‘(HÝöMSkkk#M@Ç´~ýëñWõóø›¿™«@VT”æäÿù?ã¢_¿c5P'’‘ñÃcðàòøÕ¯VÇïÿZÜsÏ’xñÅqî¹'Gii±F `ÔÕÕ© ÝŒËã/ÿrzC–”Å7¿9&®¿þ\·Nî²ËªcòäÁqë­ ã¦›êâî»—ÄôéËâG?šS¦ Ñ@ AHè ¶nÝW_ý`Ü{o}¾=jÔ)1mÚÅQUU¦qÈR06…bS(òŠ+þ3êêVÆ%—ü{Lœ80WÊÊJ4ï»nû¦©µµµ‘& cX´hMŒý¯1þªfûîw?ßûÞø(/?Fãð6½{÷ˆ+¯¬‰ÊÊÞñä“«bñâµñ/ÿò›8÷Ü“ã”S> xßÔÕÕ 
B@Gó÷??þìÏîM›¶GmmUÌ™ó1vlÿ(*êªqxWþðã/þbx<ÿüú†üÑžÎß›1c>¤qx_B@²víÖ¸âŠÿŒïÿW±gÏÞøú×GÅ~49W„„CUZZþçCóòOü>y䥸å/WÆøñò}p4 B@±`Áê8ÿü‹… ÿUUeñÿñ™øŸÿó#ѵkÃaIÕD?ñ‰Órò7¿y9î¹gIŒÑ/¿àh„€à‡?üM|ö³?õë_ÏUû~þóË£ºú ë¬ìŸÿüÿˆ¥K_iCöéÓ3Î>û$ÀQ! íØöíqõÕÆÍ7ÿ2vîÜßúÖ˜¸ýöIQZZ¬qh5ÅÅÝâÏÿ|h^~üñßÅC-_ܘ«E¦û:»mÛ¶ÅêÕ«cݺuQ\\=zôÈëwïÞ×¥yIIÉÏÙ»wo¾/Í›ßÚÛã7bÆ ùv·noÿ;ÎÎ;ãÕW_}Çû E BihV¯Þ—\òï±`Áê|œ6íâ˜ýú«qhRøqÍš5¹òM A¦©±±1¶lÙ={öŒ¢"õ%8tÿöoÿ/¼ðBTUUå\Îgœ‘Ç”õõõQYY'Ÿ|roþîw¿Ë•‡ –Ÿ—*7NŸ>=>ðÄ”)S¢k×®m²ý´>…!ÿøÇ?ÆÈ‘#ØNz_>ø`iNš4ÉX(h)Ô- íȬY 1nÜ´X·n[ÔÔTÄã>ì«a8jN8¡W\~ù°øÅ/VIJeëcÚ´ßæï¢ï!íA:á|×®]½/­ïÝ[¥cÍ3Ï< ,ÈaÇÏ}îsñÁ~0N<ñÄ2dH ¦åâââ8å”Sr01]ãøãN8!î»ï¾xíµ×r²¼¼¼Í¶_VV/¾øb®ˆÞ¯_¿¶õüóÏçjƒnhª„쪠}¸óÎÅqÑE÷ÆÖ­;ãÊ+kr2U„„£­¼ü˜xòɫ⪫>Û·7ƤI?Žþð7€‚—*AÎ}ðVË–-ËósÎ9çmS±´ôͿۥJ‹&LÈ˳gÏŽùóçǪU«bèСqÚi§µùökjjò|éÒ¥<&…3“áÇû0vAÚoû±¸úêsèì[ßwÜqa”–kÞ7EE]ó÷ðÆkóíôýœ:µNÃBª²˜¤ ï%SðqË–-1gΜ())‰qãÆ•íŸyæ™9Œ™*@îØ±#¯KÕÐ_xá…èÙ³g 0À‡ ´ ‚Pà¾üå™qà FcãžøÁ&ÆÍ7<‡Ð LZ?úÑäü¼é¦ºøË¿œž¿«PˆŠ‹‹ë>x«×^{-ÏS˜ðP¤ d“ÊÊÊèÕ«×QÙ~ ]4(›«H¦Pd CVWW¿­š$@¡òk*Uœ4éÇqë­ £¤¤(~ò“)qÍ5#4 çÊ+kbÚ´‹ó÷ôÎ;çïmúþ@¡éÛ·ïAOôîÒ¥K¾USqÛ¶mïùØÝ»wÇÃ?œÇ¢¥¥¥ÑÐÐ+2­í×ÔÔäù’%Kò¼¾¾>χ æƒÚ AH(@©¢ÞEÝ3f,ÏᲸ,.»¬ZÃP°Ò÷3}OKK‹cÖ¬†üýݺu§†  ôèÑ#úõë—«ætëÖ-Ÿˆž*ä¤ui‡ª¬¬,Ï׬Yóž7o^¬_¿>Î9眘8qb^7sæÌ\•ñhlÿ´ÓNËÌ+Vä÷‘‚˜}úôÉ•)Ú‹nû¦©µµµ‘&àýׂœ9ó…*{ä‘ÏEmm•†¡à Ð'>ùÉÓã§?}6–,y%žzêqñÅgDqq7@Á(**Šc=6Ÿ8~ÜqÇEïÞ½ó:h‰={öIJeËbÆ ¹²bÓ˜rïÞ½±`Á‚|s¦àáþçæ Ž—^ziœxâ‰9¼˜¦T)2…›<õÔS±xñâøà?˜/âÑÛORôT9rÕªUñ‡?ü!6oÞgŸ}vTUUù v¡®®N Éöíñ©Oý$‡ ‹Šºæ {B´'¥qöÙ'å0ä²eëãé§_Ž)S†äï3@G‘Ï=÷\¼úê«ñÌ3ÏäpáK/½³gÏŽ¥K—ÆÀãøã{ï½76mÚ“&MÊÇ$Ubüõ¯ƒ‰ƒÎÕ“ûî»/V®\™ïOÏmí7IIAË-[¶äÛ^xa®”Ф ¤¿0@¹ä’Y³r%È‚?~€F¡ÝIáÝÿú¯?²²’ü}NNS¥S€Ž"UYüüç?ÕÕÕ9\øä“OÆüùóãõ×_±cÇæ€c}}};öïß??®IªL>f̘\µqΜ9ÍëÓí¤OŸ>­²ýý¥PäÉ'Ÿœ—SÐòP¶PHŠ4†/}iF̘±š5«!®¾úÁhlÜ?øÁĸì2Wc§ãIßë;î¸0‡!¿ô¥1cÆrðÿ¤ª—\rIœuÖYm¶§Ÿ~:χ®Áöù[¹ošZ[[iŽžúúu1nܴؾ½1¾õ­1ñ¿ÿ÷Ÿh:¬ššŠèÚµKüâ+âžË•"ûõ;VÃ^¯^½âøãoÓmœtÒI1bĈ8õÔS58ÐîÔÕÕ© ï‡Õ«7ǤI?Ž­[wÆ•WÖÄ7Öj:¼ø½æš9ü{ÑE÷ÆÊ•›4 ÀQPQQ‘'€öJ޲ýC`©JÞ~01ŠŠ§søþ÷?#Fô; ïÆQ8ʾô¥±hÑš¨¨(Ÿÿüò())Ò(t)ô›¾÷UUeQ_¿.÷‡ÆÆ=€w$ GÑm·-Š;ï\¥¥Åñ_ÿõç9 Myù1ùûŸæwß½$n½u¡Fà BÀQ²`ÁêøÊWfååŸüdJŒÑO£ÐiUWŸÓ¦]œ+D~õ«G]ÝJÀA BÀQ°víÖ¸ä’íÛã[ß'Ô(tzãLjo¬ÆÆ=ñgv¬^½Y£ð6‚p\}õƒ9äÕüÞÔ Naá+®øÏŠ€ý 
B@û»¿›3f,ªª²˜6íâ(*r¸ö÷“ŸL‰ÁƒË£®neî/°?GV  -Z´&nº©./ÿèG“£¼üoQZZœûG ßpã±`Áj@3AHh#[·îŒ‹.º7¶ooŒ¯}TÔÖVix#GVÆ7¿9&/OšôãX¿þu@& mä/ÿrz¬^½9 o¾ùãÞÃÔ©µ1qâÀ‚LýAHh3f,ûï6ÊÊJbÚ´‹£¨È!z8wÜqa”—Ó܇ÀQhe©šÝÕW?˜—S%ÈÊÊÞQEEi|÷»ŸÈË©*äÚµ[5 @'' ­ì«_}8‡·F:%®¹f„ºòÊš¨­­Š­[wÆW¾2Kƒtr‚@‡²{÷îX»vm¼öÚko»oïÞ½ñÊ+¯Ä–-[Züºo¼ñF~Ý]»vôþ;w¾ëýtuu+ãî»—DIIQL›vq94‡#õŸÒÒâ¸÷Þú˜1c¹èÄm:”M›6ÅwÜßÿþ÷cãÆÜ÷È#Äm·Ý‹-jñë.\¸0n¿ýöxøá‡zÿC=”ï_¶l™ klÜ_þòÌ<ÿÚ×FEUU™FÃTYÙ;n¼±6/7õ+€ŽnÇŽñòË/ÇÊ•+cÅŠ±fÍš¼ ³„:”¾}ûƹ瞛+CΜ9³yýºuëâÉ'ŸŒ²²²=zt‹_·ºº:ÏŸyæ™üÚûkllÌÈ¢¢¢8p  »í¶EQ_¿.**JãúëÏÕ p„®»îìˆ\¹rS|ï{ 4С¥Àc >¾þúëù¼õ={öÄo¼øÃbûöíèÔ!ç¼ó΋ãŽ;.^|ñÅ\Lf̘‘'NÌÅ–JËÊÊÊ<ˆlhh8à¾^x!vîÜ™C=zôðtR›6m›nªËËû·c£´´X£À*))ŠüÇñyù–[‹õë_×(@‡µaÆ|Þû[íÝ»7ßЙ BN :N˜0!/Ïž=;æÏŸ«V­Š¡C‡Æi§vد[SS“çK—.=`}}}}ž>\ãtb7ÜðhiuJ\~ù0 ­dÊ”!Q[[•ÃÆßøÆ\ tX©@ÏáÜÐBR <¦àã–-[bΜ9QRRãÆ;¢×<óÌ3sÈòùçŸÏ%Ç“]»våŠ={öŒhx€Nª¡acÜvÛ¢(*êßÿþ'óh=?øÁÄ\òÎ;Dzeë5@'ãÈ Ða¥ d“ÊÊÊèÕ«×½^ S4(cÙ²ey] E¦0duuutíê' ³JUê÷Ä5׌ˆšš ­lðàò¸îº³s?ûêWÖ @‡T\\|X÷tR;@‡´{÷îxøá‡s8±´´4råÆ#USS“çK–,Éóúúú<6l˜Fè¤æÍû}LŸ¾,JK‹ãk_¥A ¤þU^~L̘±<êêVj ÃéÛ·ïA ôtéÒ%ßЙ BÒ¼yóbýúõqÎ9çÄĉóº™3gæêGâ´ÓNËÁÊ+Vä×OË>}ú䊓tN_ùʬ\¥îæ›?••½5´‘‚Lýlÿ~БôèÑ#úõë={öŒnݺåPdIII^—æ™ $Ðᤀâã?ž‹µµµ1hР8ýôÓcÓ¦MQWWwÀcŸzê©x衇bË–-‡ôÚéJ©úãÞ½{ãÈ•'‡ªÑ:©»ï^‹­‰úÄu×­A ]uÕ‡cðàòX¼xmÜyçb t8MaȪªª8õÔS㤓N‚AH zðÁs@qܸqQ\\œ×]pÁQTT ,ˆW^y¥ù±)0¹hÑ¢XµjÕ!¿~MMMž¯Y³&ÏS0€Î馛êò‡û÷ïÕÕÕÍë;î¸3fLìÙ³'æÌ™Ó¼>ÝNúôésÈÛ8þøããä“OÎË•••-z.GªÙа1Wƒœ2eˆ£dòäÁ¹*äÊ•›T…è$Š4Б¤ðãé§ŸžË¿ÕèÑ£ãœsΉnݺåÛ)0¹mÛ¶𬍍hÑvÊËËóóUƒ蜶ooŒo|cn^þîw?¡$E©¿ýã?Ž .¸;n¸áѸì²ê(--Ö0˜#1@‡s°d“âââæ äc=Ý»wI“&µèõwìØ‘+OvíÚõ€ª“t÷Þ[«WoŽêêru:àè?~@ÔÔTÄÚµ[s c„:¥—_~9âüóϲ²²=wéÒ¥±k×®\y²gÏž úÎwæåù×¾6JcÀûä›ß“çÿðó£±qèÀ!N)U…¼ä’K⬳ÎjñsŸ~úé<>|¸†è„fÌXË–­ÊÊÞ1qâ@ ï“Tµªª,÷ÇéÓ—i€Lè”N8á„2dÈa=wÒ¤IñÅ/~1Î8ã Ð ÝrËcyþÅ/Žˆ²² ï“¢¢®ñ¿þ×Ùy¹©J+“ $@ UTTä €ÎgÞ¼ßÇ‚«£¤¤(®»îl ï³ÔS yÑ¢5QW·RƒtP‚pˆîºkqž7…¯€÷×þ¡äù—ßh€JÁúõ¯ÇôéËòòç>W£A @|ö³ÃòüþûŸµk·j€¨HÀ{»ûî%9 9vlÿ¨®>Aƒ@<¸<&N3f,;ï\_ÿú(@AilÜ s`¿iÚ°áõX½zslݺ31“t;=6Mé1iþ^**Js…äý—ËËÉÕËÓí¾}ÿ{9Í«ªÊò2´7‚pîºkqžá ÖP`R¿LAÈÔO¯¿þÜ(*êªQ8ªšÂŽiZ´hM<ÿüúæÛ›6mo³íî_ yåÊM‡ü¼–¬¬ìƒ‘i~ÒI½ó¼iJëíO($‚ðfÍjˆÅ‹×æ“ÂÇ A 
À¤Š)°±lÙúÜ_Ómh+M¡Çºº•ñë_¯ÉãÄ4½SÇT‰1M)|ØT‘1UjLcËTÁ1Í“ý«;¦u‡DLAÈíÛXNóTi2Í_yåÍåTm2Užlš7Mé}Liiqs(²¦¦"%SUôôþ èãKÀQ' ïá§?}6ÏSÕ¹t;PXRPä‹_ßøÆÜ¸ï¾zAHZU 8Λ÷ûX°`uüìgË¢¾~]î/.ÏS ôær ¦€c[ÙÿµÓvU I¦ ’)™æøÃæ¼¼ÿ”.0¦¹s_:à¹MÕ$GެÌÉ”LËi=´AHx©ªÎôéËòòå—Ó P .»¬:n¸áÑÜ_7mÚ.´ ÀIAÀûï6ìß|l =ÖÖVÅðá1vlÿB<” Ž…"(ß- ÙT=2Méßÿâ‹sÌŒL!ʃU“lª 9bD¿8çœÊŽT=€Ö" ïbÖ¬†|¢wªtãDn(\)€’‚©ZWê·) -‘B~)ü˜ª§ð_ª™¤àc õuJŒ7 ÏÓºŽ,UwLS¿µÒrS5ÉE‹ÖÄï~÷æ!þê¯FÆe—U«>x”¥cš.¿|X¾*D¦ä¯~µ:ïÃ/^›Ã’iJŸYRSS‘‘ãÆ ÈI)è|!à ¶nÝsç¾”—S…9 }hꯩÿ¦`Eyù1 “JÈï|g^Üyçâ<¶K&Oó7çæ0…!í«SUÎ4Ý|óÇóg•³g7D]ÝÊ\12ÝNSS0räÈʼÏ?J¼,Ì Ðñ BÒ¡íÞ½;^}õÕèÙ³g|à8྽{÷ƺuëâ˜cމc=¶E¯ûÆoÄk¯½}ûöîÝ»¿íþ;wÆÆßñ~@?ÏæÍ›óßÿwíÚ'žxbôéÓ§ù¾Ö>nN¸Þ¾½1W«IhÞmûŽ-@ëH}eíÚµûúÞö¨¨¨ˆÞ½{pÿ¡ôó“N:&÷Ù–Hýxüø­þ>ôyÀøêÈÎË8”íÉom ÒÝrËcqë­ ›S¦ ‰¯}mT®*HaK¡ÆTm «¦*ž©bdšR82íã›nûÛ5?þOÿ´þœ«ªÊ4b+¯Ž´Ÿ_ÆWí|_A!„¤CÛ´iSÜqÇÑ¥K—¸öÚkD>òÈ#ñÄOĘ1câcûX‹^wáÂ…QWW#FŒˆ &¼íþ‡z(–,Y_|q :ÔQàvìØ‘wàiž=zôÈ;ó4G?×Ï 0¤ã83f̈§Ÿ~:/76lX\tÑEy¹µüô§Ïæyª4ó^ÛwlŽL:Alîܹ±`Á‚ÖWVVÆ”)SšO;Ô~žúm IüìgËZ„<Ô÷¡ÏÆWGv^Æ¡lÿpkSõÇ›nª‹•+7åÛ©d @¦ª´Oee%yž¦©Sks¸5UŠLãõ†\¶l}ÌšÕ§¯~õá0 O®.™‚‘iLPRÒ9O•míñÕáôsã+ÀøªcŒ¯ã+(ÝöMSkkk#MÐѤ”|cccüîw¿‹ 6äA\’ôÓ§OÏ?ð釾k×®-zÝ”¼O;…?þñ1räÈžŸ¶÷àƒæÖ¤I“¢¨HÞ¸¥ðãš5kòÕ Ò€?Mé3ܲeK¾ƒÏO?×Ï 0¤“:æÍ›gœqF>q묳ÎÊ·LWÖn:I£µ|íksö=w[ÜtÓÇbýúåïº}ÇàÈÜwß}ñÛßþ6÷± .¸ ÷±Ô§Ÿ}öÙxî¹çòíÔ¯µŸ—”t»îZœƒ×]wv«¿}0¾:²ó2eû-ý­Mø‹.º7þùŸæ ‚©:ð\×_nTVööw ÅÅÝbàÀ¾9äšöó—]V«¸à=bõêͱfÍ–üÉO–Æ?üÃüxâ‰ßïÏ¿ž+E¦ê‘ÆW‡7¾:œ~n|_µïñ`|…${}SéðÎ;ï¼xæ™gâÅ_Ìó3Ï<3_ébÏž=1qâÄÃúÁN”–_½zu444Ä Aƒšï{á…r¨nÈ!* ¶i`‘¾ o•‘龓N:I#éçú9€ô÷údܸqÍW´®¨¨ˆ~ô£<®µŽ¤¨Se™t¢tª"ó“Ÿ<ù®Ûwlߊ+bùòå1`À€øÌg>Ó¼>õ©õë×ÇóÏ?Ÿ«Y¤«×j?¯­­Êý7õㆆ¹2Tk¾}0¾:²ó2eû‡ú[Û­[÷ø»¿›·ÜòXÀ§ê7ßüñ¸êªwÚJ€M A¦©éâ)™ªCΞÝмœ¦¯|eVȦñý¥—VïÛ§÷3¾jÁøª­ß‡ñ`|U8ã+¿µ`|…ª«& £K;ˆ¦2¾³gÏŽùóçǪU«òU-N;í´Ã~Ýššš<_ºtéëëëëó|øðá¿H;ïäõ×ã±ÇÖÄ?ÿóÒøÕ¯^‰;v7߇~®ŸÀû/]Ñ:IWµ~7­uÜ`ÆŒåÑØ¸'F:eßkv=¤í;¶‡']Á>9ûì·WnL'u%©·´Ÿ§0d’‚mñ>ôyÀøêÈÎËhñձǞ£Gÿk|ãssòÊ+kâ¹ç®Ë8!ÈÎkäÈʘ:µ6ž|òªxùåëcÚ´‹ów£¢¢4Wýû¿Ÿgõãøãÿ¿øÒ—fÄôéËbûöFã«6èçÆW€ñUû_ù­ã+(dþ§O§vi‡‘~¼çÌ™%%%ùJG"%ògÍš•ó;vìÈ)ø]»våd|Ïž=›w"¦µk·æƒ] ¾O=õÊ¾ïÆ†~lÒ£G·øÿãø}ß“ÁùàXúã˜?Žêçú9¼FŽÏ=÷\<üðÃùoõéDtë.]º¼í±­qÜà—¿\™çŸúÔàCÞ¾c px6nܘç‹/ÎýgÛ¶mËó?þñ-îçŸþôjNý¹©BTk¾}0¾׿Û·ßÚgžéÿ÷çdeeïvk 
ÁC“~¼üòayJR…ȹs_Š»îZœ«FßvÛ¢<¥J¢S¦ ‰?ýÓþ1qâÀ\YÚøêÈû¹ñ`|Õ¾ÆW~kÁø TFÓ$Iå}{õêuD¯—vBi`˜Ê/[¶,'áÓÎ!íRj¾kWW Iúƒg]ÝÊ|€+ —-[Ÿ¯à{à@ãÍðÇSË—oŠ ֿ馛"‡ Sò¼óª#õsýÞ'tR\uÕUù„‘—^z)V¬X}úô‰É“'ÇÉ'Ÿü¶Çéqƒt‚t’*Bêö[€Ã³yóæ<å•W¢[·no»ÿ„NˆÓO?½Åý<×MæÍû}>>œª»¶æûÐçã«^m¾ýƒýÖ>ýô31mÚÎ}·ß|Téïÿq|²Á{Iãƒ4}ë[cò9dé¢ ÿò/¿ÉË?üáoò”Î ›üñü÷û{î¹'®»îº(--m~Ü‘7H'A¯\¹)ÊˉêêZ´}Ç åz÷öZ\|ñÅѯ_¿CzΡôóÁƒËs¨µk·æ~½n­÷¡ÏÆW§·ùö÷ÿ­íÛ÷Ը袙±fMD¯^ÝãÖ['ä $Ž4VHÓõן›Ï7»÷Þú¸ï¾úX¼xm^NÓ—¾4#‡"/½´:_$%-:óøêh¼ã+Àøê莯üÖ‚ñ´'R;t óæÍ˃¹~ô£ñ¡}(î½÷Þ˜9sf\{íµÑ½{÷Ã~ÝTª8 ÓÕ2Òë§SºjFJêstmÚ´=_Åsöì†V|kÅÇteÏ|L¡Åt¥®4o .¦Ï6lˆ;wÆÞ½{£¸¸8N:©oœqFI|âócRˆ²) y(ÁȦjèç@ë+//‹.º(ãÙgŸßýîwqæ™g6ß¤Ç Òq§dìØþ-Þ¾c ÐrÇw\¬Zµ*Ö­[wÈ'pj??~@Üyçâܯß+y8ïCŸŒ¯Žì¼Œ–Œ¯~ñ‹qýõ·íûÞ'žØ-æÎ½ú=ÛáPUVöÎÈ4­_ÿzÜÿ³9™.ÀŸ–ÓÔô„Ï~vXLœ80_8¥3ޝÚú}_ÆWGo|å·Œ¯ ½„¤ÃkºšEúá®­­Í!·” OIúººº8ÿüó›ûÔSOå˜1câØc}Ï×îÒ¥KNÁÏŸ??xàœØO¥‹i{é ¼)„øÈ#/åÐcªú¸ÅÇ$…ÓžR81ÍËÊJúZ=zôxÏA@ :¦€cš’÷ F&UUeùñMáÈtý8Yº0ÿ§>58.¿|XÁ]4¿-ÇW-éçÆW€ñUᎯüÖ‚ñ´G‚tx>ø`þ¡7n\Þy$\pAN³/X° ÿ Ÿxâ‰y}ÚÑlÙ²%N=õÔ2dÈ!½~*œvkÖ¼y0Tyà¶‘*>6Ó<÷×TLÕS2UgLWéj+‡ŒLaÍteÑ4%‚‘ú9pxÒq€Ûo¿=ªªªbРAÑ«W¯X½zu>á#]ûä“On~lk7˜5«!Ï›Žµdû‰c Ð2 ˆÆòåËã¶ÛnË'b¥«Òïܹ3÷£çŸ>.¼ðÂæ~Ú’~Þµë›^gÏ~±Õ߇>_þy-Ùþ÷¾·`ßôb46FŒñ‰Ot‰Ñ£?âCä¨(--Ž«®úpžÖ®ÝšC©Rdš§ŠÓiúêWÎÿw¸ôÒê¸ì²êw¼`G_µäü+ã+Àøª0ÇW~kÁøJŸ§½„¤C«¯¯Ïeû÷ïÕÕÕÍëÓ`.¥å}ôј3gN\~ùåyýž={ò<íÕñÇŸ†i;©4pKžË;K!ÂôÇ¢*\´hM44lŒÆÆ=ÿýãUÔußθ"ÿi„9ø˜Â‰ïÁHýh;éêÕ£GŽ… ÆÊ•+›×§ãŸüä'›O$iãé«Won>Õ’í7qlZîÒK/ÍW²òÉ'cÞ¼yÍë{ôè‘Oð:å”S«Ÿðƒ]öõçtþíû¦×s…§Öxú<`|udçeêö§N­‹›nªËËŸùÌq1dÈ&¿µ¼oRÕÇTý1Mi\qÿýÏÆ]w-n>O,M_þòÌ?~@|ö³ÃbòäÁïë9mm5¾jéùWÆW€ñUa¯üÖ‚ñ•>O{Öeß´wêÔ©qã7j :¤;väôƒI©÷4èKSúAÿ×ý×¼³¹âŠ+Z´”Öúé§ó ñ¬³ÎÒè‡!_Ô IDAT…›Bƒižnï/]YkÀ€>1jÔ)988vlÿ‚¸rÖ¡:X02­ÛŸ`¤~¼»½{÷ÆÆéšQVVvÐãGzÜ ]¤ó¬³þo \Ï=w]‹·ßı8|¯ÿÿìÝ t•ÕÙöû0B€ˆiL)B„|!µ(Æ%¶T±bÅQú_ÏU>q+»eCÝØµà'ÝÚŠ}±bE mŠSM!ØH#"F i”ˆ!Ø\7}bˆ€„׳þ¿1¦ÏÊÁ$Ì•¹ÖÌšóš÷®]öÙgŸY§N¬k×®Ö¦M›óGmo%%UöÚkÿåkÎõs0æ0¿jø¾Œ£}ÿ»îZiÓ§¿ê·çÏÿ¡õìYÂc-Z%íu B‘……ÛkÞ¯=o C*©ýn:l%,ó«ãÙÅü ó«–Ÿ_ñX 0¿bÌ#’)ÿHEH„ÞÑ&oµO¶X¹r¥µoßÞ.ºè¢z}}=A)±ß¶mÛCÒú8ºo >ê$,Uy<÷Ü> ÌÈHjÑÓ±ŠŠ‘ŒsÐpÚÄÑ£G£~NC× „”´´žÇõý…µ a:wîì­±Æù!½­¤d£ïú!¿éç`Ì`~Õð}GúþuCW^9Ðî»oµh•´¯kêÔáÞŠŠÊì©§ÖÙ‚ë|?˜®júûBÈñãxA€HŸ_Õwÿó+̯Z~~Åc-ÀüŠ10  
ðñÇ[qq±'ÛuòE}¬_¿ÞöìÙcýû÷÷=ïXƒAèO·#9øøMF2Î@Ë9ÚºÖjDv/Ö€Ö5Γ’ÚÛŸÿ¼ÑV­Úb7ß<¬Ñ¿c¶/£®{ï]}Hròä3,??ŸÇZDU–¿óÎxÓ! O?]èAHUŠœ6m…7í»é¦a^-2’öÈ5æ8g~-ÿ¸Ëc-Àü ˆD!TRøÒK/µÔûÿUi`ù¿,!¡óq}Ö€Ö¥¼¼ÒN:éÿõÛ;wþß¾V͘€Æ±bÅ&»à‚>'»ãŽL›1#“ÇZ„ÒâÅEöÐCk}/˜~ßþnÇyð÷²ËÒt(KX0æ€ÇZŒy .åI! ­O›#ç éi½Eë0 @oRX[ZU[R¸ ¤äs+.ÞÑèáÆ<4Î<ìÊ+ÿì¡°©S‡‚ä±a£*jee»¼Bäï_àû¿îºk¥·ôôD»þú!6~üŸÇD#Æ<Dïc­æƒ¥¥þ<©ƒDχéï~µà5=½€ù‰T€còMÁÇÚ¡;…õ6ÁǦóMÁÈ¢¢2‚‘õ¤MV’šš@g!“’ÒÝ7NjœSe Zmp?ï¼?øcô˜1)6kV‚¨  Æ5×dxÓÞ¯§žZçÁÈ‚‚R»êªìÚks,;»Ÿ‡"µß+&¦-ˆx 4jŸ³*ÒõƒÊ}ϳšþn¯¨¨j”ï£0dŒÔk§œÒÅß—‘‘ä¯ýÅÅuà΀DJ ÀaÕ'øH¨®åÕ Fé>$ pdA2Z«®aŒë`œZo\ZS±÷™g~BØ QIÅÔ‡!Š\±b“-^\äMÁ "'NÄ>/@ÄÐßàùù[mÕª-¾Y·u=ZuGINîRSáQôÕüsèÐdGê¶¾/ u# À| ÝG“&¥{;Ò}L0à+……ÛýJEH |‚q½aÃv:Z‘G}Ë,Xçy‚¤2¢‚À&¤ySˆcÞ¼|Eêo•éÓ_õ¦Ê©?ûYº—zÔPÍIaDUÙXÁGÝÖa‡£ jéé‰^©Q{–BÔµ1(S(R?ƒöLÕ'ƒj”AÊœœ5Ÿ"ÇŽí猀Vø7]D'‚ч`$Àѽûn™_û÷' „ÍÀ=ýz¤M˜€æ§Mèª)?œÍa@ ‚Ü~ûoyy[ì±ÇÞòêË–{Su,&¯¾:ƒñh ê+Hø÷¿oõç&Ud¬M‡\(\8bD/ëÛ·»‡ ~l® àª(©¦Pc]z}@aM˜¤}Ôº´`/µž_uð@Œ¤r9´<‚@” øˆºFª´´Â¯¬ƒáTqÆ9 eUWï³+¯ü³Wš<ù ›8q…$j÷ß_éUTzh­‡‰x`7íçºþú!Ö ¤h* :*(¸dÉF@깨6<ë¬^xlÎÐc}U)ëþÛÖ¬)ñŸ*[êßwÏ=yÞ¨TUæK.`YY}¨d-„ $RQ_#@´ RÚØ \TQ©ö8´,·´Ñ\›åï¿ ÔcNsà C¼i =òH¾-\Xèû»ÔtøÃ/~q†Œƒƒ hý­ÐãóÏy…âòòÊšÁG…uä×ÔlÔ~h5UcÖÁ+VlòP¤*2—”|î‡¨ÅÆÆø~ký»õùB͇ $:M§O¡5ÝÖûj#´†újH0RØèMô"E°‘+L`3fí ›€–¡ê:Ó§¿êÆ|Õt€ã”‘‘t ýÐfÏ>ß}ô-Eï°™3s½©jÕÕWgXvv?€zÑa…ŸzjïK¯ûüó£¥Úøñ|¿pXé¹SÏ¥jsæ\è!P…"-zÇŸoçÍË÷B þHKëÉ/4õã3]D‚h õ F*©Ó]‚ßAÝ& Z«ŠŠ*++Û囜¨œ„ƵÖ+5Ά$ð -çÚks|¯Ó¯=Ê÷“hÍk¦NîM{ zh­‡4–-+ö¦yö|)¤Á>BÀ‘öÓó‰*"Š­1¢—W?7.5¢«>6„ú@mÖ¬¬šªÌA¥Èà}\Ï·&¤±gš®@ øˆÖèX‚‘ºªÍœI0´n Giƒ—6 S-§ÄÄ8_ÇÔx' -ãÖøÞ'í;¹å–átÐÈ´'KMU"u ½BšÿÜu×J»çž<¯V¥*‘ jðw@¯…)Ì§Ê ÏûÓvT˜oôèßóKïCU™~8ÛúOU#Õ¦LYf×\“a?ûYz¨«f@K ´B‰F€HVZZáW¥„“6rjÝRã=%¥;ÍLAtUÊ‘ûïC(hBú»æöÛGxS0ã±ÇÞò ËÂ……ÞÒÒzÚM7 ó`$c¢OAA©ýþ÷^Rs4ÑÞ‰ÙØ±ý,;»áÇc CUÓë z¾ÕAª©ÔTES‡deõáhŒÇ^ºhy:MCJÄ^ye“_ >"Ò5$©É¾NK! šK°>Ç&/ ¼‚ñ­5z@ó»ûî•V^^icƤø†pÍCcN­¸x‡=ôÐZBn·«®zÁn½õ%›<ù ûÅ/Πb„œ^ûRHOÏzh¿îW ò0„?훾óÎxÓáªÊ¼bÅ&¿­–žžh×_?Äû™×àø‘0Z@EE•Wy\¾¼ØC`zA/òÕ |DØ&øÇŒÌ­ùãJ/Â{n?…L'´4­á 5ál4¬»>hzÚ5wîZ¿­Í᚟*b«ë¬YYÈxðÁ5¾ñÞ{W{Ó>­Ûnáûá¡ý¹zÌW¾´´Âß§ý¸ªþø³Ÿ¥{@K‡~¨•y RTMÒ!Ó§¿j·Ü2Ü®¹&ƒ@$Vp€f 4ô‚APí1?ë×*>ê#zÕúÂìhÁHMþ5VÔîºk¥ŽÆHP5Rã„?@c)+ÛåWfÂ+ß! 
ùM›¶Â+òj÷ÆÐrtø‹ªQ©iŸÖc½e ¬³eËŠ½©2äÕWgx¥Hög@äÊÉÙèHíÍÕŽ“âÁG¶t€#S0RM'‰‚‘ ¯ZµÅV¬Øä/¸i¼©éý‘ ?Ð4¾Î=·ßÖ¸k¬qÂ)X7ضí»ªªÒßǺ¾q¾gÏ—þö¶mŸ2Πwƒ}ÿû¿ä{©´¹[{A´>Úg5cF¦ýú×£láÂBDj–jãÆ¥úVhòpãœýWм󫺻*Ø£½´ª2XV¶Ëß§PÂ*Pr¤ý´h~ªÆ©¦ýÏ3gæúuîܵ^1’@$ãœùðÍBuž<¶nÝjûö}büòË/í£>²¤¤$‹­™,ê† ø¨pVÝŠ :ê~²23{3!(FŽ?ÀßÖjµƒ‘—jª)I@þÞ÷’|ÆÇÇó8áW{Ý`Û¶/ü}ݺµgÝé8ïÑãà˜ÞºõsÆ94Ã㮬_ÿ©½þúÇÖ¹sŒ]~ùÿ¢ƒ€VNUÃ&Nä­°p»ýæ7yŒTUWµ¬¬>vÅivæ™'˜Gµ­ùÿØÍ7¿ªý¸Û®]W›;÷ï¢+/¯ô©ÈM7 óÇr=®£uÒ¾æÌÌI¾Z<õ<"^½å–áTdfœ3¿÷7 ]|åÓO?=äÉ#°}û.[±"ßÖ¯ÿÜCªøx¸à£&$ \)xEðhZ =y´`¤Æ«š?áøcNÁÈÓN;ц?ÙÎ:ë[þ"{@'gè1à”SN¡sˆµ×÷îÝï×víÚ²n„tœ×Æ8€æyÜ}â‰úuòä$-­§=ùäÅvç?ð0ä#äû¾,µnÝ:Ú¤IÿË~úÓ~~›ù4ïßµ|°Ó{ì{þù÷í‹/öøû´ýŽ;2mĈ^ #ÈÁ@do+.Þa·Þú’åäl´yóò=©0ë¯~5Š@d”ŽsæWÀá„j©ªúêŶ÷ÞûÌž{n“­Zõ±½óÎŽš…ÏÚäkÒqöÙ½}ÂHÉp eÕ F*¬\;©@äÁŠ‘fO>ùOk×®õëo=íòËûùíÚàØTWï³ÒÒ ?”@/ÈUTTyÓÛ‡{¾Ž‹ëà‡†¨R³ÞÖm^|Ñ6Ò\I¯]誹“è”Þà¤Þý}4±±1‡¼>Yûm]õvÝÛà £­ °n0Î {Üýä“/mÙ²-~Põ¤I©<îªwïx»ýö6uêp{≛={¥mÜXn÷ß_`ÿçÿ¬·«¯N³_þró+h¦¿kgÎ\küãÆš½íÙÙý<´®Jˆ\ ;>÷Ü/Øtß}«mÑ¢wüywÁ‚u"£lœ+è|ÒIj þ0¿ÅŽà–,ùÀæÍ+¬y»oß®vÞy§Ù¹çöñ¤6mh½´©OÕYÕôž6*ùâ‹ëìõ×?¶þóß5­[·XB€£ *.+ð¸aÃv[·ëVK¯×¥1m}áL¨Êzÿþ ~[Mú R(Ш×47Ò!jŸ}vðm}¬¤äóšd|lI:œB¯o*©*tÕÛ]»ÆúAº­¹YpxÑbèГ½÷8ÿå/3Ö ™œpB{HI—.è Âimwòä3,+«»l÷Н99›mïÞ}t4£„„N~½ä’¾và ƒ-3s" ´ª"óìÙçÛôé¯z²v RU?µ× áSRRa¿ûÝ{úéÙ5פٔ)ét p¸¿Kèà+:t°/¿üò÷%&v¶E‹.80aèNIa ‚iãÞ¸q©6dHçÿþ÷n?þ/~jFíÇðmÜWeåçŸ/òÓÆ´¡ÿp‚ó:uLϹ 0êíví­ô¸m[…‡&ƒŠ‘›7—{Ó÷Q«M›ó‡ K®9ˆD·ZŠæ0š¯è µO?ÝU󶇫†}4AUì bvðÚ…Z@·L<»wW2WÓϼ„0Eï æcjš‹}“àçÒ\O‡Uèg n+,Éá€HW{}0HÕþ€ðóÚ!HÆ94íã®*˜UâxÜÂ5ÎO?½‡Ýwß»õÖ3¬cÇvŒsh¦ù•Lž<À²³{Ûw¾s¢ÅÆÆÒA!¥ýXóçÿÐn»m„Ý}÷JB¡HUU 2##‰Ž ÂÂíöë_¿f99›¼Òk»vm|Ÿ;ó+àðØ¡ÔÒ£GÛºu«íÛ÷Õ E?ùÉiö­oàžqÞ­[GûÑúØoû›6mç ÀãÓOÚ¢Eï|-œ¨Øtê˜6½÷íÛ½æv°yÿx¡H}¯W^Ùä/ì¨iS¾B˜j¢°À„ ivÉ%E€&£Ð`0ùÇ?Jk‚ßTűvXPs&5u«æ1z[ÁºaÇ–¢ð¦šþ½A3¨bT´ ªYŸÌÏêÒ¿KHµ{ÖÌ9‰)ê®)Ö €ðŽóãxÜÐðq®BŒshÞù•è Aò¸´þøøããìW¿UˆÌÉÙèMHUŽÔ:"O~þV›6mEͬÆö%—œjW_æcœùpx!Z:vìhIIIöé§Ÿú“†’˜'fŒsÂJ›Ü.,´gŸ}ǃU(ÊÊêccÇöó ízÑL•‹“B”jÚ(¯ïÐæû¼¼-¶|y±åænöŸñÞ{W{ÓÏqÙeiŒä…<Ð9вeÅöî»e>R;Rà1¨~­@£®ýû'Ô¼¯)æHM-dkXQ}¥p¨‘ï½÷Õm…D5o ú¯î½h¦Ó®¸bed$µØÏ¦Áøñ¼‰NÀÒÏ©ª‘ HΜ™ëMW(R/èEZ4U>Ô" ª<-ôTuTxoðàÄšÛš›D3õƒÚá¨jdPE3•AIÍáÔdÊ‘€Ö+X7À8𸠀q<î¢5 ‘³feÙƒ®±¹s×Ú¢EïxSòŽ;2mĈ^ì£je´6ýÄvß}«móærŸÖ¦oºi˜Ý|ó0¿ àØ„€(¢À£N•Ò‹*Ú 
.z!eâÄAvË-Ã[íFt…2ÕfÌÈôƒt*Ö£¾UóBž^äÓ C:K§d€è¦ƒÈ[²d£ñÖ«K¡<-šìÁ¼´´ž–œÜ…EÁzReLõcÝfƒJ‘šs¾ñF‰­X±é°áHÍEÕÿ£G§ø|O_‡û82äª0¤öKiؼyù–›»ù@{Â×ÞT9’ƒå[^QQ™=òH¾ß?A¡íÏSOö¹LJQQ ºzŸ¿ r÷Ý+}Sºh³¿Âªª¨ @¤èÝ;ÞfÏ>ß_ÌSR/æi3ý7.µéÓ_õ“Í®¹&ƒŠˆ" Ü)h·jÕ¿Ö­ö„õT…Ps -þiNÁÂ_ÓÑâ«*>ªt EP=RÕ9u_)¤zpQv³Ní€jVVGr?_§59񎦶`Á:ßG¥µÓK/ý“¯‡*p§}T‘´7,ÒiŸ^NÎF{ì±·üШöêiý”õOàø±+BN/¨L›¶¢¦¤6“ëůìì~Ô B&¤yS òÁ×xõ§)S–ùíûïã'›€ðQxN!ºåË‹}®S·â£ªEkÎ3vl?=êTM“Zž[Õ4 﨩ީ«šîS5r¡«‘ª©ÿOoøŠÖÐn¾y˜‡}ô-DêÒ[o}ÉΟ4)ÝCxÉÉ]è¬&¢½yO=µÎ.,ô¾PU¤@:µ†  áB@Hi¹Ë–ûÛª~tç?ð äa è#5"ôoV…Èÿx¡ÑËæÌ¹Ð ²iáhñ⢚À\eeuÍÇ‚*‚ Ë)4§à#"ƒüÔ4—- !× b¤ ÕDWÝÇ?úQª –Lÿ¡Cño¸aˆ7­­þæ7y¶fM‰=ðÀ›;w­ïS•H­­rlÃUTTù:¦:À_Õ EëŸW_a“'Ÿaññ±tЈB@ÈèUEœ7/ßoëÅ õÂJ$W€<ÚŸ•õß~²™‘zé»ßçÿvUˆTHDµ(÷ûßXnîæšS3EÏéZ¤:4ÙÆŒIñÂ>ωª©¹›š(«@äªU[üª/Ôî¹'Ïã|þwŃü æzÀAãÆ¥zÓ!³ª¹hÑ;ŽTÓúª>ö³Ÿ¥S©°ž´Oë×O?]è}©ƒ]EU9uø«Ö.u +AS i°3BD/²\{mŽ•ùÛ“&¥Û¬YY¾I<šhã¼^XRR¡H5mœøálK€ÖKÕ¬Ÿþà\iiEÍûµ§çñ±cûù ¥£ƒaÕn¾yXÍ|WUAsr6úœwÁ‚uÞ‚ÔüO•"ÃX8éé‰öä“Ûœ9zeȇZ뇑ªéðQ­ÁªJ¤ eíÈ´N©ÊµÃܪµJnš;E tÒÔôé¯Ú½÷®öÛzK¡?.­T S}põÕUE© .X`7Ü0Ä«Cò­‡Ù{ì-_4ª]ùQ8-¼é$ÒÔÔž¿á!XµÙ³Ï·ââ‚TpV'Ù>ñD·ää.~‚í/~q†Ï‹€h§½T3fdÚ¯=Ê.,ôPŸ–פj:hâÄA¾6«uÚhWYYíý³|y±櫵É@FF’÷“ÂÑV hi! 
©RÒ¥—þÉòò¶x8@ U1‡*Iióûë¯Oö¨Â¢s箵üü­ö?ÿ3Þz÷ާƒh!:%SÁǧŸ.ôÓ3u˜ƒèùY‹Fªê§$àHT%T‹µj ÓjVZ-BjΧ¦ƒAˆT02!¡3€¨¦ýe <ª•y(ò÷¿/ðkµ¿JMëp ùýèG©Qu¿ú`Ñ¢wì7J<üXQQUó1íAÓ!¾—\2€ul %Ãèˆ\ ?*©0¤*ß<ùäÅ^!_7uêpaîÊ+ÿìÕ!Ï<ówöÌ3?¡¿hfªÜ÷È#ù^É/X8Š‹ëPSÁoĈ^T~D½éTZµÛoás=-ÖêwL·ÕT!|„4»é¦a,L¤¦&Ô<ªÃkŸ}ö_cÓÁ£÷Ü“çM‡j-W¡È¬¬>¡:œ_ëÕZK|þù"?X_-8ÀW´®8vl?ñZ‚¡TAéòËYeeµü^|ñ§T¹ù V¼ýö5vÑEôé,°Çç›â@ÓÑb‘NÎTR h-”ÝrËpŽVh š«Í™s¡­X±É|pŸØªE[5ý¾]}u†Ï Ýæ‡É«Íž}¾¯±)©56Ðÿè£oySR@}ÞYgõЏu^UÀTØqÕª-~Õ!¾µƒ½{Ç{èqô诩ÂZ‚tÚÖôé¯ú 1ÚÀ­0_˜NÛjJññ±ö׿N²)S–Ùܹk=Lª¹t²h\å啾 öÐCkmóær_°xtýõCü„Q ©(ä8fLŠ7Z« ®ÑjÓ¦­ðßÃk®Éð9"í´Më¹j¢uµ%K6ZNÎF+,Ü^³Ö&ZS0R‡”~ï{I~;%¥{«8ŒT?«ª=¾÷Þ<*øXV¶ëÏÑÏ©Ã{êÔ¿7-­'{ð€VŽ š<Á IDAT F¶„”Y³²ìöÛGÐ)õýc8¦­WúÎwâ½?gÎ̵/¾¨òþ¤* §Ð£Â A* ): T¡³qãRY‚!‚¼ë®•þBÐ3ÏüÄ7o£á&MJ÷Ó.½ôO‡TÚÇN'|j®’›»Ù ´`¥ {·Ý6ÂOùZ#-|>ùäÅ>÷S RaH…"Õ²²úØwdúé¶¾NaF­«©**ªj 0ªR£ÞVˆQ-5ªrcCh¿—Ö¢µ6­Ð£ÚàÁ‰þ¾´´žG VˆlŒlˆÓ¦­ðžBÏ=7Á²³ûÑ)h̘ï×ÿx¡÷³^l›=û|:€o Å©™3smÁ‚uþüëÕo¾y˜ß"E5÷ûÕ¯FÙ¬ñP䊛¼éÐ ½ŸÓa€o¦`bzz¢·#Q²²²ºæª G«òT‘Ô´n+€ :„€VNÁ¼ R¡*A‚l:™Lý{ÑE´{ï]m'œÐÁfÌȤc8 -B)0vß}«ýDO-:)0¦Êz:yˆDZ8Õüïšk2ü |Ÿx¢À+Dªº)_ á‚5eUo€ú  ­ØâÅE6}ú«~{þüÚ¸q©tJReHõóUW½àÕ­RSl„4:€Z ›2e™ŸÎ<Ιs!óZx}üñq^ òÆ—Ú²eÅ>7|ä‘|»ÿþ1Ìh! •ÊËÛb—_¾Èª«÷yu¥É“Ï SšªY••í²[o}É~þóž>3³7ˆz……Û=–›»ÙßÖóãwdò<‰ÐR¸÷/™è¿ó Bêªù¹‘ ÿ¦¥õ¤“h&! 
R…¥K/ý“UVV{Å™ÛoA§4£©S‡ÛG}n<°Æï‡7ßüoëÝ;žŽD%ÍGî¾{¥ÝsOžРCT)Ð9ˆ ûffN²G}«&ùÝïÎó9£ÆB\\: €&FZ ¾Srذd{üñqtJ ˜=û|ËÏßZS™ó¯d±±ü ˆ.zT…äââÓÖn¾y˜Ýyç~!*©Bûĉƒ< yォ=¼pa¡ÍŸÿCËÊêCЄÚÒкL›¶ÂCÉÉ]ìÅJø®…(ì¡þW%È5kJìÖ[_¢SQCU ¯½6ÇÎ9ç Afd$Ùë¯O¶ûïCQMsóY³²|<èÐ’Í›Ëí¼óþà㥢¢Š ‰„*{÷îµÒÒRûì³Ï¾ö±ýû÷Û¶mÛlçÎõþº_~ù¥Ý={ööãUUUGý8À±ÊÍÝl<°ÆCxO>y±%$tf~Õ‚âãcý~Ðý1wîZ[¶¬˜_R@è”Ú™gþÎæÍË÷ç@UI~íµÿò0$˜_á  …ƒŽÔxùîwçyEq„W°¦·{÷n[ë›6m²7Ú®]»¾öùMµ>Xߟ…õDaÐî@›‘™™ij¾¢ èûÛæc£·7D†ÿûßöÈ#ØÚµk---Í:uêTó±W^yÅž{î9kß¾½zê©õúº«W¯¶gŸ}Ö'€ýúõûÚÇ_xá[ºt©tÒIvòÉ'sG0Î8.ÕÕûlìØ§lûö/ìî»Ïµ‰1¿jó«^½ºZ—.mùòb[µj‹Mž|†uèÐŽ_X@()øÙeÏØÖ­;-55Á«#?À‘`~…CµmÛÆ«BŽ—jýëû¶qã§öøãoû\qĈ^t@}üñÇö»ßýÎ×ô´6÷‡?üÁòóó­°°Ð×ìÊËË}½®M›6þùMµ>Xߟ…õD‘.77—ŠÂ¥G6|øp?ÍB±ÀöíÛíõ×_·øøx9rd½¿®&²aÃÿÚµUWW[QQ‘ÅÄÄvRp¬¦LYv`^Qféé‰6uêpæW­h~uóÍÃ|“ûæÍå~?6UvÁ ìÆ—ZeeµÈðöÛ×P’ùŽ©ï{úxÑ:ÜdÚ´vÎ9OXYÙ.: ¤´.÷ôÓO[bb¢]|ñÅöƒüÀN8áûÇ?þaË—/¯ù¼¦Z¬ïÏÂz"€0  tÎ>ûlëÖ­›½÷Þ{>Q“œœÛ·oŸeggû­¾4MNNö0Š‹‹ùØ¿þõ//®I_ÇŽ¹Àq)((µyóòýöœ9¶ªªK̯zøál¿>úè[–Ÿ¿•_Z@h(èÿýï?jË–ûdþüÚ“O^l±±1tó+#ÿùŸñ~;7w³ùÿ¸/vÐ9!¤ £Œ'N´ÓO?Ýoÿô§?õ½ýöÛ¶{÷îšÏmŠõÁúþ,¬'‚BGÁ±cÇúm`¡2Þ~ø¡Oêúöí{Ü_7==ݯëׯ?äý*.ƒ¦óÀq»õÖ—¼z̤Ié6bD/æW­p~¥J7Ü0ÄoSyy[<YX¸ÝRRºÛ›oþ·Wµó+Ÿ Ò|¥¦&xµ÷3Ïü­X±‰Ž™SN9Å+/Ö–””dßúÖ·lÏž=öÁÔ¼¿©Öëû³°ž Ò„Jšjb¸sçN{ùå—-66ÖFÝ ¯9pà@Ÿ„¾ûî»5'thb¨0:uêd)))t<8.99}st||¬Íž}>ó«V<¿ºóÎXBBg,Zô¿¼€ˆ¦ ç÷+-­°ÌÌÞöÚkÿåÁ0¿Bä¥õô0dVV+/¯´‹.ú£-^\DÇ„ˆÖ÷çä“OöëçŸ~Èû›b}°¾? 
ë‰"AH¡¥‰b@e¼O8á„Oû÷ïoÕÕÕVTtpã’&šü¥¥¥YÛ¶<¤€úSÈiÓVøí dÇüªõίj‡Uƒ*žD¢¹s×Ú¼Ð*+«íšk2ì/™h‰‰qt ó+4’¸¸>®TQ\ãLãíÖÐ1!×±cG¿VUU}íc½>Xߟ…õD‘ŽG)¡´wï^{饗|2gÅÅÅ~RECåÀ×­[ç×  ø Aƒètp\æÍË?0§ØîUcB`~ÕúçW“&¥[FF’mÞ\Îfv@Dºë®•vãK=œ¥€ÿÃg[ll Ãü ,&¦­Í™s¡7™2e™MŸþ*b»víòk—.]yS­Ö÷ga=@$# ”òòò¬¬¬Ì†jÙÙÙþ¾¥K—úi ¡’äšx¾ÿþûþõ5íÞ½»ŸÈP_ Ìœ™ë·U R¥™_EÆüjÖ¬,¿Þ}÷J«¨¨â—1fÌÈ­ biþ1uêp:…ùš˜ªBÞÿ¿­ ²Æ!"Û_|aûöíûÚû?øà¿vëÖí÷7Õú`}ÖD2‚BG²×^{Í'h™™™^¾û´ÓN³òòrËÍÍ=äsß|óM[²d‰íܹó˜¾v›6mü´‹ýû÷ÛsÏ=ç'sÔ.QP ¬;0wÙåÕ³³û1¿Š ùUVV1¢×>¨´G}‹_f@D¸çž¼šCæÏÿ¡ýú×£èæWh&7ß<Ì|œ~¢qH ²•––Ú³Ï>{Hñ­·Þ²Ï?ÿÜì”SN©yS®Ö÷ga=@$# t^xáŸ=Ú:tèàï»à‚ ,&&ÆÖ¬YcÛ¶m«ù\M(óóóíÃ?<比ߺu«_)Ž×C­õëm·hÕÕ ™_Þ-· ?ä~ 5{≂šJsæ\h“'ŸA§0¿B3›4)½¦2¤ÂóæåÓ)ªS§NöÏþÓæÎk/¿ü²`TÐðœsÎ9äs›z}°>?‹°ž R„*………>éëÓ§¥¥¥Õ¼_å¼Gå§\hrN½P9ïcuÒI'Ù·¿ým¿­àõù‹YAAé9KO7.•ùUίt¿©šgqñ[´è~©­Ö²eÅvÕU/Xuõ>»ýövà CèæWh!wܑ鷯½6Çÿ.@äIJJ²Ë/¿Üª««mõêÕ¾¨Š—]v™ 0 æóšc}ðX–ë‰"U ] L49T™ðŽ;~íc#GŽ´¡C‡Z»víümM(¿øâ ŸT&&&Öëû¨D¸þN¿Çë©§ÖùUU[s5HæWG§ûïòËùý9~ü~±­Žûz®RRÕèî¼ót ó+´°32mÛ¶ ¯yå•¶7ßüoKMM c"ŒÖý¦L™bŸ|ò‰uîÜÙÇu5×úà±ü,µ±ž Q@èn’P)ñ`¢¸råJkß¾½]tÑEõúú»wïöS2Ú¶m{È©ÇJU~‰µ Zÿ|‚ùÕ‘)ü˜ÐÙïÏÂÂíürZ•òòJAê:qâ ›?ÿ‡­ú†hÂü ?œíã²¢¢Ê.½ôOVV¶‹Nˆ@mÚ´±ž={5xØÔëƒõùY„õD‘Š]/¢ÒÇlÅÅÅvÞyçY|||½þßõë×Ûž={üÔŒN:Ñ™ Þ.,ôªL ÑÅÆÆ0¿Šàù•Â$A%ȧŸ.ä—Ъ\{mŽåçoõJs ]‚d~…ÖEãRãSjh¼ z5d}°¾XO©Øù *éÔŒK/½ÔÎ<óÌzÿ¿o¿ý¶_LG€ãòì³ïøõ–[†3¿ ÁüJ÷£‚%‹½ãWZ=/éð…¸¸öÌ3?ñ+˜_¡uѸ|ñÅŸz¥ø`Ì :5d}°¾XO©bèÑH%¿ÕŽGPj<11‘Žõ¦ªL¥^ùEùUäϯRRº[zz¢ß·jÆ%ó‹hQee»ìÆ—úí9s.´´´žt ó+´â¹¤*C^yåŸ}Üffö>p¿ÅÑ1­”Öà~þóŸÛ‰'žØ¨_÷xÖ÷ga=@¤¢"$Çä•I8^A5È1cRèŒͯ²²úrÿÐ’¦*-­°ìì~6iR:Âü ­Ü„ i6n\ê!!f´NmÛ¶µ^½zY·nÝ"öga=@Ä>ÓÐ|-:”»âŠAtFˆ\vYš_.,´êê}t Åäälô磄„Î^e@dPõVU‚Ôß ‹Ñ!ÔAšIQQ™ïð`BFF"é鉾q½¤äs¿Ÿh ãO›¶ÂoÏš•eÉÉ]è Bh.©q+·Þú’UTTÑ)ÔBšInîf¿Ž“Bg„Pvv¿CîgšÛ¼yùVX¸ÝRRºÛ¤Iéta&Nd©© ~xŠÆ3ø AHh&Ï?_ä×±cûÑ!4zôÁ€ë³Ï¾Cgš]eeµýæ7y~[Uåbbxéˆ4·³gŸï·ï»o5U!¨…Ý0ÐL Jýš–֓Ρà~U%.šÛ¢EïXIÉç–žžhãÆ¥Ò!@„R•ñŒŒ$+-­°… éþƒ $4ƒ¢¢2ßÌœ˜g©© tHé~íÝ;ÞÊÊv†4»GÉ÷ëm· $á~õ«Q~}ì±·è þƒ1Ð òó·úUUš'„WPrÍš:Ðlô¼“—·Å\ $ùT29¹KÍØ! Y¼ûn™_ƒ Â)¸ƒû€æðÔSëüzõÕC‡N§\ýCÆ7ÑŽ $4ƒ¢¢ƒÁ¸þýèŒ8°ç!÷7Íañâ"¿Nœ8ˆÎBBãYÈœœV]½D=‚Ð ‚`!Ã-55áû€¦VPPj%%Ÿ[FF’¥¤t§C€HNîâãZã;?+ˆz! ‰©‚Kqñ¿M2Ü„TåžÍ›Ë­²²š49U‹“ÌÌÞt2Á¸^²d#ˆz! 
‰•–Vx(.11Îââ:Ð!!¦ûW÷³Â¯ºßhjo¼Qâ×sÏíCg!3ztŠ_© AHhree»üšÐ™Îˆññ±~% hyy[üJÕi |‚q­q®ƒ6ˆf!j{÷îµÒÒRûì³Ï¾ö±ýû÷Û¶mÛlçÎõþº_~ù¥Ý={ööãUUUGý8Æ9 ºïðkJJw:# ¤¦&øuóær:ФtØByy¥‡ðU‘@¸è µŠŠ*ÙD½ºaV^^nóçÏ·6mÚØu×]gÝ»µñü•W^±U«VÙ¨Q£ìœsΩ××]»v­åææZFF†;ök_²d‰­[·Î.¾øb;ýôÓ¹#Z¹Ý»wÛŽ;üªà\ÇŽ­G~ãœqh Ú¸,A¥@æWá×Á¯••ÕüòšTaáv¿ê°…˜μc~…0Ò!ª©ñžœÜ…ˆ¬Àá±;¡¦'ûáÇ{Ÿ¥K—Ö¼ûöíöúë¯[||¼9²Þ_7--ͯ6lð¯][uuµYLLŒõë×;!&‰[·nµ]»vù}¹oß>¯øÑGYee%Ä8gœE„Œ ÿY4̯¾ B÷;M¥¤äs¿ÕˆÁü áT•§"$@t`}ŽŒ $Bïì³Ï¶nݺÙ{ï½ç&ÉÉÉñ Avv¶™êKÁ«äädŸPò±ýë_VUUåá(N\hý>ýôSÿ]¨K'gèc`œ3Ρ¬l—_:3¿Š=zt>ä~€9˜_áxUå™[DÖàÈB"ô€;v¬ß^¾|¹­^½Ú>üðC;ýôÓ­oß¾ÇýuÓÓÓýº~ýúCÞ_XXè×ÁƒÓù@a6Ù½{¯½új‰Í›Who¼±Íß>Æ9ãÐXÚµ ÿŸ`G›C1¿ q•—<í³k×X:ƒùB*ßTˆ¬À‘ÅЈ B)¥0ÓË/¿l±±±6zôè}ÍÚ²eËìÝwßõòÓª ·gϯשS'KII¡ã[1mÌÏßj99ëìo+±þóß~ ´kׯ èngŸÝ×Î:«—eg÷³¸¸tãœq@=ýö·¿õ}2ÿÓ×W\ao¼ñÁÜ€¨BQ#HIrr²p úz Yõïßß6lØ`EEE^Na)…¤TE®m[ ®¶&:1]ÁÇ%K6úuÍš«¬¬®ù¸‚ýúÅÛÞ½û­sçÛ¸±üÀïË§ÞæÎ]ë!ÈŒŒ$6,Ù†M¶#zYBBg:–qN§PG‡ìË/¿¬yû—¿ü¥·à¹õ”SN }̘‘k3gæòËšd~¥dqqqTͯ¢sK€èRw}°îÇ š„DTØ»w¯½ôÒKZêܹ³oRE·ÓN;­A_WA(¤Ö­[ç©ÂÂBÿ Aƒèô¦àcnîæšðcaáöC‚11m-3³·yæ·,9¹½ù-ëÒ嫉á—_îµ-[öÙë¯o­ù:ºªRRº[VV=:Å’‰‰qt<ã€ox¾Úúc=lëÖ­¶oß¡ÿÖ6mÚøÇ@ã‰õë§Ÿî¢3˜_!¤>û¬Ò¯:¨ áÇú AHD…¼¼<+++³ïÿûöï|Ç.\hK—.µë®»ÎÚ·oÜ_·oß¾gï¿ÿ¾}¯ºwïî•èмÊÊvYAA©=ÿ|‘‡|T²æÁî?ÁG¿÷½$0›wïÞmŸ~ú©UUUÙþýûý¤ŒSNéaii±vá…©þ9úZª"¹|y±‡!õ½Š‹wx›7/ß?'==Ñ¿‡*FêJ0’q@ ¨$­9KØuìØÑ’’’¾6¿Ò‹pªX ‚0 ÄÍ5Ç(/¯¤3¢h~%šWEÓü*š㛹%@t`ÿAH„ž‚K¯½öš™233}  qª—››kçw^Íç¾ùæ›¶}ûv5jÔ1}mª ªp«W¯¶çž{Î+Ò~úétz3P0qÙ²b{ã’šŠµÅÆÆÔƒ`b|<ÒdñhtâºÂ“j¢ Hyy[ìoÛ\Œ Z vÅÈŒŒ$KNîÂÇ8D©`R»Bu˜Ëü*ìsÕ` @S ^k(**£3¢l~uÊ)§Ð)QB± ‡®DhßGB¡÷ /xpiôèÑŽ’ .¸À«»­Y³ÆN'Ÿ|²¿_Aª;wÚ©§žzÌ_?==ÝR*?-úzh|›7—{•G€Ô ÚAGŒèe©© vî¹}Ž|l úÚÙÙý¼Éá‚‘u+Föîï?—‚‘TŒdœ¢KR¨]±áÜÏ„hjzDôúCuõ>‹‰iK§!ƒñ@´"‰P+,,´?üÐúôéciii5ïïÖ­›Wƒ{õÕWíå—_¶‰'úû÷íÛç×îÝ»øïöcú't’}ûÛßöœüŸÿ UV¶Ë…¯¼²É«=*X¨ }Um1-­§‚lÊàã7©ŒÔϪŸYÿ…#’T˜ó‰' ¼IP1ò{ßKò«‚’`œÂ)˜§”–VÐQ2—­}¿ÐTtÈ’*ë€&Í3áá›Wªiœ3¾ÑŽ $BM¡¨ÓN;ÍKC×5räH:t¨µk×ÎßVÀé‹/¾ð0Ubbâ÷ó÷IHHðÿŸ*qÇO!Á 4¨«Þ®MÁGUQ ‚ ê}­öÁ5¦­ed$y›:uøaƒ‘AÅÈ@튑Æ%ŒdœB$!¡sÍœáWRòù!÷;MI‡C-[VlkÖ”Øøñè D4®E¯RñíB"ôŽ tèСæöÊ•+­}ûövÑEÕëëïÞ½Û+ÒµmÛöjt8ºc >j#_zz¢}vïV|üÆÛ£#—,Ù蛚êVŒ ‚‘ú÷ëJ0’qˆ\ªÖ¤¹ŒrU^Ñá¤ûWó:Íÿt¿ÐÔ†Mö äóÏ„BFãZôš"ÑŽ 
$pÀÇlÅÅÅvá…Z||ýÂfëׯ·={öXÿþý­S§NtæKðQ'›¡?ÝŽäàã7>øÖ FVVV{2è#‚‘Œs@øžûSRº[aáv+-­ðÛ§²²]~è…îã0Ïg­‡š93×òò¶Ð@ÈãúÜsûЀ¨Ç®Là€víÚÙ¥—^jÔÿÔü·ß~Û¯ƒ¦#k!øX?ú·«Ô„`$ã>©© „,.ÞA2äóààþ 9ŒÑË«ë9¨  ÔÒÓé ŠŠÊ¼i|¯ÍBôìÙÓÛñ¸è¢‹üš˜Ý›Ì>6.‚‘Œs@øÁ¸W^ÙdcƤÐ!!µ|yñ!÷7ÍaܸT›7/ß{ì-›3çB:gÉÎîçæˆv¤€ŠÖ`ÁÇæE0’qˆ|ýû Æ©*$šÎþýûmûöíÖµk×s¨X+++³ÒÒR;ùä“í¤“NjòïÜ¿Áý @s¸ä’„\°`Íš•eqqè„j×½{wûä“O¬S§NþñÃý;w¶O<1}¡×þ‚×ø~ô£T~90‚ŽÑ7k‡îzÔÛ›Î7#‹ŠÊFÐÊûû04 IDAT B6­?þØæÏŸogu–ÅÅÅÙòåËýý;v´Ûo¿½É¿¿æa’–Ö“;Ðl²²úXFF’åçoõ×n¸a‚PÍ㮺ê*ÿX›6mìºë®ó`dà•W^±U«VÙ¨Q£ìœsÎ E_(Ô\V¶ËÒÓ©&ÀRpXõ >ªkyuƒ‘Gº FÐr´‰9!¡³•”|n¥¥–˜G§4¡ââbÛ¶m›zê©Ö«W/+//oòï©ûUs.UáÒý @sºé¦avå•¶Ç{Ë®¹&ÃbbÚÒ)Í<®G6|øpËË˳¥K—ÚĉýsU òõ×_·øøx9rdhúà‘GòýzýõCËüAHŽàcøè>š4)ÝÛ‘îc‚‘4ã_1m½RÓ²eÅþ\#hyçžÛǃ˗³©¹‰uíÚÕ7Ä7§W^Ùä×Ñ£S¸ÍN!|U›>ýU›6m…—J%9„j§ ãرcmÁ‚æÓËí³Ï>³?üÐC“}ûö Å¿]‡j(Ð,Ϫ4" D ‚¨‹`$ÍOÏ¡T$œÐt¬C‡æÝ8®9Tíû€ævóÍÃì‘Gò­¸x‡=ðÀ›:u8‚PÍãxTðqýúõöòË/[ll¬=:4ÿvÛ¢¢2KLŒóñ ¾B)‚¨/‚‘4½ŒŒ$KIéîá„üü­6lX2¥¾i=9¹‹ßÏ´U›5+Ë®¼òÏv÷Ý+½µž›€0 ‚’œœl'œpB(þ]¥¥>nEã8>>–;€ZB!PYYmkÖ”Ô„Öt[ï«Ðê«!ÁÈÔÔKOO´ØXžf¨küøvÏ=yöüóE!Cdñâ"¿*pB¥O@Kš8qýþ÷¶bÅ&»öÚ{ñÅŸÒ)lÿþý¶}ûvëÚµ«W$,++³ÒÒR;ùä“-..Î>ûì3ëÑ£‡µoßþkÿoUU•íØ±ãˆÇÑíÝ»×^zé%kÛ¶­uîÜÙŠ‹‹í_ÿú—vÚiÿo»ñÆ¥V^^鯯i€C‘P"ÁG´„ú#‚T¸#øÔm‚‘˜]rÉÁ ¤ž?š÷/-íñÇÇÙ™gþÎrr6úßéÁßòhü±ÍŸ?ßÎ:ë,>._¾Üßß±cGûþ÷¿`ž—k6vìØ¯ý¿K–,±uëÖÙÅ_ì• Q?yyy¿ººúÀü¯Èbbb¬_¿~t`=)ùÚk¯yø433Ó:tèà• UR}~ÞyçEèïѧÕÕûìᇳ-%¥;w6‡Ah…>"ŒàØ©j`AA©=ôÐZ) ²é~ÔÆõñãP½Ðjèyi„4¯4÷ã/´7ßüo‹‹ë@Ç4"… UÕQÕkKNN¶’’Jöïß¿æý ìUUUÙ€¼z$êç…^°½{÷ÚèÑ£=)\p½ÿþû¶fÍ4h|òÉõoª¨¨²‹.ú£••íª³àðHœ­€6Mk#¼b¯¼²É¯éŒÌÊêãÕ”FÂJœ§OÕ,Xg³gŸÏs^# ª.Õ®¾ÔÔsù'ž(ðÛ ¸К¨º\~þV¯B}íµ9öøããí7¢®]»Zvvö×ÞŸžžîAÈõëׄ,,,ôëàÁƒé¼zÎãÔw~ø¡õéÓÇ«nºuëf£F²W_}Õ^~ùe›8qbDý›5.5>õ:Úý÷á—€£`·-ÐtÒ·ª<._^ì!°ââV^^yÈç|DØÔ/™ëŸ£0ä˜1)vî¹ÚäÖÓ:Ó‘€PHIéî_-zÇ«4Ïh8Uš6mšÅÄ4ÏŸ»ºUÁgܸTKOOä´*ññ±öâ‹?µ3ÏüÀ ¤Âh 5• k8p -[¶ÌÞ}÷]Û½{·WܳgW„ìÔ©Ó¹` WÏyœÂ§vÚa+iŽ9Ò†Úla4–o\êãR•ZŸ{n‚%'wá—€£  4UwTð1¨ö¨“øëV|ÔÆ4UÁSð1;»ÁG„ÞÑ‚‘+Vl²’’Ï}¬¨Ýu×Jÿœ#zÕTT@XňTW\1ÈCt÷Ý·š d#;\ ¡©<øàššû€Ö(55Ážyæ'vÁ ¼ŠqbbœÍš•EÇ4¡ØØX¯¹aÃ+**ò E* ©j‘mÛR•óxæq‡ A¶Äü¯1̘‘ksç®õÛ 's ߌ $ÐJK+¬¨¨Ì\Aø±nðQ!° ø¨ŠwTºC´«ŒÔ8Z¶¬Øƒ‘¥Þòò¶x»÷ÞÕã¡Hm;¶Ÿ‡$õ>"…*ªâqaávDªB$"ËâÅE~à‰*|êþ µÒkO [ýüç‹íž{òì”SºØ 7 ¡cš B®[·Îƒ………þþAƒ8G§ 
µƒ‘ª©`äo”ø8+.ÞáWµXã!H…"ˆÔ8Óm}Æç4…›nfW]õ‚ýæ7y¤‹‰¡:P$Q5O¹þú!Üw€VO_ëСiÓVØ7.õ¿£'O>ƒŽi"}ûöµ¸¸8{ÿý÷­¬¬ÌŠ‹‹­{÷î–œœLçD1Ue2e™ß¾ãŽLÉÔAH ¡¶nÝjûöí³ýû÷ûûª««í£>²¤¤$‹õ÷©º£*¿ÁGU~¬[ñQUa23{{ K•©ø4Lrrß lÒܼ¹ÜÇÞ’%}<ª «®ÿ?{÷oÙX7ü1$ÕÄüQæe’k$Õä~Kó"$”[¨HoDÉëᕌð¢QR”B(E!•É%b*Mª 1i* É2nõï÷ô®iÏq.kí³×>kŸóý~>ûsîgíõìµÖóÛÏz~¿'±bdˆäãH<^sÍ¥óù8aÂ"¥Ïsè†w¿ûu9!ú´éÓkUÁrõÕ·çb(_H" WvØFy +V£‹b =ô·ü=:oÈ«?Μ93]rÉ%é¹çžK¯}ík5Ì…¼9ä{¹€^$AN:E£@!¡ÅÃ?œ“£úzà¿¥«¯ž•n¹åñZñFVœƒ»ì²z~L›¶yzôѹy•¦+¯¼-'Hþ{ÅÈ”¾ò•_§\àyçuרÛ>û¬õÏxógiöìòé}hÒpçœsSŽ+¢(ƒ× €^‰X±ªñ^‘¿ùÍC鳟Ý*'H2´\p¾yÙË^–^ñŠW¤?þñiÒ¤IiñÅ×xcL$>Fd¬bÌêàƒ7Ð0ЉPÒk_»DÚpÃåÓºëNJ›m¶Bš4iQ :·ÞúUùbÅÈ+®¸-]{íœôóŸßŸn¹åa@3Þ”-4.OˆÞf›¯å•™"©_‘æŠb 1™=Äë«h@/Šdþ(ôµÝv¤ÓOŸ• ]xá;Ä!%,µÔRéðÃÿg7t°ä’KæDH«AŽ=O<ñtÚi§oäñ¨ˆÏ?G+ÀÀ0˜± -^xáôä“OæÏ_ùÊ—¦Ýv[%­»îÄ´ÁÓË_¾hZf™e4ô¨˜Èùîw¿.m²É’ù<ðÁ'Ó¯ýç4sæ½i…w €‘‰û±ÊøŒwä$»³Ï~»Fi¨HV m´¬‰ìô¼-·\)]uÕî92âõ×?3'CNžy˼:ä9çÜ”®¿þn Ò@7Ýt_^1«õõ€^Éý?ÿùûÓª«.™n»í‘´öÚ_ÌñÃwË-·¤gžy&­¼òÊéE/z‘#.¸`vN*Ž$È8¯~ò“=%A@˜µ -^øÂ¦¥—^:OLZpÁs²Ô"‹,’¿ç9Ô%V^ÚgŸµòçøÀt Ò@ûíwYzöÙ¿§=÷\#­µÖÒ€Qc¹å&ädÈÍ6[!÷uï}ï·òcîÜg5Î0Üxãùãë_ÿz1Äù²×^ßI»íöÍôÄOç߯½vœ ßBšæW$IÎsè¶XeðŠ+nË+žpÂué°Ã6Ò( ñéO_Ÿ®»î®œ(2mÚæ€Qgüø…ÓUWížû¼CùÞ¼Uª¿ò•íhÓ6Ûl“?Nœ8QcŒr³g?vÚéyÈX5ü˜c6ɱ¼Ä sÜ…hˆ˜(}ÆÛæGyMN¼cäÅëpøáWçÏ?ÿù­Ó„ V‘`ô:à€õòꫯþòœÔõÆ7~)tÒL«C¶! 
%AŽn±‚j$¯½öóù²ÒJ‹§ŸüdÏôÑn, :Ìx€™2e¹œ€“ªwÝõ¢ôÐCÓ(#èÑGç¦ÝvûfNþøÐ‡ÖI[n¹’F`Ô›ÄyqàWäódÏ=×H7Þ¸T &!æøã7Km´lºûîÇÓ6Û|Í L#$’Q£ýï¸ãÑ´Þz“Ò´i›kÆŒEY(¯„ü£ýW^å.’¾Ö_ÿÌœùÄOk ƬˆÍcµðH‚¼þú»ÓrËMHW]µ{^Ù}üø…5ÔdÁ>¦N™2%Å€‘7nÜi«­VN_ýêÍiΜ‡Òý÷?‘¶Ýv ÓeGum:ï¼›Ó’K¾8]yånù#Œ5Ë.»Xzÿû×L ,Òu×Ý•gŸ}cN”\ge4Ð8úè£ÓÔ©S5Ä8óÌÒ;|=]vÙïò×ùȆéüówL«®º¤Æ€͘1#-¤šgâÄñé ߑÞüæ/ç ×+®¸x:ì°4L—œtÒÌtì±?ÌIñ:ÄJ?0VExÌ1›¤w^=í·ßeiÆŒ;òÇ‹/¾5uTÙ[N#1ªÅ1ôÑ3òÇÇüg?»UZ}õ—kè‰ µÑFËæfvÚééðïÎ+î¹ç¦fçœsSnïpöÙo—Üÿ'’¾®½vtÁ³Ó^‘“ÂfÌ8'í²Ëê9Qr¥•×HŒ*·ÝöH:òÈkò1¢XÉÉ'o™y »$B4ØÛß¾jN,ˆÄ¼½öúNN†ŒïQ+®¸-·ó³Ïþ=¯pe’;<_ô[n¹Rúô§¯OŸüäÌœ$vÑE·¦w¿ûuéøã7ËÉbÐËî»ï‰ŸwÞÍ9.ŒUQ=t£tÀë¥ Ñ@0üçcê”)SR<hžXò©§žM×]wWºøâ[ÓküGzÕ«–Ð0võÕ·§vøzš;÷ÙtðÁ¤ÿýßM5 ÃbÕäw¾óµé±Ç榛o¾?Ýpý鬳nÈ}éäÉóïÐyG}tš:uª†¨Á£ÎM'œp]Úm·o¦ë¯¿;ýýïÿH{ì19¯Ò¾ýö¯vLÀ™1c†DH€^°Ùf+ädÈþðÎtá…¿J¯{ÝRiÕU—Ô0+AFäO<;l£tÜq›¦qãÐ00„X/V«Þ|óÓw>šn½õÁôƒÜ‘N;ígéÞ{ÿ’V[íeVÐë0‰w÷ݧcŽùAzÏ{.IW^y[NæD߯|eûôßÿ½nZ|ñi$A‘©|1@8þøÍÒB KÇûôÝv¤3ÎØ6í¹çf˜Î9禴×^ßIÏ>û÷œí T³Þz“ÒUWížnºé¾ôÉOÎL\0;zêÏÒé§ÏJ;î¸ZÚÿõòï@“ÌšuO:å”ëóñ±`ÄÚ»ì²zúð‡7Hk­µ´€‘ ÐCŽ9f“ý·9awÆŒ;ò×ãÇ/ÆêW_}{þ:Äê¤{ï½VŽ­N ½ÁÌ€õö·¯š®ºj÷´ÓNßH×_wzÃNO_ùÊö9‘€þEâÅn»}3ÝqÇ£9iô ߡ½  "Ù,’Îâ"!ò¼ónNW\q[~Œ¿pÚqÇÕÒûÞ·FNRƒá˜5ëž¼òccQ£õŒÄÛõÖ›¤‘ ÇH„èa‘(pãûädÈHò{ó›¿œŽ9f“¼ò’Õmæ÷éO_Ÿ?üê4wî³yòûùç[n‚†€.›ªöÄ6ócêÔ)99.’ä"Ynúôßæ¯‹ÄȰêªKæ˜'’åbµÈøZLÐL±Òg$¹Ækãñ쳟÷ó%—|ñ¼$×X)4¾Æ‰£\$ƤñSOýY:î¸æ”vÚé9)àÃÞ í²ËêyÅ¥^ä#Ùáį›·ä„ ‹¤#ŽØ8í³ÏZye(`tŠd¸ˆaâ"ˆUüã»r‚äœ9åÇyçÝ<ï÷§LY.½þõs  •Q8îºûîÇsqŽxüò—÷¥+®¸íy+>F ¯ÕºëNÊ ñZõb¬ t–ÙcáÍßBãÒ¬—#! VOŒ èï}ï·Ò^‘“öß½¼ZRÓÝqÇ£é´Ó~–Î9ç¦yçWZiñ´ï¾ë¤=÷\C$ŒA±úc<>xƒüu$E« ^ýÝ9f¸è¢[ó£1C$ÚEÂ]‘9iÒ¢³C¢Í‹ñ‹{r‚jߤÇ…,¢ýcÅÇx # Râ#ЗÙcHL*dÁxD"À׿>;¯ªxúé³ò#!cõÈw^=ODoŠ˜@Ïó?¸#]wÝ]y5È«]¾ë]¯ËÏÙ„y ÉtñˆB! 
)Äꃱ aƒˆØâ‰'žÎñE< ‘”ñP$æ«GÆ#V”¤‘Ü«oF»þêWÿjÛøº¿¤ÇH>ö¤ÓUVùWÜ-¢hÀ`Ì£vÜqµü¸í¶Gò*‘_|kžÀ×O:ifžð“Ócuž"  [î»ï‰œð‰±zP<§BLžçÉšÝ|N@ïZn¹ yeìV_DœÉ{Erä£ÎÍ+HÆ£UÄEƒW\qñyŸ…D¾ˆ‹äÆßÿþߟÇ÷ï¾ûñ~ÿ¦oÒã–[®”¿–ô´C"$À“÷§N’³fÝ“"cµÈ˜Ø~Î97åG˜8q|^%2&°¿æ5ÿJŒŒÏc’{»b%¦˜D«4}ÿû·Ï[A(¾.V} “&-:/ù1žƒ ôÀpEVƒ…!ŠÕ#‹¤¿(Ò0XI2WEÜqÓK^²pþ:>ïEüRüÞH‹„ÏxDñ‰Ø·xÄç=6wÞçÅ~Çï &VåŽýŒ¶ŒU4#ñ1>äS1Ð)!˜'’ ãqüñ›åÉï3fÜ‘¾ýí99A2&ÄOŸþÛühû‹‰ÿñ1&Ã/µÔøçýï‡þ[N|,’cõ ¾ …H®Œç±é¦+¤Í6[Aò#Б¼X»ÕÜ¹ÏæØ(Q,"âšâóH,#¶)£HŒŒU%‹¢}“$ãóÅë?irêÔó}ýÔSÏæçPÄUñ|‹¯#†‹¯C|/b±*Z“<#Áq‰%^<ïóáÅ(K"$ý*&·ï³ÏZùë˜ì ‘¿ùÍCóVGŠÉÿ‘Ðø^å7¥ ËÉÅê’+®¸x^E(‘P З1JŠDÈbÅbuÅø:¾±R$%‰ˆñu(›89¿)éè£g k"y1’0[±re$^ÆÇxD²cñ3€‘fV)¥‰‘­bBLä/&þÇjCÅŠ}++Hî%<½®HŒäÁ¡«4ö]­1â©xú~ýoSž÷ˆ§bûÅ Ú­_·®:Ùú9@/1Û€ößTþߊŽa ’˜?~ŠÂ¡ˆ£Ü8M4•DH ±$B%h¬ÿù˜:eÊ”€^ñÔSO¥Ë/¿<=ýôÓi©¥–êÈÿ<üðÃÓ 7Ü6ÜpC ì˜Ò ]vÙe—¥iÓ¦¥uÖY'?^ƒ£¢éõ¾El„ø ñ½L?†¾0¥Y³f¥k®¹&ÝqÇé¯xEzÁ ^0êúϾûø÷¿ÿ½û×Ôv?áøƒ@¯š1c†DHèÏhÀ拏ÿ7ÝtSºôÒKóó½ñÆó Ì—¼ä%ùg^xaš9sfþÙí·ßž^óš×8xÇØ±ÜtnD£Ýý÷ߟ~ô£]–Ó IDAT¥ßÿþ÷é…/|aZl±Åæûù#<’>ó™Ï¤Ç<­¼ò浀 ›l²IÞþn»íÖ‘7H{íµWZe•UÒV[m5¦^˺_«^ÑécÊ1QÍœ9sÒ!‡’_|ñôÆ7¾ÑEh„‡~8O¨~ðÁŸ÷x衇҄ Ò‚ .8`?2PßÒ }ïXŽôÕâ7Ç„øÎ‰1êÿï¾ûÒĉÓ , Ãu¶ }á?þñôŽw¼#xàé’K.I\pANÜl³ÍFMÿ9Ð>.±ÄØ¿¦¶³øIüíøéícB Þ»1qÓæm(4õú4Ö˜[âñŸØjâàBš^ôç?ÿ9Ý{ï½ó¾Žê¥K/½ô¼¹áºå–[Ò¶Ûn›6ß|ótå•WŽºÿ„NHûî»oO<߃:(]{íµó¾^~ùåÓ¦›nš?›úÓŸæýì®»îʉ’!n<œqÆ鵯}혞ä0šåªFâ˜xî¹çò6ã¸7أŹ瞛Ž8âˆùúáoÐN<ñÄyEV¦OŸž;ì°ôÊW¾2'4ÝÉ'Ÿœ?î²Ë.cî5íôk}`‘Ò-#±Í¦N›ŒÄù»Å[ä„¢/}éKb' 1V_}õœ¸1‹.º(í°Ã=q‰ßÄo£;†¿ÑTÇw\:æ˜còçgžyfzßûÞ§CßÛ…~ðÛßþvŽU#žóáÉ'ŸL/~ñ‹GÕk2ö‘Ñ{ ‰xXüÝûñ·¼Y¢Xè/ùËô·¿ý-ÏAˆ9ãÆë÷w›6o£×æ AÌ+kæõi,æ–Q§(ö1K!Þ3/³Ì2y¾õh=¿Ä9€øOü×äcBì×9!éIqÁé;ñ>ª¯¿þúécûXü¥½vóÿÙgŸÍï¹çžôÿþßÿK‹,²È¼ŸýáÈ?ßi§Òw¿ûÝôÌ3ÏÌû™É`£“Éü#+úÚXM!!cÈxS¼îºëæJA³fÍJW\qE¾öEÑŠ@Ý`ƒ ÒÛÞö¶ü±ébð7úŠ(ª0«Âuòµzë[ßš…_ÿú×éU¯zUWžÿHl³é†Û&#qþ.¼ðÂi»í¶KgŸ}vúá˜6Þxc/$0âbÕÇïéZèùC©k®¹æˆ÷½b£±Iü&†¿Ñ bµ¶óÎ;/OUÚ¾üå/w%R?†¾÷_ÿB¬«@ŽFícŒSB“¯#‹¿GGü-o†ÓN;-úhM*Q¸ýàƒNGydãçy(ŠŒfŠÀ¿õWì3ÆéV]uÕôÑ~4íºë®={Íç#}-’WÐÛ±ßHÅb¿Î‘IO*#‹×O<‘®»îº4sæÌœ‘}Î9ç¤ÝvÛMCõÓn½zóÿE/zÑ|I!*ÓÄ£èÀ"¶`2Øèc2?ÀÈû⿘“ _þò—§K.¹äy×Ô_ýêWyâQQñv¥•VJßúÖ·zbßbbRæ;ÞñŽùbб¢“¯UT?޶œ;wnמÿHl³é†Û&#uþÆ ‰ˆbÀGì4ÉQG• A4±ïMâ71œø^ðãÿ84Üc=Òe—]–~ô£¥;ï¼3ßTÖ¡ï­·, 
ÊNš4iÔ¾&ca×€‘ˆ‡Åߣ'þƒœ¿þõ¯é]ïzW^‘8&-F¬¹ÞzëåŸýüç?O—_~y:úè£óŠ1¯a±Åkä~(ÿ¢ÈÁè¤È̯(öyÜqÇå1²¿üå/iöìÙ9xç;ß™cáˆozíš+ÎFúZ$¯ ÷c¿‘ŠÿÄ~#’ž‰­“ЦNš:è ´Ã;䕈ø·±tóßd°ÑÇd~€‘¢ÿøÇóç‘ÙßÀ×¼æ5ybc!®Û1ˆºÔRKåGë÷o¹å–<ñ1nÏ™3'ÝtÓMyÕïø­b òæ›oNO>ùdZc5*OlŠçÿû±ÇK¯ýëó€gŠ>"Vš.<ýôÓéÖ[oM‹/¾xZvÙeçûý¨&ûö²—½,-½ôÒ•žÓ#<’'~®²Ê*ýÆ«ñœÿûß§•W^9W®¢JÛöý»¾¯Ußÿûü‹_ü"?ÿx-"!¶Õã?ž_¯è³Cìc³ˆ¸s…V˜— [ÕÝwߟ[L2xÝë^—Û¥ì6}ôÑŽ¶uÙã©j—2¯C™ã¤ÇD«8‡ãÿýñÌçÍĉçý,Ρ¢ý7Ùd“¼üE]”¯510õw-â‹å—_>-ºè¢yÕíH‰÷Á± w\?Ûé/ÅF÷yuíÓPÛþÿøaïsÙX{°þº®øm8±J§ã·&ÅpÉkËÄpâ7š(Vƒ ï~÷»ó1«ç|õ«_Mÿó?ÿ3¬÷³Côו=ëì†Ú~}b! ‹ÆDýx ÷G¯~õ«Ÿct£ †z½2vÒ¤¾·µmbµ‡‡~8Ÿÿîw¿ËÛ)3®Væi§¿èd¿9œ}Éý‹¸>&·<óÌ39qf‰%–ðwã½@œ‡‘L¾âŠ+æÕ:†*ÌÒÎßtjÜx X«ד2ñ^'®)¸ŒÄªø»»ñ÷pbð*×1øÈ8餓rd¼¶‘ìçM«ßüæ7iË-·L×_}š6mZ:öØc¹ІÀ¿(r0:)rý‹§ZûýSO=5í·ß~9A²‰Ý:¿Ä9@·®Eò z?öÉøOì×!uû 'œ+™Ä ñ:ë¬3ßχs¨$íÜ+óÛ<-“ØÊ0Ìd~“ù:ëüóÏÏ„⺺í¶Û–ú›n¸!­½öÚi‹-¶ÈÕwú~ÿ#ùH¾~~øÃNÿøÇ?ruÜèëŸÿüç<ð$ qíßyçÓYg5dÁ‹ˆq=ôÐtÊ)§äÿ]XýõÓ\0_Ü?ÿéOš?_mµÕæ}ÿŽ;îÈñdô5Ñ_´N”ˆI›ŸøÄ'raŽcŽ9¦R[~ö³ŸÍ<öÙgŸôùÏþy?ßwß}ÓW¾ò•õ© R,´PŽuc²Hœƒª˜£U×Ùo¼1/ŠO¬¾úê¹ÿ*®³a÷ÝwÏ×ÿ¸VVé/ÅF÷yuíÓPÛŽ¾o¸û\&Öª¿îtüÖ‰X¥Sñ[“b¸Nĵeb8ñMçà7¾ñ|¼þçþgÓDÈ8ûK„ìÔXÁ@ýXÙó±Î¾a¨í×Ù'FÛí¿ÿþùúIX…¯>ì°Ãr‘Ѻûü*Ï£WÆNšÐ÷ö“O<ñÄy_íóÒ—¾4ßoéÄ1ÒNÑÉ~³}éý‹Ä™÷¿ÿýóï}ï{ó}£\pÞï=ûì³ù¹Äþµ>ϸ†Æ¹Ç~ßÉœíüM§®óCÅZu^OªÄ{¸¦tâ0c¨âïîÆßíÆàU¯7bðî‹äûâ5ˆkzß$Èß‹{iqÌúӟίië\•VU’ãÃPó:|ðÁ~ã˜8V¢àV«& lÛÝšƒ4PÑ•n%÷´ýníÿPÇš"Š(r È£CŒ¥E¬òÛßþ6¿§l½ïTåÜ/ƒ t~UÙV™ù¡CÅ9ƒ]KF{q´ÁâŒníÿ`Û¯³ ª¢­³ªÄÿ53þSd¶ñŸ¼r¼¡ M"a0.`±ÄmÜÐ*tâPÕ$U««<ÇvÏGÓ$¶2L3™ßd~€ÎŠ7ƒa³Í6«8üñéCúP~¾!b¦¸ÎÇàT“7‡ýMôç1!4úâÖ7³1Àƒ³‹,²Hnƒv Ö¶U_«h÷Øß 7Ü0'füéOÊ1kìÃZk­•ÞúÖ·æßÛm·ÝòÀǾð…ܦp@®¬qf FTÎ8ãŒüÆ„´8®¹æšü( µÍ¤èD[W9žê6T»TyÊ'Ã9&B$õD\1Ú7¿ùÍüšD,õ™Ï|&êÄßǶ[ÞñüÅNÀh×ßu6ú­x/úæ7¿9'?ÆÀ}\7¿üå/ç÷¹ñy•þRl4pŸW×> µíNîó@±v™þºÓñ['b•NÅoMŠá:×V‰áÄo4EwEâbË1ö¼ÜrËåû7qÓxÍ5׬e¬` ~¬ìùXgß0Ôöëìãoã\Žv‹±ð¸Y7ä<òÈ|^Çd›­¶Úªö6(ûùäy¿“5ã¹Ædœ½÷Þ;O¸‰{Yñ;q= 7ûØÇæûÿíüM'”‰µêŽ·ËÆ{M‰¿Gb Uü=2ñw•c¢Ýë¼»¢GL´Œ×"&$’cRe\ç¾÷½ïåc­¯²Éñ¡ì¼Ž·¿ýí9NîO<—(¸U¯M,RfÛuÏAªèJLÀ­3¹¨í×½ÿe5E9èïõ¨›"Ðy‘Pâ=iß$È*ç~Ùd ó«Ê¶†:‡‹sÊ\KFkq´2qF$NÕ]m¨í×ÙU ÑÖÙU âŠÿšÿ)2ÛŒøO^¹5£ITHˆÉa¡¢C'nU¹AÒÎ`u;ϱÊ–Ñ8‰­ “Áúg2jû\2 «~÷»ßå1i±S~ùË_扊ñF»5¹2&0Æ`éloƒ<ñf6nÇŠÙƒùþ÷¿Ÿû¹x3•s q:ú¸¸Éâ 
sˆ*w­×ëV1*žO<è#f‹¾3ŠXÄàA­¨*ÞÈÆs‰7³ñ&ºõÍøe—]–«*Åvâw§Û¶¿úÕ¯rÅüè³ SrÈ!yÂYÑOÆÀW<.¿üòÜçÅûáLâ+âÒ裋”7¼á y`¢0Ô6£2ÓpÛºêñT·¡Ú¥ÊëÐîqRö˜1…^bPòmo{[þ^œÛ³E¼çCßç×;4ELŠî[Á1nÄMÞN‹ Ù1+Ý")2®ïÑçĤ¢ï*Ó_ŠïóêØ§2Û®{ŸËô׎ß:«t"~kZ ׉¸v¸1œø‘ÇhØu×]óÇ8^ã¾Á 'œ6P"äpÇ ëÇÊœuö Cm¿Î>1Îç¸&ÆØzáo|cŽobß¾óïÌ—VW”}½6v2’}o_Q•:QQ>’c£;}Œ´Ó_tªßlgGrÿbÂ_‘pYØtÓMó¢3Ï<3Êc%þoÜç{õ«_~ò“ŸÌ»ŸÇÜ÷‹dÍiÓ¦åûÈE…òvþ¦ªÄZu]Oª<‡n\Sš:†*þ™ø»Ê1ÑîõF Þ]Å=²˜9”˜oómú›«P%9>”×ÇUŒ'bŠˆ»£Ÿk-ÐÔ¢!e¶]÷¤2EWêLîjûuïÙcM‘E9Pä€Þ %Åý§ˆƒ‡sî—A:¿Ú¹Î t甹–ŒÖâheãœ:÷¿Ìö»1ß¼L!ÚnP-[Wü×ÌøO‘ÙfÄò !U"Q0–¹Ç*XFã$¶áË“ÁLæoÿFT»7£ÜˆFƒ¨>â [§Äu?Þö½fG_þû¿ÿûyÕçb`l(EåÇ€é+ú¿øÿ1ÈVôuQpb 8)â ˆ‡â¦tTæ¹ë®»òßFSæ†÷@öØcÜŸGųÖþ<ª'oØëhÛvDŒÐÚG•ÑOÇE–Xb‰ü1úNJíf[W=žêÖ©vÎqRå˜(&ˆDŒÔ*V—Ø©¿c¨8Ë®ÆÐ ±zUßëå}÷ÝW˶b¨5 2DÕÁ˜ìE³~øÃÎ[»L¿ 6¼Ï«kŸ†ÚvÝûÜË1\'Ú¦I1\'ã·vc8ñÝI?q“8 >ÆñWˆkkL†Šó9nœ÷]ñ¦cƒõceÏǺú†2Û¯«ˆÕóúcñ!ö±mPåyôÒØÉHö½Rõi§¿‰~³ ûñ}kdq¼Ä¶c’LT)ø>&÷„ƒ:èy“6crM$O^uÕUéꫯžW½½¿é„*±V]דªñžø[üÝíþ¢ì1ÑîõF Þ]Å}þbÒ`Šûhý½6e“ãC•yQ¨«µXW¬àIq´Þ×kjÑ2Û®sRÙ¢+u%÷—Ù~Lê­kÿ«Î!Rä@‘E9 ÷ì¹çžùc$*Æ}¦8ã<‰Ä¦áœûecN^g:‡‹sÊ^KFcq´*…èëØÿ²ÛïFß^¶mÝTË>:ÛDü×~_¯Èl3â?y„qš€^vúé§§ÓN;-/«™Ò±Ärd­Ge„B™@O<ñD¾Tæ&@h]¢»¿›ƒ V‡¾ƒÕí>ÇÏûÛF™Il!n@}ò“ŸìØ$¶2í5ª´]'µÞx©vêFTˆ×¿[:Ñ&…N߈êïx¨z~·žwnD½ìå/yþØÉ ÿ1 •ƒú*®µ­+|WqÛm·åçž{n.$ÑúˆA™¾×ä¨ Z+·Ú|óÍs\tÏ=÷ä7³Q©§oUÞªvÞyç·ÆÂã?ž¿÷׿þ5O¾±è“;ݶí(nº÷w<<üðõs—F•ª¨,ñpyî¹çºÞÖU§ºuª]†sœT9&bâsˆöo•¯û‹©ÂÒK/]{\PU\ë£Ê_ë#nðÔa ›ÅàøÝwß]©_ ÝçÕ±OCm»î}îå®mÓ¤®“ñ[»1œøn‹±Þ§žz*Win«}ík_›ßëÇØB$æÔ1V0X?Vå|¬£o(³ýºû‡Ç{,¯|÷׎=öØ| O?ýtWûÇ2Ï£—ÆNF²ïí´²ÇH;ýÅHô›MØ¿âû׿þõóÅ÷ŤÍ(‚ÒŸb%ÝÖÉ1íüM'Tµê¸žT}âoñw·û‹²ÇD»×1xwÝ{ï½ùcL0-{ÝïïÜ,9>æEr|¡Ýy‘d“EWXa…¼RE«²ECSÇü¢²Û®kÒPEWŠ÷uÍ­*»ýºö¿ê±VW;T}uÏIËóʆs jj›´ª{nY;óÊZ¯Íæ–Q‡sÎ9'?Î?ÿüœÌ¦OŸž 1têÜ,éäuf sx°8§ìµ¤®>®Ì¶G:Ωkÿ«l¿î¾}°B´ñ8 ÑÖyT}â?ñŸøO^ƒ³"$=-–²-ÄR»Q]$ª¯¸âŠó¾_æPÜp/s(nÄ6‹›1xÝßM€ÖÁêÖŠ V·û«ÜP);‰ík_ûZG'±•i¯‘0’7£bÙã¸ñ•öÛo¿\嵿Ê×uµk머ÙÛêÁìzÇÚ‰6)t{2™ó;¸Œ_E…š;ï¼³ömØ‹7µU“‡¢ªNLxè+&^¾å-o™/Žl—úSÄIÅÝ¢ŸhWÜ, @Q1ë’K.IïyÏ{rß«›ÇàÄ ^ð‚f¿‘\¨þ·’Q¹)ªU}ä#ÉAâø[i¥•òý†nص¶®z<õJ»të˜øà?˜‹×Ä@nœÛo¼q~o¯EìKëê0}ß»´ŒfÅDº˜W¥_•Óé}jâ>÷J ׉¶iR '~c,ŠÊºá–[n°:püN•{eÇ ëǪžîÊl¿®þáÿøGƒ +Qý?^b[Qµ»øy7úÇ*Ï£×ÇNºÕ÷vJÕc¤þb$ûͦì_ñ}Teo_–\rÉ~?î݆brg»Ó 
íÄZ¾žT}âoñ÷HÇßí^oÄàÝU\³zè¡!÷þûïÏû›ƒ0Xr|$´¿jg^G$ý¾ï}ïËý[Lô|ñ‹_Üïq3PѲó6:=¿¨ì¶ëšƒT¥@ks«Ên¿®ýoçX«£ª>ºç¤åyeíMn“VÝ,rPv^Y0·Œ:El +Dšˆ5cEº}÷Ý7Ç'ïz×»†uîƒtò:3Ð9<Ôüè²×’:ú¸2ÛnBœSÇþWÙ~Ý}û`…hc¥ÔÖX¼Žã ç!þÿ‰ÿä0È8›& —Å2¼q£*.‘±=nÜó9íä  ²7ªVwò9tCÅÿÔvÛuRnF™Ì?¼ãÁd0`¬Zyå•óÇ+¯¼2O ªZ%§ŠˆAxàü˜8qbå¿_f™erÂfLªŒÕµËô­áüc¿?Šð1ˆ}ÃË^ö²tùå—ç7Ù[mµÕ°ö3úðèÏãyÆçMX5»i¢ rô³sæÌIÿû¿ÿ›«O½õ­oÍ_W96†ÓÖU§^j—nˆAµârê©§æG\?vØa‡tÊ)§ôsñj™êÚcIqƒ¡o5Û¡ú±ÑÐêÚ§±v"VnÛ4-†¿1–ÄMÛë®».×ÒÄêL‘Ü_ötÙ±‚¡ú±²çc]}C™í×Ñ?ÄÊwQ•“M6ÉãÛÅÍø¸Oïûç©£ ª>c'ÝSõµi§¿É~³IûW(’i^ñŠWä±oq‹¸?b™¾b5ݰüòËÏû^;Ó Uc­:®'íÄ{âoñ÷h‰¿ÅàÝWLj,®«ƒ)^›X ©¬¾Éñ­ÿ§Ê¼ŽX™%*ã<.VžhÕÔ¢!e·]פªZ;=·ªìöëÚÿvç)rPEêi“º(r@¯‰ã+ÞƒÆê|aÿý÷Ï Ï‰ížûCŠݸΠ5?ºÊµd4Gk§}'÷¿ÊöGj¾y…hë8Úyâ?ñŸøO^ƒ¼š€^³"€H§o•¹ Pu°º7©LðoŽ‘¾e2ÿ𘠌U1˜qÔQG¥_ÿú×éâ‹/N;î¸cmÛŠ˜göìÙ¹*NT¼ª*V¯œ9sf^y¢L_Wô5­UµZüñy¿:è üF5V!Êx¿úÕ¯JU²HTËŠjt×^{mîó®¸âŠü¦½åGR1q,*KuJLNûò—¿œæÎ›.¼ðÂôƒü W³*»Íá´uÕã©›k—:^‡vœuÖYy0,Îçx_“übÈ`çNq.6-è–#Šª¹­ï/£Å~ô£üù@éêÄFC«kŸz%¬+nN¬2ܶij 7ܸVüF/øêW¿šû­¸¦žxâ‰ýþÎÚk¯nºé¦|¸ì½‚²cCõceÏǺû†Á¶_Gÿ‰§!VÃ{Õ«^5býcÕçaì¤{ª¾6íôî7{eÿâþo¬ŽÑ:©%®“?þñç‹ï#~‰‰F÷Ož<ùyÿ§x?°Új«ÍóTý›N¨kÕq=i'ÞkÒ5¥Ûc¨âïÑ‹Á»o¹å–ËcÍ‘G9àï=õÔSiúôéùóõ×_¿ôÿÎëˆÕ;¦Nš'#Gòš\4¤ì¶ë˜ƒT¥@kû^eûuì;sˆ9¨—"õ´I·(r@¯ØtÓMóÇ8†sî—‰Aºq*Î){-mÅѪ¢ïôþWÝþHôíý¢‰ªÄÿ‰ÿÄò èß8MÀhƒî¡¸ÑÓWÕ@en´Vß{ï½yÀ?ª4\tÑEý&:vú9ö§ì$¶¨ö7CÜ€ŠjÃ1nR×=l§vJ=öX¾ñRv›ÃÌâ¸lšá´I·T=¿[Ï;7¢€^׸8 þÞ÷¾7'C¶ŠÉ;qs7&ýéOÖ¶Š$˨÷ç?ÿyÞ÷ÿþ÷¿çïµöý‰¤Í0mÚ´ùþ¾7¢c€¦(£Ÿ‰A¥øY«ˆâÍtô»1xÕ“âMu¬p_·ŠÊ>øÀJïL~z÷»ß'BÅ Cl»¨´×WÕÿ=’ŠŠ_¿üå/‡µ/Q1°¯bòG±bzÙmViëáOƒ9á„ÒÑGžyæ™¶¿_¶]†j“n‰6Im‘Hý‹_ü"O舯#ŽH;ÅŠ cQ$†DÿÉ…3Ï<3ß¼}õ«_«VéF*6ªÃŒTlTg¼×É}î…ø­“±ÊpÛ¦S1\7ã·¦Äpâ7†+n’‡]vÙ%ßÜíïQœ£Åïvr¬`°~¬ìùXWßPvûuô‰Å5/&§µžïŸûÜçºÚ?V}ÆNº·/U_›ªýEãAMÚ¿¡öi×]w/¾êåq>Å}±"¾/&%}úÓŸN÷ÜsÏó®­³fÍÊ}m¬jYhçoR師kÕõ¢x¯)ñw'¯âﱋÁ»/ŽñXA#V¤ˆq›œ{î¹éá‡NK/½tzó›ßü¼ŸÉñ­úKŽUçu¼ÿýïOO>ùdî_|ñ~§jÑþæm„ºæ•ÙvsŠ¢ô1d(uì{•í×±ÿíÌ!ª£Úyc¥ÈA·ç•µûztÃP׈¦9(;¯¬õÚlnÝrÿý÷ç­+õµsî—‰Aºq)ç u-©3ÎjÛ#çÔ±ÿU·_gß^¢í‹÷Wˆ¶Îã Êóÿ57þmy½ÿÉ+Û¬ɨõQU8íȬŽÇBÕ@¡¸ ƒøÝh¬Ž›W/yÉKr°•¢“©û9ö§ïÍÿÖ•4ûÞ€zéK_šo@E†z|ý‰O|b¾ÿ7^¢òòG?úÑAß —m¯¦k½Ò_Õ¾*í7^¢JA«¡nFõ·Íá´k ÒGe„øÛ¨¸Ú÷†Oü¯8Vú[Úº¯¸¹¿ÿ?ÿó?ó-]åûj“n©z~·,nD½îðÃÏqCTP ˆ1ø²Æk䚸>$É… 
IDATÿáÈUjÆ?¬íDŸvÒI'å7…k®¹fÚn»íÒ¸qãr%©[o½5¿IÌ–[n™¶Ùf›t饗æJèñÿâý_þò—<‘à;ßùNN&(&Q.ºè¢ùÍ|L„ºë®»ružb i¯½öÊ4bReÄHá3ŸùLúþ÷¿ŸN>ùäü¿‹ë{ÄS1á(*ç•]13*0EŸñ^ˆþ½?íüÁä SùÈGòÀJ BxàóVì(³//ı5eÊ”<9*ª‰ýô§?Í1Lw±*Û¬ÒÖÃ=žòÛßþ6ŸCá-oy˼InU¾_¥]Ê´I7œqÆyÂÇÇ?þñçý,ªhÇkÒ·íb"\ˆ}hŠHæˆx¤¯ý÷ß?¯hÒIq£8ŠN¬²Ê*iûí·Ïï)#þ‰íÇõ´Ð/Û/ŒTlÔN ÓíØ¨ñ^§ö¹éñ[Õ®Îø­S1\·ã·¦Äpâ7†#Î×8vãøŽU A‡zh¾ÖÆMáÆUÛ+¨+{>ÖÕ7T½tºOŒkbÜÇØo¿ýrÆ~þùy<'Æêc¿»Ñ?VyÆNº»/U_›ªýEãAMÚ¿Á¬¾úêù1H\»¢âëˆë[ãûøYÜ ¾æškr÷ðbe°8öãýAŒ¹ÆyØú¾¤¿H•ã®l¬÷½ëzÑn¼×„ø{$ÆPÅߣ+þƒwß+_ùÊ\$ô‹_üb>Þbõ“¾+ñÆq‹s¸¿Šäø¯}íkó~Þ_r|¨2¯#&]Æd×X%þÿ@›7TvÞF•ùEeçÒ”Ýv¨2W¦ìöã‹ëWôU1Wl3Š®D_«¡¼éMoªmnUÙí×µÿUçÕÕíÌejÒœ´NÍ-k¼²v_þts^Y™vé†væ•sËè¦(ÜçYˆs¼Ýs¿l Rçu¦LœSöZRGœSå:6’qN]û_eûUÚ JûŠB´­±x…hëœO_åyÔy\ÔÿUi^ŽÿF[^A¯Æò Æ6‰Œz¼Tö&@ÕÁê:žc_½0‰­©FÛd0“ù‡Çd0`,‹8!Š7|á _Șhxûí·Ï{cªãZ¿Øb‹åïoNû¾èû­oÌ£ÒUü¿o~ó›éSŸúTþþRK-•ßüFì4Ôÿ‰¿‹–øÛ¨JÕE?´ÑFÍ÷ûqm8)&PFBC¸à‚ rÞÍ6Ûl¾AÛè£b "Ñ/]yå•óÞÔ†èWËzÍk^“û¸™3gæ LýmÙÿ=TÛVù»ÁþW¼Fñèïg[F\ûío;rÊ)y€ùàƒ®´/ñ71ÀC3f̘ïuúìg?û¼dÛ¡¶Y¶­ÚçªÇS¢‚Sü~ è.»ì²m}¿J» Ö&eŽ“N¿E²ôÇ>ö±œ(=>úhºîºëò‘8·b%Ù"¦‹˜1®+ñ~%&—Œ´ÕV[-Ý|ó͹Dâæn‘YåÚ9Ø55núE5ͽ÷Þ;'|„':õÔSÓ¶ÛnÛV9±Q;ñQ·c£:ã½²qYÙ}.ûÿÛé¯;¿u2VéDüÖ‰®ÛñÛpc8ñMÂPcÕqó7î‰Ä=ˆ˜Dþ_ÿõ_+¨+{>ÖÕ7T½tºOŒ )1ìì³ÏN‡vX¾ßÿ?î ĹÞÚîuöUžG/ŒŒtß;Øö‹¯û{^ýýMÕצjQÇxÐ@û8ûןb2[U‰ã8’câ^lˆ Pß·N8 Q©=&Ìœ~úé¹Â"‹,’ï£Å÷b›}Uý›Ž™ª±{™X«î÷íÄ{Mˆ¿Gb UüÝø»c¢ë|dsÌ1é7¿ùM^}"Š~ÄjQ #VyøÉO~’î¼óÎü{1á1Æ“úS69>T™×QÌ]ˆÕQcI«Ýwß=Ïe)ÎǦ i§B§ e”)ºRçܪªb;½ÿU޵:ÛA‘E9¸#4·Œ:ì¼óÎó>$ÈX±*ÎçˆMZÕªç~Ù¤®ëL«âœ²××ÑZ­lœQ×þ·S¿L´SD¬L!Úņ/[·Wã¿: m51þSd¶ñŸ¼‚±M"$=)†£³/›Ø©@eo´3X]å9¶{C¥é“Ø:Éd°Á÷×dþö“ÁRž”¸Æ*1ÈûÆÄ G¼Ž˜¦Ì÷[E2eT¿Šmĵ4®ÙÑ×µnc°ÿ¢‰GLrŠÊYQ(®ÅýÅñfóóŸÿ|ºð çÅI1I)’¢ÿêëˆ#ŽÈ E¹èï¿ÿþSõ­<”¨òÕ˜úSå—iÛ²7ØÿŠùñ³è+ûzÑ‹^”“Eyä‘<¨oè‹>½Ê¾wÜqy€ â×¹sçæÿS$ÙVÙf•¶hŸ«O ¼ÇM…$l­lUõûeÛe°6)sœt☈ ]ñ^&*Á}ôÑóý~ ¶Eƒd1q¤ˆ¾ñoä±b@DõÀ:úÓ¡®ÅqíŒ tQas‰%–°R`Ù~¡Û±Ñpâ£nÆFuÆ{Uⲡö¹Êÿo'†ëTüÖ©X¥ñ['b¸nÇoÃáÄo4Á‘G™o2÷w]í+î9Ĥñâw;5V0P?Vö|¬³o¨r=ètŸãÜq3>&¦Ä ö£/ö1ÆvZ¯u¶A•çÑ c'Mè{Ú~\ŸãþEýkS嵩Ú_Ô54Ð>v{ÿûÏ#Žƒ¸Nm½õÖyBNÄ#U‚ëZL”ŠG¬˜÷ŸâžÓ@çF;Ó_û´»—‰µê~Ñn¼7Òñ÷HŒ¡Š¿»W=&Ú½ÞˆÁGFù 1Ïâ¼óÎË«{ÄkXÿ‘ܾôÒK§Ï}îsià 7Ì×¹C9$¯ZÑNr|(;¯£¸–^uÕUÏûqì´&!4­hHÕæÐéBeŠ®Ô9·ªJÑ—:ö¿Ê±¦È"C½­9P䀑WûŒ1²Â„ rVŒŸÅ{²ˆ‰Û=÷ËÆ 
˜ÃZf~hqNÙkÉùçŸ?*‹£•3ê*ŒV5Î)Ûíª/Sˆ¶óéËÄíÕø¯ŽB[MŒÿ™mFü'¯`l“IOŠè¾ÚCu^ýTö&@»ƒÕUžc»7Tš<‰­ÓL|Mæoïx0 =¯?i­¢ÖŸþú¯Á¾ßß6ªœ[öÿDe x æMozS¾AýêŸþô§|ã9&( 6!±¨rŽ=öØÜw|ñ‹_¬Ô†?þx~3}ck¾VUÿwÙ¶-ówƒý¯¾â}E³x g_¢ŽAø²úÛf•¶.Ó~eާÁŽçN|¿J» Ô&eŽ“á¿ÿýïóLjÿzMBë N;µ ô’*×Ρ®Åq½j¼î/»µu;6ª;Þ+Óß–Q:kÃu"~ëT¬ÒÉøm81ÜHÄoÉáÄoŒ´[.“Yœ}¯Á+è¯+{>ÖÝ7”½ÔÑ'mÜ7¾è;>Þñ2Ï£WÆNšÐ÷ö·ý8Ö{^ƒíËP¯MÕþ¢®ñ Áö±›ûWöµ‰kW™ø¾ÅP*ˆ2Ü¿éÛ>ížËCÅZÝxQ5ÞkRüÝ©ë€ø»Yñw•c¢Ýë|dãÝXY8±òÑï~÷»ÜÄ„ÍâxŒ‰¡üàójÄ1™ùœsÎÉó ª&ÇÇx™y1‰²¬& ©Z0$t²PFªèJÝs«Ê}©kÿËkŠ(r ÈÁàÇ„"4M•bŸíœûecNÌa-3?t 8§Ìµd´G+gÔ¹ÿU㜡Ú`8…ê‡*DÛ­ùôe âöZüWW¡­&ÆŠÌ6#þ“W0¶I„¤'µUµ{¨ï€ó@7:qs¬Ìslç†J“'±ÕÁd°¡÷7˜Ìo2ÿê‹8à€\¥ê¢‹.zÞ è`n¸á†tùå—çÚË/¿|¥íÆ`ß_ÿú×ô¶·½-nuò7ÍHïËPmM筻ÚuT\ÜsÏ=ÓßøÆ´âŠ+¦x Ÿg'­½öÚi‹-¶È¿?n¹å–<8ØŠ 4;6N¿ßÔØ¨Îí69F¿‰ßÄoŒÕ~¬ ×È^/èÖs0vÒÛýE/¾>UûÃ^הר)×”±üzˆ¿{ãz#oŽ˜§Ñ_qŽ˜°É&›ä• Î=÷Üôýï?OˆNr|μŽVM-ReÎH]ECŠ×¦¿×µ[ÉýC}©{ÿ‡:Ö9Pä *EžÿšEèŠ?Êœe¶5Ôÿ,ÎêZ2Ú‹£ gt£0Z™8§L ·˜Ó`…h»ë”-ˆÛKñ_]…¶šÿ)2ÛŒøO^ÁØ$Ú0ØM€&ßkê$¶2öØcÜ1G¶| Çré=ôPúùÏ>"ÇÉ`cÉ`£ÛÞ{ïN<ñÄt饗VŠ“^xáÜDu¹ª¾ô¥/å»ï¾{ÇÿwÓŒô¾ ÕÖtÞ+^ñŠ4}úô\û¬³ÎÊBTÎÚo¿ýÒ‘G9¯ÐMœ{QAìÐCÕx= §ßojlTçv›£ˆßÄoâ7Æj?Ö„kd/Œtë9;éíþ¢_Ÿªý¡ãnt]SÆòë!þîë¼7,µÔRé _øB:á„ÒÓO?ݨç¦hHýMéîóPä wÛf4RäÄ9âœzÛ I·ºù\ÍëG^øO^A÷ĺÂÿ˜:uj:ꨣ´”´Î:ëä介/¾8m¿ýöÏûùµ×^›«ö³ŸÍ÷ý¬Þk¯½ò`u§*¥T™å+¬°B^nùꫯ®ô·³gÏÎKqÇ—Á–%¯Ú^ƒùÔ§>•ó §œrʼ ÿ;ßùÎtï½÷æÏcyä¨b0ØÆÖn{të8¤UÎïiÓ¦åß½òÊ+sÐ@ó=÷Üsùc vÃ7Þ˜?¾á oÐøÚzT‹ž¨ºø—¿ü%Mš4)Oò‰B'­¢:Ö3Ï<“'|ŒEq ¼þúës…Ú»‰QÄoÚFü½Õ¹j ýEÿý…ýÃ5EÛйëœNμ¡2êžK3Ô\™‘žË3Ö÷¿ÛÏ£és§Fòõ0¯ldT7jnˆsÄ9åÛ íÏš5+'!m¾ùæùZ;Rm1œçÑ ñ_7ŸC“cyâ¿¡â?±_gDþ£DHhCÙ›M½9fÛØ:©‡É`@¯Q4¤w™+¤=´MoPäÄ9®ÝÍhƒ¦¢Îóp\hm3zâ?±_gH„+òÇi ©$B%h,‰@cI„K"$ÐX!€Æ’ 4–DH ±$B%h,‰@cI„K"$ÐX!€Æ’ 4–DH ±$B%h,‰@cI„K"$ÐX!€Æ’ 4–DH ±$B%h,‰@cI„K"$ÐX!€Æ’ 4–DH ±$B%h,‰@cI„K"$ÐX!€Æ’ 4–DH ±$B%h,‰@cI„K"$ÐX!€Æ’ 4–DH ±$B%h,‰@cI„K"$ÐX!€Æ’ 4–DH ±$B%h,‰@cI„K"$ÐX!€Æ’ 4–DH ±$B%h,‰@cI„K"$ÐX!€Æ’ 4–DH ±$B%h,‰@cI„K"$ÐX!€Æ’ 4–DH ±$B%h,‰@cI„K"$Àÿgï^{£ºî=/{f<¾A „Hrr„&!¥'JÕ( GjÔ£&ôUû¦RÕÐÐ|†öM«J}ݾŠT5¨MÕ6§÷Ó&‡$ž\œ‡` ø†ïž±=‡µÈØc-` k¶ŸGÚ³Ç5‹mÏÏfýöÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH 
[Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!¸®r# IDAT€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ dKÈ–"$­²%`£X\\ gΜ SSSaçÎáŽ;îh‹×¯ä+@¾’¯ä+ù @¾’¯ØÈ!)¼F£>Ž9’]Ó–-[Â7¿ùÍpçwfùÚò•|ÈWò€| _ÈWò„Pºx¼|ðàÁ(¢W^y%¼õÖ[aÇŽáÙgŸ ŸÿüçC©T 'Ož ï¾ûnxì±ÇBOOOv¯ _ÉW€|%_ÈWò•|ÈWòÝ믿n"$ÅöÉ'Ÿ„£G¦;X|ûÛßÕj5ôÑGCGGGxûí·Ã›o¾¾üå/gõÚò•|ÈWò€| _ÉW€|%_À%–€"‹a-Šw³h¹¦/|á éñرcÙ½6ÅGÍŸ={6\¸pá²çâHúsçÎ…ÉÉÉë~ÝÙÙÙôºõzýŠÏ×jµø<ÈWò€|%_ÈWòäF’BûàƒÒãƒ>xÙsqÜw¥RIE´‰‰‰¬^›â?úÑÂ÷¿ÿý0::ºê¹_ÿú×á?øAxã7®ûuÿú׿†þð‡áµ×^»âó¯¾újzþ½÷Þó—€| _ÉW€|%_ÈWò´EH mjj*=ÆñÞWÒú(£ŸÿüçéÚz饗RañzÅ‚å½÷Þ›®»ÁÁÁUÏ}øá‡¡V«¥äÚq÷ _ÈWò•| _ÈW+EH knn.=–J¥«~N³hËa¹¼6G¼F^|ñÅôñ/ùËð§?ý)œ:u*<ñÄa÷îÝ7üºHG]uþرcéqÿþýù @¾’¯ùJ¾¯ä+hŠVwwwzœŸŸ¿êç,..¦Çžžžl^›%cñqrr2üêW¿J×ÖW¾ò•u½æã?ž~Pyÿý÷—¯Ñz½ž&BÆëqÏž=ù @¾’¯ùJ¾¯ä+hŠ:(ƱÞqÄw,]Is¬w___6¯ÍÆ‹M÷Þ{ﺯ™x}îÝ»7,,,„÷Þ{/‹¥Èx­îÛ·/tvúÖ€| _ÉW€|%_ÈWò´m +¹æÝ*ÆÆÆ.{>†¼x>~Þ¦M›²ym6–xg•×^{-•ûûûÃàà`šÜ¸^Hï¼óNz|Õ;±\«xíÅbåñãÇÓëÇëtëÖ­iâ$ÈWò•|ÈWò€| _@;Q„¤Ðžxâ‰ôø·¿ý-LOO/ŸSø~ó›ß¤÷ïß¿ê?÷ÕW_ “““7ýµ¡U,(þþ÷¿O…ц½{÷†‡~8Œ‡×_ý†®Ë¦x·–8ý±Ñh„W^y%]—Íkä+ùJ¾ä+ù @¾¯ ”./ÇN< h¶lÙ†††Òñ÷¿ÿ=\¸p!œ:u*üâ¿gÏž ûöí _úÒ—Ri¬é§?ýi8qâDšœ·}ûö›úÚÐê'?ùI*=:t(ìܹ3‹×Ý›o¾™®¥Ï}îs©$y=×e«Í›7§dš?ô|ík_[yò€|%_ò•| _ÈWÐâÀ1EH -†´ÇT«Õ+>W«ÕB©TJÇz®ËŸýìgáÈ‘#á«_ýjxúé§-:ò€|%_ò•| _ÈWÐVbÿ±lØHþáó¿ûÝïB¥R ‡ºé¯ k]­Å;¯¬÷ºŒEË8y2Þ¹%޲ù @¾¯ä+ù @¾€vd®1|æÌ™3app0¼ð B…¸.=êõzxøá‡COOÅ@¾¯¯ä+ù Ú’‰ð™R©¾ño„Ç{ÌbPˆëòÈ‘#éqÿþýù @¾@¾¯ä+h[Šð™»îº+P”ëòСCéqÇŽù @¾@¾¯ä+h[Š¥ @tZ WŠ@¶!€l)BÙR„²¥ dKÈ–"$-EH [Š@¶!€l)BÙR„²¥ d«l `µùùù0::šF¨V«aÛ¶mé\›à= @¾¯¯ä+¸½L„„5!qhh(ÌÌÌ„ÅÅŰ´´fggÃéÓ§ÃÜÜœµ ÞÃä+ù ù @¾€ÛLZŒŒŒ¤p¸V¼sF|\›à= @¾¯¯ä+¸½Ê–VÔjµåÿû¿Ï¥£©££# |h‘Öá?þãðüóXˆu^›×óÈW€ì _òò _ÈW€| _ò´;EH¸Š¿÷½·-ÄMÔÑqЛñM¼>[ˆ9y²Ã¢Ü ƒ]“ò ûÈW€|€|ò·._Å}-æwá–ù €ËW€|“"$´èêê ³³³«Î=óÌÝé(—ËaóæÍéüö·'Âý× q“¯Í¦J¥bh«÷°GÝrñØJ¥RèëëµH÷ÀAö@¾B¾B¾¯€,9r6¼ýöÙÐп¸Íùj`M¾êårÇÅÏ-_<:C¥Ò™>Žçâ Áöá÷WÿŠ|Õq…|.~|)_]ÊV¥‹¥ôqg§|ò‡"$´Ø¶m[ 
KKKËçb ò;ß9víÚº»»-Ò h4^÷f| ®Íô£LG‡‚.m÷ ÷õ¯ïIÏÅ_ÔQl¦/ËþÈWÈWÈWí–¯ž~úîðŸÿy¿|…ÌWo¥"äää¼Å¸ùêZ7é÷õU.]¡¿¿+=Æ?Çü@®ùÊï¯n¾j\s¾ŠûŒ»»ËË«ùØÓSVù Ú’"$´¨V«©ð822²|w±8 R ’œ®ÍZ­–®Ï8 2– ý<í–¯âÈl"¯rÍW[·ö„;¶_ü³N‡¢Ÿ õúRšŒÀ­ÏW×úû«¥¥F˜œ¬¥£Uü~Ýß_ýlã~%}ÜÛ[¾øº¾òÕ?Òh4Âìl=ÃÃ3ËçãËôôT®X4¡€œù—¸JXHŽE3%Hrº6£'¬훯úúzmÒ¯²ÍWw„½{ï|0bq  bÑæøñ±ðÈ#Û,ÀmÈWëýýU,¯Í¦£éÒt£RËôH›÷€’¯:Ö¯ffêé8~¥ §Döö¶$/å¬8Urà ZìÜÙ††&ÃÔTÍb@A93víÚ”Ê3´ŸKÓÒÑ:Ý(nÞ_™jTYž$ÙÕU²h@Q’PˆeÈ[!Þ8(þ>ìÒïĦ—Ï—J«Š‘ÍG €ÛMZÄ)B{öl o½uÖb@AÅÍààh8p`‡Å(¸yrr>­*•ÎåRdëæýR©Ó¢m§\¾½°abb>«3V)ôö–/+HÆópKÞ-¬60жoï çÏÏX (¨ññ¹ô5¿Ö(¶z})ŒÍ¦£)Þü¢»»”6ìÇ ÁÍû==åô@®ººò¨Ôë‹áÂ…x̯ùÿWZUŒlN“Œ“%`=!à vïÞFFfÓd! ˜>úh4lÛÖ:;mÆØhâtàÙÙ…t ¯Üü"¾'¬lÚ¯,O’ŒúrÐÕ•÷TëZm1ccs«Îww_>=²··âgr®™"$\AÜœuß}›ÃÉ“,ÔÜÜB8uj"Üÿ€$Þcrr>­*•Ζé‘+›÷K¥N‹ÜV1—´ëÏàñY9'qljܱ¹º Ù é`-EH¸Šû·;ÂÙ³Óa~~Áb@Aýßÿ];vô…jÕ6®®^_ ããséhÕÓS¹lzdÜÌßaç>p‹tuçç×8¥{f¦žŽáá•óqJdÌT«'Hv…îî’œ°ù .\EœðóÐC[Âÿþïy‹µ¸¸>þx,<úèv‹Àu›­§cíÆý•Mû•åI’]]% ¬[¹Ü‘òFœd]Tñ¿mzºžŽVñwuk§GÆÇjUÎØï–®îî»ûÂéÓabbÞb@A;7î¹gsؼ¹j1X·¸qrr>­*•ÎåRdkQ2nè¸Vq"bÌ““µ ÷ßoft¥œU._ÊY½½«§Hº@±(BÀ?±gÏÖð?ÿsÆB@ ކ§žÚi!¸eêõ¥0>>—ŽV==•åRd5=öô”SÉàJbÉo#!¯faa)\¸0wñX}¾y#е$cq€ö£ ÿDœ·cG8{vÊb@AÅ©¯ñk<~­Àí4;[OÇððʹÎÎŽÐÛ[ùlzdey’¤©F@sÿÜÕnDQ­–/+GÆi’&uäM®ÁCm çÏO‡Åņŀ‚úøã±°}{_(•Làà_ki©¦¦jéhU©”.›7î{ï€%¾ÿsãæçÒ1::»ê|sRw¼!ÅJA²’nRÀ¿ž"$\ƒ8}çþûRQ (¦Zm1œ<9žŠÏ£z}1Œ/~6Õhrù|sÓ~ëôÈžžrèè°iŠ(¾çsó5'u·Šqj%k­L”µn?EH¸F÷Þ»9œ93uÙ†( 8>ùd"ìÜÙŸ69@»hnÚ^9'Å F±¹²i¿ªU[G ÝÅvU*¡^_²·X£ÂÌL=çÏÏ\–µZË‘ñ±»[Ö¸U|‡€k78íÞ½%;ö©Å€‚ZZj„> ûöÝe1hû÷´©©Z:ZU*¥åRduyã~©d¢´“ø>>66k!2ËZ1S]*Ev…ÞÞòrAÒÍ(ÖÏwR¸wÞÙ¶lé±Ñ lxx&}ǯu(šz}1ŒÇcîâŸ&—ÏÇéE­Ó#ãÇqBr‡~$d)¾_ûýT~abb>­ÊåÎU“#›ñ&\EH¸N{öl o¼1†Å€‚ ÿþï»B‡öÄÜÜB:â šâDôÞÞµÓ#M4€Ä÷dÚÇÂÂR¸pa>­ººJ—•#cþŠÅIVó“(\§¸!içÎþ044i1  ¦§ëáÌ™©°k×&‹À†µ´ÔSSµtœ;7½|>N.j–"ã†ýæ$ÉRÉ àv‰7) ýÕj‹Ù˦{ÆiÝ­ÅÈfîŠ7ªØ¨!à<øà@øôÓét7w ˜ŽwÝÕg ¬Q¯/†ññxÌ­:7ì7K‘Í¢dooW0`n¾ÞÞòÅ÷ØŽÐh4,F5§uŒ¬œ‹ßÝÝWš )oƒ"$Ü€8çÂàà¨Å€‚Š%'ÆÃž=[-\ƒæ†ýáá™åsqjQsŠQœ\Õ,HV«¶°Àz”J©7;»`16ˆXzßñ^9K± ÙZŽŒñúèÐ ÄO‘pƒî¹gsš 33u‹uúôDصkS*p×oi©¦¦jé8wnzù|œ¸Ü:=²ùq©d³>\«øÞ©I :=]KÇÅ–ÏÇR\>=²’¦x´#ß½àŪÇIqï¼sÎb@AÅÍ„qòë“OÞm1à&ZXX ããséh7æ¯'f—‹7hÄ ­â )&'çÓÑ*Þ¢9±»µ(ÙÕU²h@Ö!`¶ní 
Û¶õ„‘‘Y‹5::›¾Æã×:pkÍÍ-¤£5_ÇiF­›õ›Ó#«U›õØØâ{#\¯xCЉ‰ùt´ªT:?+F®ž"‹“9P„€uÚ³g[;î²ÓààHزåžTÄn¯˜³§¦jéazù|Ü”ß,E6§GÆmÖ`£ˆï{p³ÔëWžØ]­–/›ÙÛ[¥’ÌÜ^аN==åpÏ=›Â©S jvv!œ>=î»o³Å€LÄiFWÚ¬ßÝ}i³~u¹ ÙÛÛ:ÜÏ€‚‰¿“Š7ìqs.n¥ùù…tŒŽÎ^1s­.HVÜD ¸e!à&¸ÿþpîÜt¨Õ-ÔÉ“ãáî»ûBWWÉb@ÆææÒ12²²Y?nÈó›õ›“$«Uïë´¯ŽŽŽô~699o1È"sÅk2tW ’—²WOOÅM)€uS„€› \î >¸%¼ÿþ°Å€‚ŠS§Ž {÷Þi1 ÍÄIYSSµt¬Íñk§GÆ ûñ<´ƒøÞ¥I.F˜™©§ãüù™åókoJÑ|ŒS%®•ïp“ìØÑ††&m>ƒ;{v:ìÚµ)lÚTµPñF.̧£UÜ”ßܤߜùÿìÝ |œU¹?ð“­M›tIKKJ *"‚(*ÈAEQqAAEå*¨—ëŽü½z]w¯ˆâ¾¢ârEq犈‚Šˆ È".H[ ,-ÝK“¦ÍúŸç” i›´Iš´É›ï÷ó™Ï$3“d晤ï¯ç=ç<1y_'#†›8^Áp×Û¦UU•¥ŒU½Ýɱc-w¶ç_$iáÂ)馛–)Tt6X´hM:ôЙŠ¶iS[¾¬^ÝÜu[t27®z«î‘ññرU ÀÇ"©ÚÛ;Ò† -ùÒ]¹k÷–…‘/’¬©ÑµF3 !`Mš46MŸ^—V¬hR (¨èãñ·ŒÑɨ©©5_ºë>QK÷È-Çí0Ôt„¤ˆzëÚ=fLÕvÝ#£k·Ü£ƒ…0È,hH«VmÌ¥bZ²dmÚk¯ñ¹30ºõ6Q¿¶¶ºÛDý-Ý#c¢~…øÀ Š…aÑ%¯µµC1(¼––öÒ¥9­]Û¼ÕíÑ¡{Û’qmÜŠÅBHd1áyß}'¥»îZ§PP›6µ¥¥K×§yó&+Ðk^ˆËêÕß‹ c1d,Š,/ŒIúñ¨8®l»0 F“Í›ÛK—æ´fMs·ÜUQÊXÛ.cc Áü¯ †Àœ9Ó²eò$ ˜î¹çÁ4sæ„Üu /:;Sjjj͗+»&è××éš°·ÀÎÄ1ÃBHØ6wu¦ææ¶|YµêáÛ·lL±}÷ÈX4Ya…$ kBÀ¨ªªLûí7%ýýï+ ª½½#-^¼&xà4ÅvI[[GZ¿~s¾t.tïëbÀ¶âôÍ–)Zò¥ôQ×í••9gm¿@ÒÒ+.ü5À™>½.Ýw߆´~ý&Å€‚Z±¢)Íž=!MšT«À ‹ó›7oL«W?|Û–.FÝ»Gš¤0ÚÅqØ5©±±%_º/¬ªªØjadùã1cª v3ÿã€!´paCºñÆe ¶hÑÚtØa3Ø-¶t1jÍ—Ø”¡¬ºº²Û$ýš®N’q;ÅÿÞWTT”ŽŠƒ¬½½3=øàæ|鮦¦j»î‘² - !`M˜06͘QŸ–/oT (¨ 6ç¿ñø[ØSÚÚ:r7úm;Ò[Õµ(²¨ ¹ººE€‚kjjUF% !`›;wr3¦J! Àî¾{]jiiW†T]ŽPtMM:B2:Y {XUUEZ° A! ÀÚÛ;Ó’%k€!¥#$ŸŽŒVBÀ00cF}š0ÁŽýPdË—7¦ ìØÀЩªªLãÆY EÖÞÞ‘š›-†dô±†‰… §(Ü¢Ek€!¥+$Ÿ®ŒFBÀ01iRmÚ{ïz…€[¿~SzàF…`ÈX Åg!$£‘…0Œ,XЪªœÎ‡"[²dmjoïP†D}ýXE€‚klܬŒ:ΜÀ02vlUš3g¢B@mÞÜžî¹çA…`Hè ŧ#$£Qµ»ÓäɵiÞ¼É ;°ï¾“ÒòåiÓ¦6Å€‚Zºt}š1£>ÕÖš¾Àà7®:UVV¤ŽŽNÅ€‚jnnËãñ·£…‘T`·²v.&¯,XÐþþ÷•ŠÕ–,Y›¨ô‹…P|BRtB@Ì™3)ÕÖV+Ø=÷¬O›6µ)}V_?V à7+…f!$HeeEÚo¿…€ëèèL‹¯UúLGH(>!): ! `¦M«K“'×*ØÊ•MiݺM @ŸÔÕQ(¸¦¦E Ð,„€Z¸pJª¨¨P(°E‹Ö¤ÎÎN…`§ªª*Ò¸qºBB‘µ·w¦æf]!). ! €êëǤ™3ë ¬±±%-[Ö¨ôI]…PtMMBR\ÕJ0rÕÖV§É“k€ÍŸßV¬hJmmŠuçkÓôéu©ºÚ~èìX,„\µJ Èb!ä^{©Åd!$À6cF}:ä @jj*Ó¼y“Ó¢Ek ªµµ#Ýu׺´páÅ`‡êêÆ(\SS‹"PX¶‚€›={b?¾F! Àî»ïÁ´qc«B°CõõBBÑ56ZIqY VQ‘tŠƒ‚ëìL:¿°SãÆU§ÊÊ …€knnK A!Y 7eʸ4uêx…€[³¦9­^½Q!èUEEE?¾F! 
À:;;ÓÆ­ A!Y £À~û5äî@q-^¼6w‡€ÞÔ×Q(¸ÆÆE ,„€Q vûßgŸI »ýß{ïz… WuuBBÑ55YI1Y £Äܹ“RM©Pdwß½>µ¶v(=ª««Q(¸¦¦VE œÝ€Q¢ºº2ÍŸß P`mméÎ;×*=²ŠÏBHŠÊBHEfάOõõc lÙ²ÆÔØØ¢lgìØêTSS¥P`›7·¥ÖÖv… p,„€Q¤¢¢"-\8E! À:;;Ó¢Ek€é ŧ+$Ed!$Œ2“'צiÓÆ+غu›ÒÊ•€íX Åg!$Ed!$ŒBûí7%UVV(ØâÅkRGG§B°•úú±Šר¸Y( !`ª­­NsæLT(°M›ÚÒ=÷<¨lEGH(>!)" !`”ÚwßIiìØj…€[ºt}Ú¼¹M!èRW7F àššZ±F©ªªÊ´`Ád…€koïHK–¬SºTUU¤ÚZ›cA‘µ·w¦M›lŽE±X £ØÞ{×§‰Ç*Ø4¦ܬt©¯×Š®±QWHŠÅBHåöߪ"@ÁÝqÇjE K]…PtMMBR,BÀ(7a˜4cF½B@mØÐ’–/oT²ººE€‚kjjU ÅBH -XЪª* lÉ’µ©½½S!Hõõ:BBÑ56êI±X ¤1cªÒܹ“ ¬¥¥=Ý}÷:… W“*+mŠEÖÜÜš::lŠEqX dûì31O‚ŠëÞ{Ì“àÝ**R?Þ8YggJ7¢8,„²è°ß~ ]/^«¤º: !¡èšš,„¤8,„ºìµ×øÔÐ0N! ÀV­Ú˜Ö®mV€Q®¾~¬"@Á56nV ÃBH`+ NI ¶hÑšÔÙÙ©£˜ŽP|:BR$B[‰Ip³fMP(°˜wÿý`³ŠÏBHŠÄBH`;óæMJÕÕ¦@‘Ýu×úÔÖÖ¡£Ôرթ¦¦J! À6onK­­í A!8cl'&ÁÍŸß P`1 îÎ;×*À(¦+$Ÿ®……@fÍšêêÆ(Øý÷oHMM- 0JY Åg!$Ea!$Ð£ŠŠ”öÛOWH(²ÎΔ/Ö`´² ŸM°( !€^M™2.M:N! ÀÖ¬iN«W7+À(T_o!$]c£…CµÀŽ]uÕ]©³ó· ± ~ûÛ»¯@ö@¾ä+ùjkkëH÷Ý·¡T›NÅ`ĺùæåа‹­N ³See…bƒê¦›–—2ÄÍ òƒd°Ç¯b¸çî»×),8_éIQX ;ñ»ßÝ•/0œ}ôÉ>&ß°gTUU–rÛ˜|é®­­£[f{x‘dd:`à:;Sîκí¢dQÇŽÒ圣>:Å`8Ù¼ysºì²ËRKKKÚ{gÝó{×»Þ•þò—¿¤#<Ò›Uò‹_ü"}üãO‡~x)Õ+ŒÀ¬´;òÍ 7Ü®¼òÊt×]w¥9s椚ššaŸûä'(~j#qIn“Û¥Fš¡Î\ò‘|ÀÈVU$kÒĉcsÉ™3'¤}÷˜f̘jÓøñ5¥ã{e~l[[gŸ¿okkKºîºß–®[Kßg¯BÕì+_ùŸtÇ·¥ƒzü|ÿÛoÿkºé¦kÓÜ›¦OŸ™:;; [˹ÒÅ9=êQ‡”~GÇÈ×0oÞdÿÈ ÖXIlnQ[[7·˜:u|)³Õ—²Û¤4kÖ„4eʸ|ûرUùqíí¹ÃÝhËRCEF+NFëͤIµB2býö·¿µFºeË–¥{î¹'­\¹r§—qãÆ¥1cFÎAëæ›oNÇsLZ¼xqzå+_9ìþ}ík_›ñˆG¤ã?~X×qÍš5éÓŸþtzðÁÓþûï?d?çŸÿügúÿïÿ•þƒ1%uÔQþ85ÚÚÚrn¹úê«óñ02WCCÈËJCobP줓NJo{ÛÛÒüãô½ï}/utt¤§?ýéÃ:÷•E–ºõÖ[óû|çwæ ^'NÜá׬[·.¿¶U«V¥iÓ¦¥ÊÊJù 9h˜ä 8QÙׯ_Ÿ¦OŸÞãcÓ7Þ˜áññm·Ý–3@¼ÎžÄ„ø¨Ã^{핪««w[Î’Ûä6èO>ÎÇóþf´îÇø¡Ì\£1í,#ÉGEEEE^ü‹ 'O®-eº4{öÄ<É>KÖÕU§»îúgéXz}ºûîE©º::MNÚê{,Zô÷tæ™/K÷ßwzæ3_8¬^ßêÕ”ŽÿÿÊ·ÇŽ­íñ1Ë—ß›–.]œ&Mš’ªªª»åÆ?¥óÎ{gš3gAzÒ“ž6èùêÜsß>÷¹”2ÊåéÊ+žóÕĉ ö–ÝmÜØXÊ[~/–-»7×wüøoÑØø`þ]Y¿~méwmJéwïálµté’ôÅ/~¸ô»59|ðFäß’…£Ãp{Š’Ûon1)_ÇÉúú- $#çEWÉÎέŸÿí·ßššš,§öøý››7¦ýë¯yeßš§†2K ­x­7ãÆU翉â\@µ2ÀÈöüç??ýùÏîÓcßÿþ÷§÷¾÷½£²Nííí¥pY5hßüd¾~éK_:ì_û¥—^šÞùÎw¦¹sçæ yCUËãŽ;®'§ /¼0ï¸ £Á/ùËôæ7¿9ÝqÇ[Ýþoÿöoé?øA¯¶†£¡Î7?ýéOÓ%—\’:è ü³š››Óøñ{~·¬åĵkצsÎ9'}þóŸÏ¢e1`úï|'½ìe/ëñë6mÚ”;ì°´dÉ’®× 'ÈOÈAÃDLꎓ°7nL¿ùÍoòÇÛzõ«_óËûÞ÷¾TWW—þë¿þ+Oòþýï¿Ýcc²þcûØ´aÆ|â÷€Øm9Kn“Û ùl 
íÜsÏÝ-™k´å£¾d$ù€¢‹‰ï×\sUùêˆ#ž’>÷¹o¤qã&§¦¦–üØáêŠ+~’¾ô¥æ‰Û\ðýíîojÚN;íÙ¥|Ó”.ºè7iŸ}æwÝ÷ƒ|-_sÌóýy]sÍéw¿»,ÍŸ@zÃÞ—6oÞ”jkk÷x½::ÚKïgïùjÆõéßøT)}+g±îùêÝïþT:öØzüº––Íéu¯{^Z¶liþüCúJé÷èé]÷~ø¿¥úú‰é²Ë¾ŸN9å?ý0, ÷±§Xü8v츭wžM›Úsf‹Ë=÷,OgžùòÒmÍéüó¿“=ôˆí¾ÏÇ>vVÎ)¯zÕ›Ó«_ý¶ç©¡ÌRCEF=­©©Õ?jŒhBÂ÷º×½.‡È²èþ÷õ¯=Ÿx:í´Ó¶zì±Ç;*kôœç<'ðüã[MB¨x /.œ3gN:òÈ#‡ýë?âˆ#ò‚Ù¸ÊZF·Ñ¾ð…ù÷/&vÿ½€"úë_ÿš^ô¢åID1й vW¿üòËóÎð±{ýHèø³»òMìÄb§øØ-$äĘÔÏõ/ùKzò“ŸœN<ñÄtÈ!‡äÌý»ßý®×nPácûXž(6uêÔ´zõê­Úä'ä =/:ÚÄÆQ1þ­o}kºé¦›¶šDÇæ˜ì>cÆŒtÖYgå¥q|n{:næ3ŸÉ5xñ‹_¼U®iãHr›Ü€|6’2ÚîÊ\£-õ%#ÉGŒö|uÿý·w嫎Žùz„1iþü†Ôظ9O,nnnËcJ{Ò 'œ’¾ûÝ/¦[oýsîŒsðÁ‡ouÿ~ôÍÜ9ç©O}öV‹ ×®]•®½öÊ4}úÌtÐA‡ úóŠIá¥/}}:ì°‡óÛí·ÿuÕêï|Mºþúߥ‹.úõVµ(‹…oû+ÒwÜ–ýèÇ•òϳÒÂ…–~/Ö§[n¹®×Qáÿ÷‹y‚}tSzðÁµÛe«êêštÔQÇ¥Ë.ûAï ÷l4\Çžb!\t¾‹Ktœ;wrúïÿ~W{úÚ×>’®¸âš‡J¶æ…’×]wm^8eÊ´tòɯpžê,5Td´Ñ“Ñ,„d¤³F¸ÓO?}«ÏcÇù8á' Ï;ï<J)ï­¹cçÒÁ¡=¾ßI'”Còp·páÂô“Ÿüd·Ô2vÙß¿ØýÕ OŠî+_ùJžP´íîó‘Ï"“¤n»#ßÜwß}ùzŸ}ö191º>Åd±È8]tQª©©éºï%/yI¯ß7ºpÇd±èµï¾ûæö{««ü€´çÄDö8ÇÉÛxMÿñÿ‘o|ïCÓ'L˜?>óÌ3Ó{Þóžô¡}h«ãvSSSúÔ§>•÷gŸ}önÏYr›ÜE§êoFÛ™k4å£þd$ùùjk55UiîÜI]Ÿwtt>Ô}¨µkr}ccKjiißm¯cüøúRF:-}íkŸHßúÖgÓÇ?~Q×}›6mL—\ra>Οzê[·úº«¯¾}vúõ¯ÒcMãûÆ$û_üâûBPˆl4\•Çžþö·ÛÒOúÝ­ÆžÎ8ã#ùãsÎùPzä#gå W[[Ýï<5ÔYj¨Èh£'£mÞÜ–ÚÚ:Jõªô#’ß\…"¬ÝrË-iݺuùó¡ßûÞ÷J¡îo[=®±±1]uÕUéâ‹/Î;y®\¹²Oß/vD¸þúëó.¤+V¬èõyÜ{ï½ù1?üá·k•¾3}yn±ÛH|ß8î¾ûîüù¢E‹òsÞÖ† ò®þ±cì=÷ÜÓëÏ./*ì~’°¥¥%Ý|óÍiéÒ¥Û=>êõ¹ÿþûûý^Å®¬±Ûmù5ôôœãçÆ„»½ß·ÞzkÞ w ï[_kyÌ1Ǥ†††¼oÔЬœûØÇnwß#ùÈ4eÊ”¿.ŽÉW\qEúÙÏ~–óÐ@³N_ìJ¾Ù•œ·ísˆÜ;LJåË—çÏ{{í=¾lΙèG?úQ~>ñù®džd›õë×çI\1©îK_úÒV“Åv&IDŠÎP;ДŸƒö\ª­­MŸøÄ'òÇï}ï{»2Ï7¾ñœ=¢sÎ+_ùÊ®ÇÇ·ñÚ~õ«_¥?ÿùÏ]·GVˆ¬Ý :è æ¬á:Ž$·ÉmŒ¾|¶­8g—ä«8^GXµjU¯÷Åq{[±/î‹,3ŒÖ[挌9óQ2’|€|µc••é¾ûî,å™{Ó~û5¤ƒÞ;qDt°Þ7rÈŒ4cƘÒý·•2ÂïKycy9¢¹tœÿ[騾¶×ûšš6lw_Lû6lØ’£^ô¢W—rÁätà W—²Þ­]ûùÏ¿›;ßD÷¢ùó·î0ý‡?ü*_÷6y¼¹9òݵ¥ÜrinÝš>ÕuãÆ¦R–º«ôs·<·5kVæÏW®\Þç÷¦µµ%¿¾«¯þeZ¼øùóÞôåyF§x›65?”ùîËŸßwßÝyÒ}ˆ:ÿâ?HãÇ×¥·¿ýÃ[M°ß™Ïþƒ¥çØšÞüæsÓŽ†Ÿ=ôˆÒû4)w¡jkÓ¡€âe£0RÆžÞøÆÓÓ‚ é1™žžô¤}Òyç½/54LÉyj͚ťkÓ˜1U½æ©Þ²Tã-ú{Z±bûó€í9Û¬^ý@¿Þ›È|‘"ö–¿âgÆ¢MMFë.6i‘JGH…b÷Ï'<á 
éïxGš1cFéàÿöR(èL“&MÊníÚµé-oyK^ù²1cƤw¾ó[íæ±í÷{Ö³ž•wÿ,OŒ/ï„ß}7ü˜ÐuÆg¤¯}ík[-¢{Å+^‘¾õ­oíð¹÷ç¹½ûÝïNŸýìg»>?ᄺ>þÝï~×µ3iœ˜‹]Q/¸à‚\‡²³ñsb§Ó²¸ÿºë®Ëxà]·ÇΨ‡~xª¬¬Ì;Ôî¿ÿþ]÷ý÷ÿwúŸÿùŸÜ1àø@¿Þ«8ÁxÎ9çäG¾ð…/lwÿÞð†\³oûÛé”SNÙáû}ÜqÇåE yßúZË‘xÄ#ÒŸþô§\“8À…UÎ1‰éE/zÑN9æMozSÞA½,޹'Ÿ|rÎEãÇïwëÍ`ä›ä¼žDǤØA¾¬œYb"VLÜÚ‘¶¶¶üšãë»×cܸq9_Eê>«¯õëK¶¹ì²Ëò"×¾öµiâĉ}þ½ˆ…Ñ…ào|cÀÛý§\~@Úc9(¼ð…/LOúÓÓ¯ýëôþ÷¿?_"+Ä϶¿#¿”»B~øÃÎÇüøyçw^~\ì‚ÛÝHG’Ûä6FO>ëIœ/<ÿüóÓ£õ¨ô÷¿ÿ½ßù*ï=÷¹ÏMO{ÚÓÒ•W^¹Õ÷ŽìY£§sÿøÇsŽŠcPýÍh;ÛÚÕŒ9ÚòQ3’|€|5°|ÕÙ9á]Ûå«'>ñIé‹_üFjh˜‘7—ŽË¿Ogžyj:ôÐ'—¾Ïw·úÞßþöçK—Ϧg<ãé¿ÿû“[Ý÷½ï}9}ýëç§×¿þé¥/}}ž^îbôï|.}à_Ê“¸/¾ø+9¼êUoÙnLëÿ¸9^z½_J¯|åÓk^óö>×õG?úfúÆ7>•N8á”ô¶·}p»û/¸à½¥\ùãÒëûdé5¿@F“ѺÄïëäɵþcD²F±Ë/¿]µr€"‹‰O±XLèI?1p6mÚ´^Çë?þñy@.²Ld 8Çîî/ùËÓóž÷¼ç°m F¾HÎëÉI'”w~ûú׿žwu‹çÙh¯½öÚéëˆ]ü£&~ô£Óë_ÿú4wîܼ8ó“Ÿüd^L»µu_pÐ×úõ%ÛÄÏ ‡q°Nö IDATrH¾ŽšÅûñǯX°±­-;…½9?&~^_ÉOÈA{&•ŘKLòŽ åË–-Ë~bÒøa‡¶Ý÷c}ä¢èjwâuÅ×DæÙ¶äHG’Ûä6FO>ÛVŒóD‰ ¢ƒ÷@òUd½Xä÷‡?ü!/쾯ü=cÃÎØ$5ŽåÝǶãK}Íh;ÛÚÕŒ9šòÑ@3’|€|5¸ùê/8î¡|5©”;^œÞýîÿ(e‚ÒìÙcKÇûÚr—rØPå›þ漞<îqË—«¯¾:O‹ü¯igb“Œ˜;Á]{íµ]Ù"Åäý˜ü«1‘lï½÷îWýú’mbûðÛßþ6ïÒ“ÅÊâûÅ$³ØM.”E}¢Æ±éGCCCŸ—ä'ä =“ƒÊ¢sP,HŒcyì,ÇñØ9¶'‘Ib +v…¤‘Sâ9nÛ r$Ž#Émr£#Ÿm+&±ÅÆ¥1-vÖŸ={ö€òU,®‹çŒ“ØBt:¼á†òÇ«V­J×_}þúòçþóŸÓ~ûí— $£í(síjÆMùh I>@¾Ú=ùê–[®Ù*_ýë_ͯ_¿6­\ù¯´pácóäúØláŸÿ¼5Íš57OF/‹'Ÿ|zúêWÏK?üá×Óßþö—<‰üÔSß¼Ýó¿çž-éO›6s»ûžò”gäIë1 ½ìàƒŸ3Æùç¿;/´ÜÑBÈý÷?(_n½õú<ÉþØcŸŸ¿~gnºéyâüܹ Óç>÷£®šGñô<©ÿŒ3^;(Åäû††½úõ<ùÈÇæKtŠIöÏ~öKº&×—E÷ŸpóÍJÿû¿_ÊìË¢Ž11ÿôÓÏÊ‹ Ê.¹äÂ<ÿ¬³>Zʃ}›×U®y<†ÇØSY_Ï–ǪæÌÙ'í³ÏÄ­îÛ´©­T‡¤«¯þEÞØâOº¼”eŽËù$:+FGÇè¤ØÑ­ðÀÍ c“ŒÈGe×]wUÚ¸±)}ôñ©®n‚Œ&£m%6X‘ªR `ôŠEˆ±[è¶-¼cgÓî'ÙÊb±bXº´çyÇî“ãCy‡ùò ·0uêÔ|»šö×@Ÿ[oÊ-Ð#Ìn+‚uˆÉze1`z:™Ôb¢ZxÛÛÞ–>ñ‰O䯓¢Çw܀ߧÿ÷Ï×±“Jw¼Cìüº+úú¾õU¹6å£Pd1P÷»ßý.OìojjÊÇÔ<‹ŒÕ] ̅ج<ù¿,v,+ç›ÁÈ:ƒ™o†*/ôEL¸ gžyæv»ùGÊc=6566¦_ÿú×C’c÷¸ð£ý(Ox‹÷ô‡?üaž$6yò伩ÇÙgŸÝõøûï¿?ï”ÿ„'1¾õå/9wÛ‰­Ûž<ÝYΩãHr›ÜÀÈÏgÝ]zé¥yÒZOÑ’¯b1_(ï´âûÆNüåÉi±€¯,6OûÊ_7Œ¶£Ì5Øç‹šv%#ÉGÈW{>_ýío×–Žù3ÓQGÍ-e£¿æ‘ÇÿÜ4}z]?¾¦k^Ø _øïiâĆÒsünîžóÔ§>{«Å’eѵ'”;ßt7aÂä­&®—í¿ÿ–±±+î’÷âÒK·Œm½ä%§o×¥òxL:ì°#RsóÆtã’çÙÔÔ˜¯ÿûËÓ”Þþö—²ÓòÄúúú‰¹ƒÓ׿þɮǯ^ý@ºè¢ Ò#ypé½xIŸN¹æË–ÝãÙh˜Œ=u×—s„;«ª­­.Õ`JúÊW¶Ôì3ŸyºþúänÝ'žxR)ëœPÊãÊp}ožõ¬-¯ýŠ+~²ÕíW^ùó|ýŒg¼HF“Ñz¨Ÿ…Œ\:BÂ(;îG 
ñÞ¬_¿>‡Ö8ñ»bD ïÐÒÒó¯{›ñ²éÓ§?VwÝ“»âÄÛYg•w£`a²ªªªÏϽ¿Ï­7‹-Ê×±CIì¤ÚÝÊ•+óu÷wëÖmìëéDdxæ3Ÿ™'¬}÷»ßÍ’b‡O~ò“»ô>|òÉyg֨ŤÜÎ=þA>u9ìT_ß·¾š5kV¾ªE0ÜÄÎì×\sMÞaý]ïzWîÀ»´ÇnU1˜î¸ãŽ|}ÐA yÖì|3y¡/Ê;¤Å䫞vØaéŠ+®èq·ùÁÊŠ!&®ÅÎúÝsÌ1éÉO~rúÌg>“>øÁ¦šššœý"£}ö³ŸÍï½ü€4üsPYœkCtKŒŸ'fË?l«¼ãkìzÛ[7Ⱦ䬑8Ž$·ÉmŒü|Öý~ÒI'壛ô¶Ñ’¯"?ÅÏŠ gyÒ}䀸í£ýhî’çÏ=÷Üüøø8¼à/pFëËØÖ`fŽ"æ£0ÐŒ$ _ ¯|õë__ž¯_õª“ÓN{(Wu¦Ç—×½îé¼ó>¿¾§n¡±q˦ =MR/kjÚn¹åºR^y w¥\½zE¾½µµuHÞ‡r—ʘ´Þ“xLºá†?”2Ô=Cú<Ï:ë#¹Qw‡zDzÃ^”~üão¦ÓN;+o÷…/|$mÚÔœÞò–÷çnD}5uê–náåîF íù±§îúrް/cUÝϾãïÈç?ÿùϤéÓ'w{~©¹¹-/VkjjíºnnÞ>Ç<íiÏMŸýìûÓµ×þ¦”ûÓøñõ¥,²±ôùUy#ŒèÂ(£ÉhÛjoïÌ]Jc.Œ4~kíD8ŒEб+}ssséW›wmkk뺿ÏÿÈToÿÏÌᇞÃo„·ØéôÊ+¯L .ÌíÏ<òÈÝöܽ÷n %·Þzk3fÌv÷?æ1)…£gw}>vìØ­‚jOÊá4D÷¤ò"Š×»‘\|ñÅ¥@öãtê©§æÉl7nÌ»ü—OPêÁ¡zà‡‡rmú³°Šàè£Î»Å_ìüõž÷¼§k¯¼c{¹3öPf¡È7ƒú“ÓzÛm-Cy'µÁΊuuu½~Md¼½÷Þ;=ðÀù½çùï9ÏyN~^塼L^‹ÛçÍ›·]~“ŸƒöL*ûÚ×¾–n¸á†ôô§?=?ïXLyÅIÈž¾OˆM½â$ç‚ zìÙלU„q$¹ FV>+›?~ž€“×>þñwu«Þ•|ÇÝ'>ñ‰éÚk¯M7Þxcî~ç9äÜ! :\}õÕùØ@¾þùª²²"Õ×É—SO=)/„Œ1­SN9ö¡Iõ±Hrs×ûšš-ùª¼ rÛ|õ…/|8ýìgßN›7o*½†±¹ÛN{ûÐæ«•+·l1iRC÷O˜0)_—»Yöó¬­_®Àv÷xà¡¥ µWZ»vUZ±â¾R~ZŸ~󛟖r×1¥Ÿ9©ôžßÕõØr×¢èF·Ï˜±ÏVç\Ë5¯¬”­†ËØÓ¶vvް¯ó°vvŽ02\]]M¾t×ÞÞ‘6nÜz䨱UéÈ#Ÿ‘®ºêÒÒë¿<w܉¥÷ç7¥Ôœ»EÕ/mä‹ß! !‰üÖÛ‰Ý?Ï?ÿü¼#èç>÷¹Ü9²4ûØÇæ4vÕ¡‡šwáøç?ÿ™>üáçöæqr->Ÿ1cÆn{n²ï¾ûîôío;≎áž{zno»±ÆŽ©€"DÇ®#±£ÈñÇ¿KõŠIk1-žg|ü½ïmi'þÊW¾rØýþ”ÿcR®Œ&‘E>ô¡åÝÖW¬X‘wl¾b°*>ËPgÁÎ7{JÔ)ê;¼õ´+ÜòåËóu œEV,ÿÌòsÛ*O(kooO×]w]þ8:-Å¥'oxÃòõç?ÿùtÆgÈOÈAà …µk׿“•1q;~~œPýÔ§>•O FwÆòN°=½Þî×ÉYEG’Û`då³²XÌççâœÝ¿øÅ›~ðƒ¯æ;o}ëûÓ¾ûî—»dÉ?Ói§={ÈòÕ”)ÓÒºu«K—5¥l?ƸfÍ–Ü3sæœ|=ØÏsÚ´-]€âç÷¤< ?:Kýã7çÿô§+ó¥'ŸúÔ–îQo}ëJïï+ºn_¹rKNŒ  ±§婞ôeÖ®œ#¬ªªL&ŒÉ—îÎ<óŒ¼òškþ/½æ5ÿž®¾zK7Ìg>ó…Cö; £|±!ÊÔ©ãüƒÆˆS©À¶¢5xˆÝ7Ê'Ù†J|ÿ‹.º(½ä%/IëׯϭÅû¹•ƒQSSÓv÷Å á¯ýkŸ¾WyÂ^ù¤Þ¶>ò‘”ÂÒ?Ò›ßüæ®ÝKâÄ`캿+b‡‘øÂUW]•‹FH.š± ÈîþKoµ,+×fG“ ÈÊ;¢Çî`S¦LÉ—'5ÅîêCÃ;ßì)å×;µõ¤|û8àúí(Û|ðÁù:ò×¶"·Æ€e¼Ç±Ã[ì÷îw¿;’n{9è ƒò×¼øÅ/Ο?íiO“Ÿƒ†I ï}ï{ÓªU«Òi§–Or†O|âù:&£ÇIÛ-ãHrŒ¬|Ö]L6‹ç+++óþ·ÝvÛ.ç«N8!_Çd´Ø5Ä18”'sÅÄ®¸„¼à»”Ñv”¹vç9Ï‘šÊïÏ@3’|€|52òUÌ™³eS…+–•òÊ´tøá³ÓQGÍMü¬tà [&Ÿ{îÇÓþû?b·Õ}öì¹ùúÖ[{c¼õÖëóõܹûçë?üáWùúÌ3?Ø5Á¾¯ùª¹yû±¹ •¯oºéÚíîkjÚV®¼?UWפ™3÷M‡vdzÅ+Þ˜N9å Û]æÏ? 
ÍSŸúìüù¡‡>y«ïU @6ºl´'φ¡8Gøìg—Ïþñ¿Oíí+Óµ×^•Ïž~ú éCf”²ÛÔ4kÖ„4iRm)· Î"m䋎0Y l'vš±KFYì;|†›nºi»ÛÆß²ëBCCà?·r»ð[n¹e»û¢µxˆ¶ë±ÃǶ6oÞÜõ3Cœ8ŒP;¨Æ}ÝE(î–$Ï9眼ëHt¹¼ë®»òçÝÅN®±[ê}÷Ý×§šÅ®#¯xÅ+òkQâgŸrÊ)»ýwcGµ,+‡÷ØŠ*Ãbǰì£ýhêììLO|âÓØ±cóm1Q(Ä Y÷Ì;NÅmåÍ #‡ f¾Ù“Ê»ÀÅŽk÷ßÿV÷ÅNp7ÜpCαKþ@³â޲M œÆ€ío~ó›tóÍ7oußûÞ÷¾ÔÚÚšN>ùä¼;[LN‹ÐØénÛË!‡’¿&²[|ÞÓD6ù 9hÏä È±í„ Òûßÿþ®ÛŸüä'§“N:)566îpÇ׬q¤ÐŸ±¤Ý=Ž$·ÀÈÊgÛŠ‰bgŸ}vžhuâ‰'¦|pÀù*<êQJûï¿ÎßùÎwRmmmzÊSž’esæÌI¿úÕ¯ÒÏþó4nܸôŒg=g§¿üåšÒïÅß·ºï Ï/ý¼¶R¶{n—‹Iý§öötúégmwY¸ðÀ‡~G^?ßv@¹ÛP¼†ÇØÓ`d©î†j®yoçÇŒÙÒ|öì 逦¦CQª×¾¥×;§ô\÷.åŽ)iÆŒúÜa²¿e´‘ÏBHFªj%¶õ¼ç=/ïºñ¦7½)ï²aïÿ÷sø‹“fZ*‚Õã÷¸tôÑGçð-½¯»îºÜþÉ­Òã{—OFˆŸÇ{l×÷Þ™SO=5‡ýï!ëî¶£Z–Eñ>@QE¦ùæ7¿™;[uÔQy -²Hìæ¢¾¾>}ùË_îz|ä€óÎ;/ç—Ã;,½ð…/Ì»ÅîcÿûßÓ%—\2h9l°òÍž5ŠÉ`W^ye:üðÃóm1XYè‡?üa®qd­¨ã@³â޲ML&{Ë[Þ’ß·x‘ÅöÞ{ïœí~ýë_çc‡¶Á ? í™ôÆ7¾1Ÿ ŒÎ6qlï.NÜþô§?ÍÕãcœüì¯ÁGÈXÒîG’Û`d峞¼ç=ïI×\sMž$öš×¼¦+£õ7_•Å9À86Ǧ1Ù,&¤•=ûÙÏÎÏgݺuyÁ^y³Ô²þf´m å9OùH>@¾)ùj°Æ´ú’Yb±dtÜ´©-56¶¤¦¦–<¹:.7¶¤Æ¯£Ž:.zè馛þ˜þã?^žò”g–2ÏÌtûí·¦ßÿþ—iܸñéÍo>'UTT>”•žžî¼ó_é‚ Î)=çÛK5©+e¡Ÿå ô1Y¾§xÐA‡¥«®º4}éK)½îŹ‹ÐK^rZZ°à‘yþ‰'¾º”¿¾’Î<óåé¸ãNL {åI÷7ÞxMþøµ¯}Ç.ÿî-_¾e“‰8È"²Ñ0{Ì,5ÔsÍûs~pìØªÒe\š2e\×må,˜œæÏŸüPŽkI••2ZQ3ZdôŽŽÎ¼ñ Œ$:BBÁD¸Œ¢ÍøŽz{Lì(qúé§§»ï¾;½óï̃W!BåÔ©S·ûº}¿ØE!.åûbÇÑ2NÆEÈúÌg>“CZL€‹@¼£ïÛßç"4Fè`|Áä¸xNe?úÑò¦MMMù¤_´:ë[ßšÃl„ìòN!eåÝAÊmÓÃ÷¾÷½Òcç’—½ìe]·ÇâÎð±«Dœˆ,‹"°÷Õ£ýè®…¢OzÒ“úüµ½½?ýyßúZËýë_iÉ’%9¸Ï;×$…5{öìôãÿ8ïhö‡?ü!ïÀ;¼Çë1Ñéúë¯ßjwô8^Æ`ä„ :ÿüóó€ÜêÕ«ó Z|M³ÎŽŽåƒ‘ošzS~\Oïíçüò—¿Lïz×»ò®pQã¨É¥—^š7ÕˆšüÛ¿ýÛ.åØe›xoÞûÞ÷æÈxã¹üéOJ/yÉKòÉÝø=h“ŸƒölºüòËóó‘qÿ¶æÏŸŸÇ®â„Û¹çž; 1¸ÁGÈXÒ@Ç‘ä6¹ €â糞Žy± /vÐc\,ÔûÉO~2àq¦»û—ù=÷¹ÏÝê¾rþ ±Ë~wÍh½m ä¼¢|Ô÷Œ$ _ ÿ|5˜cZýÉ,µµÕi¯½Æ—^ÿätàÓÒž0+/|üãg¥G=jZš8qË‚„ñãk·úþåŽÕåšv÷?ÿóÍtÊ)ÿ™6on.Õó¢ôå/,]{í•éCžTúøÒtðÁ‡w=ößÿý­é9Ï99=ðÀ}ùq1Ù>|éK—–~vCßÿ9Ïyi:òÈg”ò[Sé=ûzž”ß}ãŠ×½î¿Ò«^õ¦Ü];º}å+ÿ“þþ÷›Jùîøtá…——^ïŒþnUW×ôúúî½÷δlÙÒ´Ï>óÒÞ{Ïö‡€l4ŒÆžú›§zËRC=×|WÎvϦM›Ø-ÇÍ.]öÍ·O™RŸæÍ›\º¿®”ã¶ÔAFÙbíéÆºB2òÄÒÝÎøj´Š!N¨ÅÁ¸§rY´îŽ0ÊH½=&vñÜwß}óaÓ¦Mù$Ú¶ß{G߯¹¹y»IòÎb·‹ø~óæÍK“&Mê×óìÏs+[³fMžp?¯û‚Ëî¢ù=÷Ü“rà®ìa‹Ø½5nyä‘9o ¹]{<—žZƒÇIÈX5øãÿ˜¿6‚ìW\ѯ÷6þ…^˜ÿ#ñŸÿùŸ}þºÕ±?ïÛÎjùÁ~0ŸTÁÎÁÚI†»Øm=vðŠc} ˆí(ƒ…´ŠIBñ¸pê);ô5ëô%Ó 
4ßìj^ØVä¿x|o9lg¯eÙ²ey·¶8`«I]»šcû’ã=+ïnƒ¡=½g;zÝ‘ÏË»¸mK~@Ús9(žûw”-b¼'~NOy§/cp»:Ž:–4Ðq$¹Mn øù¬·ãyã¾ÞÎÛõeœ©,Žé1©§ckdøY=Ï’Ñv4¶5Ì!õ-#ÉGÈW##_mk cZC•¯ÚÚ:¶ê¹bÅÚÒm‘YzÏM«W¯(}̓iΜù¥šõþ¸ææ¹ƒÏÞ{ÏJãÇo©MKËæü5½e· Ö¥U«V¤3öÉ]Œzzîå®@³fíÛ¯|ÕÑÑžëݶõ­o}6]xá'ÒË_~Æ t.ÚÝŽ>zž0d£‘võü`_òToYjwÌ5èùÁm3Z_ÞÓè$‹èÊ#ïºëÞR–[¦OŸ#£ ±QÉÞ{×ùŽ#Ö?V+϶m¼{Ò—¯xÌAmÝÆ¹{ûð¾~¿qãÆmw[ÈG<â~žýyneS¦LÉ—™6mZ¾ìÈSŸúÔRHš•Cæ}÷Ý—w;‰ÀÔ[0݃vœ¬««Ûi+øž‚õÅ_œƒóÉ'ŸÜ¯¯ÝQûó¾í¬–ßÿþ÷óu÷J è"<æ1éóãcÀæÀ”Ö—L7Ð|³«y¡§ü·£ïµ³×2sæÌ|éköéOVÜYNŒ÷¬¿»¤uݽM¦—ŸƒölÚÑXNYy"WOú2·«ãHa cI»2Ž$·Ém?Ÿõv<ãáŽrJ_Æ™ºÓ{;®ïhLi mGc[ÉòQß2’|€|52òÕ¶:¦5Tùªºº2Mž\›/ဦæëM›Úò¤úÆÆÍM®oM7¶ä®5S§NÏ—‰Iòóç°ÕmcÆŒÝá×L˜09_vôÜgÏX7ì˜ÜßÓûðÛß^š¯=öxÈFC˜võü`_òToYj¨çšïÊùÁm3Z_ÞÓÊÊŠÒícò%¥º´`AC¾½½½ó¡M.ZºIÆuKK»Œ6 ÅûïŒ$•JÐÿ -ÑcgŽK.¹¤__û—¿ü%]vÙeécûXÞ ¥?¾ûÝïæÖïÇ|š:uê°«Ë?ÿùÏô׿þ5ï>rðÁûE€Q’oŸ€¡ËYKîãHr0œ2ò0üòUmmuš:u\š;wr:ðÀié O˜•Ž:jnzüãgå®5ûî;)M™2.[5âë½téâ´dÉíé°ÃŽL <Ò/ Œò,5ÒÏVUU¤‰Ǧ™3'¤… §¤Ç>vF:âˆ9éÈ#ç¤C™‘ößJš={bÞ£¦føf¹Ñ’Ñ¢;;Œ4B Àë_ÿúþóŸ÷ëë¢]yìŽÚßvã!Z•‡W½êUò&Q‹hCÿ_ÿõ_~A`åä'`èrÖ@Ç’†û8’Ü §Ì…|ŒŒ|Uî:´÷Þ[:|ðÞéÉOž“žò”}»&ÕÏš5!MšT›;MŽüãoREEezÙËÎð‹²TaÏÆ¢ÇXü‹ #·E~‹Å‘±H2KÆ¢ÉX<‹(c1¥Œ¶{lé #Kü ÑyÎ9礳Ï>[5ú¡½}K‹îعcwøÿìÝ •õ}èñ?ËòŽˆ°!„J¹Ôn,±hˆA³%«Eƒ h1ÁDÌHÕitbF­XCÕj®Øˆ/˜Ðh| QI¤™­¢¢2ºQB(!jÐRK,ånp¯¿?9‡³¼(*â³ìç3óÌ9û¯ý¬ IDATœç¼ð?‡™G9ßý=ûì³ùò¨£Ž*äzÄo.Ùºuk>œßàü xïγŠþÿ‘œ·Ã9—ó#çGàüªØš›[Òï~·555mΗ±mÚ´åõs™â[mÛÖ’ª«;µÙÏJ}ý`aà=>—:Øþ}pǹܖߟÇmùý¹ÜÖ´}û9™;ÎÑöUüb‘¶ô‹Dhߢ¬¶ oÏþŸ|E?1íСƒìç78 pž%€tÞιp~´÷ó«®]«óÖ·o·ò¾øâ||¾ôeú¦¦-ùúæÍÛÞÓs«öð{p.õî:Øþ}pç¹ÜÎ}'¾újK«0òÝúeíé-ΉcZ'´BHjUURÏžóV©¥eûnÓ#ã ö±€bˆ8±{÷Ny{ßûvî_vQH–.cªäkE^@±^BHÚ!$íRuuU:ôÐ.y«_ž/}™¾4=2&ù>=@qÄ/»èÑ£SÞ*mÛ¶½<5²2’|/§Q¬ ´©ÿ~³°S×®ÕyëÛ·[y_iâPåôȸî õÅÒ±cU:ä.y«´cøÎ0rÓ¦Ó$·liŸçsBHÚ!$¼‰½MÚñ…úÊé‘;®Ç~ŠcÇ4ð®y«´ukë@²½œÏÅŸÚÔßaKoÏŽ/ÔwÉ[¥ææ–Š/ÒiÓÖôÚkÖ  H:uªJ½{wÍ[¥˜ü½ã|®õÉmÛŽ@2BÏ8gIèÐ&þÛËÀþ_(­oßû"‚Œ2¢ÈR _¨/ P,]ºt|}ë–úôéÖjÿ«¯nm5=2ÎïbÛ¾½íýÆ‹xýBHÚ ŸT8:tH©GNy«ÓxJ_¦ojÚ9u(öP,ݺuÊ[MM÷ò¾ø…»’qùê«-¯ßVÜ@2~AG߾ݼ©´ BHxUWW¥Cí’·J›7ok5=2®Ç´¡×^³fE¿ð¢{÷Ny{ßûv’1%2Îß*ãȸ,ÊDðx-Ðfþ»É@ñtéÒñõ­{êÛw羈 K_¦ß1=rÇõ¢|™€ªª:¤ž=;ç­Ò¶m¯ý>ŠÜy>·iSKÚ¼ùÀžÓ !iK„ÐFÄ´¡=:å­_¿åý--Û+¾H¿µ(„Ú!$¼ëÖ5¥Y³–•ãÇOünúð‡g¦––íoz߈&O=õûiÆŒ¥ïÙk½{wõFm†Þ‚ V¦)S¦… WåŸkkkò„Å}™ 
Ù³gç´hÑêô/ÿòë÷䵯]»1õïßÓ›´)Õ–ö]]Ý |ùâ‹óå§>54ÍÛ˜V®\Ÿ†íó†÷©‘öÊAb„”q÷=zHù˜˜ÚxÕU 9˜Œ¸²ºº*?çùç“Æ?bŸ^c{xºñÆ1yßÔ©‹ÒÉ'ÏË×#â¬4kÖ²|ûOLNK—®MO>¹6}ô£r  PBHÚ•Ï=÷Gé{ß;#ÕÕ Úçûµ´lÏ“Ã'>1(-\¸ª|Ûüù[{2|xÿ¼E0Ó#w üyÏåý•d¨­­É“Öäé55Ý÷ú<6ÆíAF¤æÌ—_ûìÙϤ÷¿¿g«ãÿíßÖçËsÎYV®\Ÿzöìœ,XY!ãÏûãy#¢Ü5¤x· !hW">\³fCºå–eûBN™²0ÍÛ˜^}õòüóäÉGç­$BÅ}õÁöÊ—k×nÌ“$K6lhÎ±áØ±‡ïñ~ŸüäàBFD)÷¤,Æ4ÈRYò™Ï‘CÈ]#Êx¼äüÇÅùö%K^(ß~â‰ßÍÏ[¯ùk_«ËÏQ CÞMBHÚ•ˆ#\¼çžyjbe¸n]SžÈ¸«ˆßlâã¾*=ß®!dL^ D¾]ëòåÿñîafüÙBås–þlÆõK>úÅòŸ±2´ü†§«¯>! Ø+¤ßüæãy¢æ¯~õJºæšÑåãâuGL)Žö7ßF Ý‰¸¯¹¹¥Õ¤Ã+®øiúÀnH³f-Ûíøˆw°øv•B˘ÜX)ÉKÁâ®^|qc¾<¸÷^;þLaO¯µô¸•÷3ƈC÷zÆäǸ=îäÝwÿe~­wÞ¹¼ÕqS§.JúÐÿÙíÏðN !hwÆŽ=ŽÄnëJ/Këö±ýS¾_L’x;„ô-Z&L¸+µ´lÏSoºiiŽùBL…¼úêòõ³Ïž~ö³u9œ:uQúÖ·NÉ÷‰mM„1b@6¬_Z²ä…ôgöÓÊ•ësôÏ]W7(ÇŽñÜgõÃò}"h¼ùæ±iÒ¤#ßð±ã¸K/­KßøÆ#éØcg§Ñ£‡¤eË^JkÖlȱg—¥¸3DãywuíµKÒUW5äõš3g\¾ï}÷­Ìadøä'·:>Ö(»òñ§LY˜Éx o—€ƒZz1á1½çŸÿëò”ĘÓczâ•WÖ—¾ˆ#Rœ5kYú«¿V>~…ᡇ&¥+®øijjÚ’CÃ1c†–o›8qXž¶¸|ùËiÆæ<)2ÂÉÊÀðDÔS#ZŒÇˆð2âʈ?#º¬·G 1ä®x`UÞ?þÄü¸qµ9ÐŒ5‹)•JS#Kbúæ‚+óë8°W«c#Ž<õÔï§Oº6M›VïC ¼!!$µ˜FAáÅWŽC‡ FÙа&OO îÅôŘÖåý÷6ïïÑ£ó~{MƔމ×0|xÿ·ýøRÆV)bÈ]E`Y/îªgÏÎ9$-‰p3¦kÆ:V®e¨œ6Qä׿¾8O°ŒµÝ5⌵±q]ºì²Q> À›ª²´uÛ{îÒ7¾ñHž6X©òíiêáç>wd¾¼ýöÆVûcŠbL†ŒÇŠ ˆá‚Á¶êŽ;Ƨo³ÇÛ¾ô¥£Óúõ›Òé§ß™n¸áñtíµKÒQGÍÊûêêív|D’xÆzO˜pWŽã±#Ьœ9cÆÒ´xñ¯ÓEÜ-ÖÜ“/ãù#´Ú'!hÓN>y^ŽåJ¦O$=ñÄäòDŘzQÞÊ•ëw»ïر‡çɆ ¬ÌS#{÷îZ¾í‚ ŽI=öBžÆòœ}öð0ÞtÓÒ¼¦±žãÆÕæ(ñŸØ=„Œ@2Ö9‚É%K^È“4kjºç0²42öǤÈúúÁéê«OxÃçÇ‹÷ã LkÖlÈÑä®S(€öA @›³|ùË©±q]ìbäÍ7McÆ Móæ=—®ºª!M™²0=úèóíÓ Ú'-\¸*O'Ü5hŒŸ#´»çžiòä£[Ý»lÙKiõêWZE’íÅèÑCòVë" ­TšºñbD“1í1މ)‘!‚ÊMcºfLæŒI”={vÞëóF0yüñsósÇÚÇ㉠ ýª²´51¥0bǸ<ï¼y‹PîòËGåÉK—®m5%2ö57·¤o~óñVÇÅ‘ãí·7îö<±?Ë1_{Ñèõן´ÛZD"PÛJÓKû;v¬JS§.ÊÁéœ9ãÞtºæˆÒ•WÖ—ßÃ*#¤Ú'!$mÎW¾22‡1~éK­§8F@b2dåñ55ÝÓŒKÓ 7<ž£ºˆöÎ9gAš4éÈ|{L*Ü“ÒTø?{aãÝwÿežù½ïQžöïQøñW§¹ss¨:n\í›>^¼µµ5ùz]Ý <­óÿpFžê ´?Õ–€"‹©‚»FˆÃ†õËA]r¥P±$ºˆçÍ{.54¬Iõõƒóýo½õ´tÖY?L—\ò“IF¤S$o¼qÌFŽk×nÌ·ÇtHö.¦nÆViݺ¦|Áj¼¥Hu_LŸþH^ó;îŸÃȘæYŠ#€ö¥ãëÛ´úúúAKËöôï<þâ/þ9G‹·Ü²,‡tR>&®Ï™ólúío7¥‰‡µºÿ‘G¾?Ý|óÓé¿ÿûÕòmÑqƇS§NUiÈÃÒ9ç•fÎ<%õéÓí _Ë£þ{úÜçŽL#F ðƼEvìØ!mÙ²-û“FÄïßEÌïY÷îÒèÑCÞô½> &Bp`-X°2ý뿮ɓ÷dÆætæ™÷¤Å‹' Æ4ÀãN>y^zþù¿Nýû÷ÌÇÕÕ ÊqäÂ…«òÔÆ{•cèÐ>i̘¡ù¹–/9O Cîíy÷&&IòöôìÙ9]}õ yÛWÁ^qÅOsDùµ¯Õív{LòŒÛã}‰“ñÞ~õ«Çå ¡ÀÁ©Êp 
ÅtÇ3–¦Y³–íñö³ÎúaZºtmzúé/ç)‚sæŒK7ß<6­_¿)Mº¨Õ±ÊE8S#wuå•õ9¢\²ä‹Þ†ÌžýLZ³fC~ÿv ä„ w¥nx<ßvÁÇäËØwÉ%?±xpBp@EØS#j\¹r}«Ûb²cLú‹røðþåý1õ/¢Æ;ï\žo/‰©#F Hsç6æx®RìÿÍoþ&wÞ‹ÞFDèQkL÷<ûìá»ÝŸ™xÿ/¾ø¸ôè£_Ì“&ï¿ÿ³ù2âÈÊÏpðBp@Eyë­§åèíÌ3ïÉ—%À=ñÄä8†¸íœs¤cêê¥êêªôõ¯/nõx_ýêqy*äm·=³ÛsÕÔt·àmH„±ëÖ5å÷ºgÏέn[¼ø×yZdX¶ì¥Ôظ®|ÛEÌ—÷Þ»Â"ÀAH À7vìáéòËGå˜-¦üU9r`¾Ü°¡9üÜÀ=ôФôðßO'KË—¿œ®½vIùøñãHõõƒSß¾¢Ç¶®¶¶& Ú'OvŒ÷¸rbèôéäà5¦…öîÝ5DZ^ø`þœÄþÒg¦$‚ʆ†5_ߦÕ××§Øàݶ~ý¦ôàƒ¿Ì1ÛOü&Çj2Ü»ÕqÂýàËó„ÈR¹}ûké{ì…töÙÃS¯^]RUU‡|½t mWLü<å”?J¿úÕ+yÂgLéaÊ”…iذ~iæÌSr{ÜqJ×]·$Ýxãég?[—V¬ø¯ô•¯ŒL#F ÈÇÇÔÈOün:úè¤Ãïkq jhhHÕ–€eÉ’Ò„ wåi}•Î<óžôì³çåð­$"Èø9&–ÜwßÊ<Æ$Éx¬â8¸Äû=þÄ<å1ÂÈ’ž=;·ú|Œ=$ýâ¤nx<‘55Ý[}â3á-ж !Ø/šš¶¤E‹V§ñãØãíÍÍ-9xŒ -¢ÇáÃûçû̘±4]qÅOÓ9ç,H=4©||mmMZ°`eš9ó©4iÒ‘iÞ¼çÒ=÷¬H?üù<0&Jr𪌠C°¿®]»1 Ø+ï‹ÏÒ´iõ9€Œà±ò3Ÿ·==Ðöø×ö‹ /|0O{Œ`qO.\•#¶¯~õ¸A†Ù.¿|T3fhŽ(çÎm,Í5£S×®ÕiêÔEé}ïû‡tÉ%?I—]6*ÕÕ A¶C·ÞzZŽ?~nþ¬Ä´Ç’ˆfãsQ©4u´4E2>{ñ:÷Üí6‘(¶Ž¯oÓêëëSlðV­Y³!‡‰1¥ñŽ;žO÷Þ»"‡Ò긧žz1ÝwßÊ<1²B–Äñwßýóôƒ,OcǞ㵚šîiܸÚÔ¯_tì±Ó-·œšÎ8ãüŠh6ÞÿŸýl]ž Ÿµ_þò·iРCógdW?þñêôÈ#ÿžŽ<òýiΜgóÄј(Ù½{§tÌ1,O• wÞ¹ðž9v¬T:îV¥'Ÿ\›&O>:UUuÈ[<îèÑCrIûÖ§O·4qâ°üyøÝﶤU«~›&M:2GŒ%æÎ˜±4Ç7nN?ü«ôÒKÿ/oºéäô·ûÉAÆäÒ-[¶åXò¬³~˜žþ?ó1@±DYmx»"(‹)zS¦,L#F È“çÏŸ˜N>y^š>ý‘tãcÊÇÖÕ ÊS#gÏ~&}á ÃÓÈ‘[=Ö‹/nLƒ÷NëÒ7¾ñHš6­Þ³GñYŠ­¤¥e{Žro»í™´hÑêüóС}òm—^Z—®¼²>uíÚúŸD¾ýí§òg-Äm×_’…€‚ª²¼]>Þÿgóõ˜ª×ÜÜ’'8>úèÓe—juluuU9Œœ0á®<¹¯$µØÎ?ÿ˜të­§¥³ÏnqÙgâž~úiéÒµé‚ ŽI¿üåÿ.‡ÿøÀÝ"Èp÷Ý™?“ñ™ “µµ5 ªãëÛ´úúú¼U1òè£?¾õ­'ÓÿüOs3fh0àÔ½{§ÝŽ2ä°ôê«[Óƒþ2Ý{ïŠÔ«W—´qãætþù¤§žz1Íž}Z)cÊ$ì«SNù£4jÔ¤™3OÉ×ûôé–£Èûî[™Î;oDž4º«Ï}îÞ´|ùËùóÓ${ì…4n\í£Ià½ÓÐÐük>ïXÄdsæŒKë×ozÓc¯¹ftjjÚ’fÎ|*{îò¾ˆÏn¾ylŽ*á­ŠÉŽ1Õ±ÒÚµó垢ژ ñcLüÖ·NIwÞ¹<ï‹ûˆp x„ì'Ûçc#>;ÿücÒ‚+óÏcÇž† ëgÙo^|qGYSÓ½ÕþÙ³ŸI³f-K#F H×_Rù³;~ü9¨ŠG À{¢¶¶&]zi…à]ñ©O Msç6¦þý{–÷54¬IS§.ÊûæÏŸ˜'‘–ˆ  ¸ü«>pÐ7®6ýæ7S7lhN&Ü•¯?ôФ4p`/‹m„8(ÕÔt/__¸pUZ¿~Sºì²QiøðþÚ!$pÐ+M€üÝï¶X hc„ÀA¯¾~pºôÒºtíµKÒG>ò}¾ßÔ©‹ÒYgý056®³ˆð©¶TZ³fCºêª†´hÑê4thŸtë­§¥ÚÚ C›wÍ5£Ó§?]›ššö}*ä?Ø+ÿ]8ê¨Yéâ‹ËQ]í÷LÀÔñõmZ}}}Š €ömݺ¦tì±³Óãÿ&uîÜ1­\¹>ýàËÓ—¿üÑÔµëŽß­ÙÆ›S÷î,mÎÀ½Ò!‡íóñÇ÷¡tÞy#òçýê«ÿ5½öÚké„þÐBÀÒÐÐ „`§Oünzå•WÓ“Ož›þáNÌ“ïx`Už9|xÿ|ÌŸÿùíé¶ÛžIãÇ!†ä ³`ÁÊôío?•žyæ?R¿~=RMM÷TUÕ!ÕÕ JÿùŸMé;ßy:MštdêÝ»«Å€ 
BÈ*Ë@X¸pUZºtmºþú“rø&O>:_ƤȒúúÁ©±q]ºä’ŸX4*óæ=—&L¸+ÍœùTš>ý‘ô‘|'_/ùèG¤ææ–Ôаf¯ST€ý«Ú–-{)_Ƽ’®]wüoäÁƒ{—÷]}õ 9öš;·1}íkuåhÚ²/¼ðÁ<éñÑG¿˜?ó³g?“ƒß˜Œ:qâ°tÓMKó±#GÜíþMM[Ò”) óDɸi‚*ðÎ !ÈFŒ/K“G’'?†]cÇË.•î¹gEúö·ŸJŸùÌéÇ?^-Z°ž=;[LÚœˆ~#fœ4éÈòçý‚ ŽIöÊS"¿ùÍÇÓêÕ¯¤+¯¬ßíïCLL=ýô;óDÕø{TlìØÃÓäÉGç)x'Ÿ)9yòrõžüü|ôñÇ# ApŽQP&òr8¼õî»›5~|o{ß0ÁÈqã)$¤±bcÃè(¸%SµS§ Þ¥t ÿš*óæ%*,¬™­i¾àÜ" (“ BFG·W\\²n¸á?º÷ÞefæêÉ'”˜¸ÇVÂ+ GîÆT†<6™‘qDƒÍÕÚµi80Do¼1Ìîs¬ÔÔ,»Î×—Ó/œMœ‰aªÙ™pceÌœy®¼òu-Xd#0ÐOï¼s£ Iž`ãÆt‚LO϶•"gÌ\6I6U"MÒ„ x &MêK €³„3ðà–-ۦ뮛g\£G‡WøyAAþZ³æ.Íšµ^?ý”©¶möùN§/ aBĦªùþŒß»t½ ?Ž·Èn vjäÈn6ùÄ ö;·té­•€“„`‚\¦‚Ýí·/ÐÊ•;l «¢ÕêLë¾û"èDx¬ÈÈ6ÚµëãÀ¦Bê¨QïÙï•©œjÂ%U"çÏ߬›ož¯—_^k«C€3S«x™%³Ü AvëÖܵÎÓk¯mÐÒ¥[ݞʎ@6U¯¸â_*,,Ò;ïÜ¨ØØ0y{{•nïÒ¥©¶o? Ù³7èþû/9n¨œøøxyÓ à¾fÍZ¯Ö­ÿ®E‹¶(0ÐO/½£¯¾£Ü\—6nL§ƒ€*X»6M™™¹;¶§bbBËܧ{÷@˜ä{À™# nÌå*´ËÕWÿ[“'/·÷{öl© ÆjèÐNtP©©Yöö⋃N¹Ï»ïn¶·õè0ÎAHpc¦b =FG·×_ÿú¹®»nž­R êÌ÷ÉáðÖŠ)enŸ:5^ ;¤à`'À" n.$¤±–.½Uo½5\7¦+9y?œn3¦‡^~y­-ÚRºÞ„ŒGzOÓ¦Å+(È_o¼1ŒÎà,pÐàbcÃìàÌ=óÌ@%&îÑÕWÿ[áár:}µvmš²³óÔ³gK½óÎ'Uƒ4ÛÍ6P9!*É××a+­þío_êí·•’’iCŽ×_ßÅV‹4ÛK˜*¬&,V\\²>ûl´¢¢‚é@* $@˜°ãŸþÔ×.eq¹ mPò±Ç>·•" ¡"$UàM€ûËÈ8¢ÄÄ=tð1Õ{õzE<²LõÙFññ)j×îYÍšµž ¨ n,==[£F½§e˶ÙÇAAþúÃzêúØjvÎþwnâÄ8ÍŸ¿ÙV„4ßµI“úÊéôµU!MuÈ;ï\¨à`§¢£ÛÓaT³\ÀM™Öu×ͳ• ï»/Â>^´h‹&O^.??»ÀÙ3o^¢Æ[¤ÌÌ\……5ÓÌ™×(""¨t»ùÞMŸ­9s6Úï!AH*† $¸©Y³ÖkíÚ4}ñÅ¥a¬&MêéŸÿ\«1czÐAÀY–›ë²‹ ;šJ‡÷Iû˜ªfS9T AHpS}´EÇw) A¦¦fé¹çVéÑGûÛÊt†©`·bEŠ^z)†ÎÐèÑá:´“œNß2·›ä„ ‹mÅÈ‘#»ÑaTAHpS&øä_úxâÄ8ûxìØž¥ëV®Ü¡øø: 8KN‚\´h‹ A¦¤d*<ø‰œN_ª2᫉ãôôÓ_ꡇ"5rd7» Ào÷¼ùæù¶ò£ #›J¬<ÐG>Ú_=¡À@¿ãövjÞ¼D[9ÒÜÿU«x™%³j¦ßÿþùû×Ѻu°a+S Ò„©¾úêg½õÖ·zõÕõÊË+P-äãS‹α[ny׆{ì ®íÛØJ¦ äš5;õá‡?Ø€²ù>&%e衇–jÿþ>œ§k®éHð«øøx‚àžzj¥Fꮫ®ê`ûú:Ô³gKˆlÙ² ^½ÿþ÷6|u÷ݽè0à›1ãkEF¶Ñã_¡.]šÚeúô/Ô·o[ ÞEÏ=·Ê.æ{9mZ¼23smhò™g¾ÒwüΛÀ/AHÝ5_DD¶nÝÒz‡ÃÛV 3¦‡fÍZo’Î=‚LHØa«Bšï¡yl¾§O>™ ¥KoÕ_ܡɓ—kíÚ4 ÚI=©ðð@uê À@?:€c0ãj°ÔÔ,]rÉ,9¾¶¢\IèêD%H¿{îémÃÇqqÉŠ‰ µëî½7B7ß<_Glàñwn<éy& ŽçM@ÍURi.))Æ"{õzEóço¶HçOXX3j|ä‘e6øh˜°²‘’’IP µŠ—©QQQ2  fñóóÑðá]4bÄ…ÊÉÉ×òåÛõÖ[ßêð!ÉÐÐ&òñ©EGçÁUWuÐK/­Ñ“O&裶hæÌuò÷¯£?ÿ¹ŸêÕ«MPñññ! ¦˜;w“Þyç;õîÝê¤pcãÆuuÍ55fLÕ®í­e˶Ù}gÏÞ`·÷èÑÂ#üv|}6¬³¶o? 
Í›÷Úï᫯^kÊ bLÒA7@Í0mZ¼’“÷ëŠ+Ú+**XÙÙy¶"ä±ý4}z´z(R/¼°Z/¾¸Z<²ÌV vÒ‰Ào,(È_o½5œŽà Pjˆ–-¨aÃ:ºûî^Ú´i·:vœ¡~ȰÕ!Û·o$oo¯Ò}M%º¾}ÛjüøÞêß¿ÂÃé@Ô8T„€ÀTÌÈ8b«:𥄩ð8wî&»tê ›n ³ÁÇ€€z¥û˜@¤©  f1_ãâ’j¿Çx2*B@5ærê²Ë^ÓGm±• KúÙУ FæääkÕªT-]ºU3f|­¯¿NµÕ#MP@Í´~ý.]{í[úæ›tû=?¶â+žÄT„$ ÕØâÅ?ÚpãÛoßPl4"·lÙ§  5kV_C‡vÒý÷_bïÜ™¥øøýë_õê«ëuÕUì>ª§ÔÔ,Ý{ïǺýöZ±"ÅVpõ÷¯c¿ÏAš2å3|îÙ³%ðH&éM7@õ•™™ko×®M+]÷ä“ 64u,__‡ÆŽí©5kîÒ† c5fL9Þ6L zÊÎÎS¯^¯hΜr:}me×ßýîe¥§gÛíÑÑí¦ÇûÜV‡ÀS9訾LµGf|ðÁOôÑG[tË-Ý4þfßû”Ï ÔÌ™×Ðy@5—lCÓ§Gë¾û"lðù²Ë^³ÁÇ3Û}Ìw~îÜM6$Ù†Nx$*B@5æçç£/¾¸C11¡JHØ¡;ï\hÃR¦B¤ Q™Šrj¦€€zövøð.¶ªk` Ÿ^z)F³f­/­ YrKEH€'£"$TsÁÁN}øáedQ¯^¯Ø`” AšÅéôÕØ±=5dH(Õâ€Æ|gMÒT|œ:5Ê®‹Žno‘Ï=·Juê8luHó=7•^ðT! †0ãLò©§®RTT°Þ~;Q/¿¼VO<‘`˜4•ån»-\aaÍè0 šs8¼õÌ3uûí ìãñã{kÙ²mö¾ùN&9{öP†ÀSy/ES§NÕ”)Sè ¨æ’“÷Û`”ŸŸO麄„zóÍMZ° É%½{ÿÏVšPý™ŠwÞ¹P¹¹.ûØ$cbBuÏ=½mÕH‚ÀS™ü#gΠš3g£ C Úé¸ÐcHHã“ö5A)³˜Êr¦š\bâB@ òî»›mÒ|¿ï½7B±±a|‡8AHxŒ‚‚íÚµKÙÙÙjÑ¢…6lX#Ú€gzòÉ%%eØ0dtt{]} ÞEN§ï)ŸcªÆ™*rfPsLŸ­‡ŠTÏž-íw $Ü^QQ‘/^¬ 6ØÀb‰FiĈ ¨–mÀ³}øá%$ìкuiZ»6M&,VFÆ=üp$¸™Nø[2å! ·÷þûïëÛo¿µ•»uëf«5&%%iÓ¦Mš={¶ÆŒcƒ‹Õ­mx.—«P!!í2zt¸]—›ë² àiB­¥¦¦Ú ¢ #ÞvÛmªS§Ž]ß¹sgyyyé›o¾Ñºuë]­Ú€g5ê=[ýñúë»hèÐN ô“¯¯Ã.€§ñ¦ àÎLѸôÒKKƒŠ%"""ìmbbbµkžÍT~LHØ¡qã©E‹¿é²Ë^S\\2pJOO×ÁƒOÚVTT¤Ý»wëСC•n7''Ƕ›ŸŸ_æö¼¼¼r·0¾rÏñ•y}æu9rä”ÛŽ=zÒ6—Ëe·™~Œ¯_y6Æ”PqµŠ—©QQQ2 àn>úè#ûþ€T·nÝã¶ùùùéË/¿´†=zœf<ŸmÀ³ÅƆéúØj=z´PNN¾‚ƒêÒ¥)PŽèŸÿü§V¯^­°°°ãÎã}úé§zÿý÷U»vmµk×®Ríšsï¾û®TzÒö… jñâÅjÚ´©š7oÎ_yÈøjÛ¶mš3gŽÒÒÒ~ܶ+Vè½÷ÞSVV–:wî|ܶ„„ÍŸ?ßög›6mø ÀøŠñ•cL OEH¸·ììl{Û¨Q£2·—¬/ëê ç³mÀ×סž=[jìØžš={¨†ïB§*55K $i„ÅêÕë»ÊÖ¤IõéÓÇ^YßLì*±gÏ}õÕWr:ºì²Ë*Ý®™”f|÷Ýw¶íc™+¯'%%Éáp”9É €ñ•ûޝ‚ƒƒíëܱcÇIUzÌ„v#99ÙVw:Ö–-[ì-ãG_1¾cJ¨8]we®fQXX(yyy•¹OI¥ÆÊ–ƒ>—m<—ËU¨U«R•””¡O?ݦÓíý~9²åèׯŸðµuëV{ÛµkW-Z´Èžß‹‰‰±“Š*ËLP RjjªtÔ±cÇÒm?þø£òòòÔ¥K—Òs„Œ¯>ų³ÿûwe§ÓW11¡ TFÆÍšµ^ úÒqPsžnÈ!š;w®–,Y¢ƒêçŸÖ…^¨:T¹Ýððp;‘ìÛo¿=n"Ybb¢½íÞ½;_yàøªS§NvÒº ”LZß¾}»­ØcoÞ¼Ù†J&­›Éíf›y`|Åø Œ) ÿ†ÒpW¾¾¿L9±<ô±JÊ~×­[·Ú´ ð“'/× /¬¶÷}}ŠˆRzz¶ =~öÙh……5;nÿë¯ï¢AƒæÊÏÏG÷ÝAÀ)˜ cf☙ôµtéR{~oÀ€gÔ¦¹2\\œ~øá{žÐ\=???ßNN2çCBBèxÀøÊÇW¡¡¡òòò²¯»Ä¶mÛìºèèh%%%ÙmQQQv[É~LZ€ñã+0¦€Êñ¦ à®Ì`ÐüÃoJ„›\Yrrrìmýúõ«MÛÏqÛmázøáH½óÎúùçûõñÇ#m•ȇŠ<)iDG·×Сôâ‹«é<8 3‘¬„¹Rú™ž·3çÍ•ô].—xd˜Ieæ|aXX˜¼½9ý_yâøÊôC«V­tøða¥¥¥ÙufÒz`` 
5j¤Ö­[Ûõf»©Úc*ùÔ«WÏ®Œ¯_1%Tÿ’Ám™ bI5Æœ´Ý„Íz³_ƒ ªMÛÏѳgKMŸ­áû(  žÖ®M³!ÍãS ôSrò~:ÊQPP O>ùÄNîòó󳃎½šzU…‡‡ÛÛM›6ÙÛÄÄD{Û­[7:0¾òàñUI%Ó'™™™Ú¿¿Ú·oo×]pÁöÖôÙÎ;íöK*þÆWŒ¯À˜*Ž $ÜZÉ?üæüýôÓOöj¦¬¸Oµjà™Ž_NÝ$$ì(s{||ŠæÎݤÆt”#!!Aºøâ‹c×-^¼Ø^ýþL˜ófbÚöíÛmûæ\aãÆíû_yîøÊT^2¶lÙb+÷'NZ7ÚK óWo" IDAT%“Üã+ÆW`L Gn­¤\øš5klèæ Ë—/·÷»wï~Üs̾}ô‘:tÖÛ <¦B¤ 9N˜°Xsæl´•SR2µ`A’®»nž š«ÌÌ\MšÔ—΀S0¼¾øâ ;á+**ÊN 2…ÌUÔãããÛ·¢çK˜+¬›«çéý÷ß·çKÎ0¾òÜñU@@€ ¤¥¥ÙêK‡CmÚ´±Ûš5k&mݺU?üðƒÝV2¡0¾b|Æ”Pqºî,$$Ä.æJ³fͲW=0¿ÿþ{íÝ»WaaaêÒ¥ËqÏ1ƒJ3Pl×®ÝIÛδm 2Lê¹çVéÞ{#Ž«þ–›ë’ËU(??: p3¦"ä3Ï Ô 7üG·ß¾à¤í:èÑGûkøpþþ §²páB;ÁkÀ€öü1hÐ {üU«VÙ‰`Í›7·ë+znðXáááúòË/í„$ôÀøŠñ•™?f~ŽŸ~úÉNJ7“ÓK˜`úuë”››kƒµk׿ƒã+ÆW`L •DEH¸÷ÜÛ[±±±êÑ£‡òòòìÀðóÏ?·%Ãûôé£aÆÙ}ŽUXXhoÍÕÎvÛ@e<øà'ŠOQ` _éº^X­-þ¦ וW¾®¤¤ : p311¡Ú¾ý>͘1X£G‡Ûeúôh}ñÅÚ°a,!H(Gbb¢~þùg;IÈ\¸´D£FÔ·o_{.péÒ¥¥ë+znðXM›6UëÖ­íý   J=€ñ•ûޝ:wîl+0¡¡¡Çm3ÚKtíÚ•Œ¯_1%T!áöjÕª¥«¯¾Ú.¦\¸¹*‚)#^3¨<|ø°XžÕ¶€ÊÈÌÌÕ¢E[ôÆÃJ+?.[¶M&,Vp°S†Øí'ÆéãGÒa€›1èñã{ÓPIfò˜¹2z:uNÚvÙe—éâ‹/¶çøŒÊžWÓŒ¯_•0!ƒ?þñ6¬PRÙ©DHHˆ&MšTæ6ÀøŠñS@Å„„Gq:ån7M‰hl<Ûm•‘’’)—«PaaÍìãìì<·È†£>ûl´ C>ñD‚¦M‹·û9ÞtàAL0Úü~8¶b,àeM"+q졪ž¼‹bcÿ{ÕÖo¾I·È’`÷‘°C'ÆÙdP¿æÏ߬Q£Þ+ÝþÖ[õqcºæÌÙHg@œÉ¹Á 6ØÛîݻӑŒ¯_ð¡"$ð«fÍšÙ¨.fϪ!CBµråuíÚL£G‡—V~3Õ!MjüøÞTƒÜÐ’%Éòõuè‹/îPXX3ÅÅ%kР¹JJÊP§N i¬¨¨`½ûîfÓƒ€J:“sƒW_}µ½ ¤#_0¾à7Bª1SÒ,'Z° I‘‘mtï½tà†\®Bv4!HcàÀ{Ñ¢-6i„‡ÚÊ‘€ßÈ_0¾à·ÇeÂàÚÿ”¡Iœç_0¾ f  cß¾}Ç K˜+g˜m­Zµ¢“pÖÝw_„]ŒììuÑ QjÒÈ_>µkkGÚ=3k¾VóƒçºÔ¨I3yÕª¥]i»t(ë ù×Õ%¿ë¤;n¤×^®»÷éë ktÿ=Ÿéž}tË-·ØŠ™S¦LÕº„%ëÕjÛ§¸í_¦y*iëÏzûÃx ¸êJMûË_4rä(qM®_¿ž=ÙY";;O7¦«iÓzêСqñ ‹¯‘;q:v žžnÿ§î­d2ÎîøÊÌÃ1L¸¯¦NÒ?|ø°^{í5-|o¾.j¨Ø+z©}›þrÔª¥‚‚öüt•šµl£É~\½{÷V»vÁ6jB”»víÒ7›6é½÷Þ×½}MÛ4Ñ}¿®ë\ZüüCú`ÙWºþº·Õ2¨­êfêéIwÉå*P‘ýO&%`+MvîÐZþïh­OüQÏ<ý˜¾ûî;=öØãªU‹ ¾à ã«­Zµr‹ŸËŒ‘fΜ©K/½T!!!š?¾•ˆŠŠR¿~ýøJ‘àŽaBŽ»veWhß½{hß¾µní¯6mòÇ$7bÒ;vìà„7€‡j×îY¥¤d–»Ïöí÷)8ØYåctîü‚’’2Îé1Êc~>ósžoEESùÀ€jiñâ5ÿß³Õ²QmM¹{¨š8•Ÿ_`Šf1•ÓvïÓý¾¤G=ñÔ31âæÒ ð—0AP³tìØQ7ÞpƒvîL³Õïþó?ô?·ÅèòK~§›¯¾\—^´G“Ÿþ—øÕÕáœ\ùÕ««ŸRwkËöŸmð²g÷Žòöª­|WžÂ»†èå¿Þ«{þ÷\@',’[À1’“÷Û’ÚUXX¤Ÿ~:¨Õ«Ó´{÷a:ÐM˜Rñ #>µèÜ( ÀšÍ‰S‡ƒ_“ž&""HN§o¹R]µ*U¹¹.ùúVí|Rdd{^1==ûœãTL›¦íš*++ËM27÷ä÷Èz÷î]ü>FÈÇǧܶL299¹øýv*,,Œ?jùòåšþèTE_ª ·ÆÈUP¨£yù'íçU¼o›ôÔ«jÓ¾“yø¡*ï§Ÿ~²c–;n­÷ß_ 
—ç.Ôäÿe«N¶nÑTÏM¹[ÿ7}¦öÈÒÂ… K/ôðÙªºã†¥íx{{ëÀþ =zTõêÕã5R:uNZW¿~}{›““CJ‘ðŠýüs–=Ùw¶˜“•{÷VÛ¶NùËÛÛ‹N®A|}}íMÒ„"àYLðpèÐNš3gã)÷1ç,HRllÕÃs11¡š5ký9=FYL›góühU Réç¬]»VË–-+3Y";;Û†LPrذajÙ²å)÷5J,0•$€ç1JŸ:õ/J\ó™sµÚ´hªœ£y§ÜßTc\òùZý¸c¯Vþû=Õ®]»RÇ3¬§L™¢•+WÚÉýƒ R¯^éÿML·^?@-š5V^¾K}ztÑÌé5õÙ7´)i[éó»^\|LGiHÓÌJóóóçbïÀí˜ >pÒ¿t<ÝÑ£.íØqð¬·[PP¤mÛhÍš4ed¡£ksÅWJ©x®~ý‚O»Ï’%ÉgtŒ!CBÏù1~«6+Ë„M§O®ðþ&¤0oÞ<-Z´¨Üä±222ôÚk¯)%%¥Ìí&ü¸eË{?==ÝxsÑ„{ï«fŽ}züÛÔ¢iãrC%>ø$A÷ÝwŸ.ºè¢JÏ\Ì¡ÿþŠ‹‹Ó¡C‡ìøã­·ÞÒÇ‹«~ÃÆZúÅZr4LбS‡Özù±ÿѨaÑòòòR³&NÝuó`¹ ì>&°wÿAu½°ûi«`àBÂã™°bAAá9k?''_‰‰{ôÍ7»uøp>^C4nܘ $€‹Žn/‡£üSêññ)gt SÑÏÏçœã·j³²¦L‰Rxx`…ö5_ýu%%%Uú8&Ühž»gÏž2ú!Þ¶]²Ÿ #÷gæMž|¸ô± 26iÒÄ^˜!¼{wmÛ±ËVx,a*CúÕ¯§?¡¿þïhŽ\ñõ&Õúµ:RíÚµôÉçëÔ=<œ7àB£eeÕî݇“c8£µkÍE÷)?¿ ÚõEQQQq_ì.½zª¹:jbb¢öîÝ둟úõë+;;›/ €‡ òWdd›r÷IIÉÔªU©U>†©Šh—çò'2m™6ϧƺネ ïoª@îØ±£ÊÇ3aÇ9sæwþÏTƒLN>¾2æ™T.WÞxã ]Ñ¿¿¸Òuÿ×ÊÙ ¾r+PÒðööÒãë§ÖAA•>þ‰]èÚµ«¾þúkµiÓF—]v™RwPNnÞIã³nøà¾štÏ=:ãM½·d¥êúÖ±ÁÈäŸ3Ô·o_Þ\€GpÐðdÉÉûÓã™°aZÚ!íÙsXÁÁNµjå//¯êÑ»víÒÌ™3u饗ÊÏÏOK–,±ëëÔ©£‡~Øã>õêÕóØ(~ѯ_ði«'ÆÅ%+""¨ÊÇ2$T $ÓcœØVy:u Ð÷߯rÛƒÍ=í~/½cC ±iÓ&­_¿þŒî#GŽhùòåºæškìcs¿¤d „ìÓ§|ÜÐwß}¯ÿý߉úñûMzd\¬.ë}a…%L%Æi»U¯¿[TYAAAZ³fMéã­[·êª«®²•«û_~¹Þzc¶eQÃõUXTtÜssrêêè5ô¯¯ßX¨&N……¶U‹6!jÛ¶-o0À#„„ÇJO϶!Ï—«Ð†0M(Ò\í´qãºÕ¦_ÌUOMeÈvíÚÙ«effzäçÃÇǧø}rñEð`±±aš6-¾Ü}V¬H9£cÄÄ„Êáð¶çÏÕ1*ÓVU—æõOœwÚýF?mÌyyyúä“OÎÚÏn•={ö´÷·mÛvÒvS%¸“'œ={¶&Mú£.¼ •^úÿج±­Y××§t¿ÂÂ"å»\ööT¼¼¼”››§üüü*½– &ØJ×%Ï7jؼy³Æ§!äåíuRòX¦dßÞªkh[ýéé9zý½|ÿßɼɠƪU«Öq·Çòöö¶ã¯²¶<AHx¤‚‚"mÛv༿Ž#GòµiÓn5iRW!!MT·îùÿJšä…^¨aÆyö/G‡ƒ $€‡3ÕƒƒJI9õÅCv(55KAAþU:F` Ÿ švÎÕ1J˜6Ê;Žqýõ]ªÔöO$())£Ü}üü|ôè£ý+ܦ©Ú˜}VßS¬4E=±ä/¯Ï=n¤¨¨H?þ¸f<û´¼ë]så%:p0[ËW®×7›“u +[Þ^Þò©íPç6êÙ­£Z4o¢¼ü‚2Ç êÜJ‡®PVV–üý+76»üò˧Y³fé»ï¾SíÚµuë­·jüøñÚ™–&_ŸÚªS¼•Ó† C6ò÷ÓÃccõû‡þ.ßcœ5MóæÍõÈ#ØyÛ'2c¥?þñ6 @ ‚ðH?ý”©¼¼‚józöíËÑ;ÕªUµmë´W|=_6l¨˜˜ÿŒ˜“œgû¤*jžCôòËkO¹ÝTB\¶l›­tXU×^Û©Ü€âÙ8†aÚ(¯ò¤ *V´Zã±LPôé§¿<í~3f ®p˜Óœ«3OÇTVûöíh'ΙàBzzº¶lÙRüÚRNx­)§l+**Š<nbÿþý;öneïݦy3&©aƒzzuÞbÍûR~Îu »Põ[«¨¨ÐŽ;–¬Û¡·>þZá¡-uë°+ج‰ S¡ÑUP¨à @ù: µùû$E\ܻү«ÿþv1h7ã³ÉÉ[UTõêÖ)·*¥‘ï*PÛVÍô÷?ýAÜ?Q¡¡Ö•7ÔHæ‚U§RV@àÙø—'''ß^é´º1Àúùç,íÞ}XíÚ5R``ýór‹€€€r”€'¹é¦°rƒÆŠ)gRŒ‰ 
Õƒ~rNQÒFyzöl)_ßÊO#˜81N™™¹åîÙF#Gv«p›k×®U^^Þi^oO$¨W¯ÞIÛL0²OŸ>JMMÕâÅ‹•––Vn[AAA á€ÈÈÈз߮ˆÎMuÓ=ãõÁÒ•šñ¯…º¤ïzóí÷Ô½{·â1ïIÏÛ½{·æ¿ûž¦ýãu]Þó éa+[ÒÛ5SÜ’%U B–8qRÿ| .‚äS|¼Ü£y§}¾ i†wé Ø!—ê¸KK—.+Õå͸5oºžfëÖ§½jÖùd*UþðC†Ö¯O×ÁƒGyÃÎÛûÇUD ˆˆ Ó†,HRn®«ÊÇp:}O»Ï™Ã<×´Qžë¯ïRévãâ’OÛ®Ãá­gžho+Ê!OÅn½õVÅÄÄ”‚<– 8Ž3F½{—TˆŒŒ,­Èj®ï¾ûN7ßtƒ®‰ ÕCúé/Ï¿®é¯¼¯éO=«¿9W_ܻ̤ѼysÝs÷8½ùŸ÷õóáúztÆ›**,<îbö…º¢ÏïôñGW-òL>rDñŸ.ÕW_.—« â㻣yºùê(ÕUŽþßÿ{’7àöø+><Ê9ÊÈ8R#^ë¡CGµaÃ.mÞ¼÷ŒNh¢jŽ9"???:ÀÙdTTp¹û˜jˆ«V¥Vùóço>í>gz óÜÓUm<ÝÏy"—«ÐVƒ<‡Ž´Õ&+*99YÙÙÙens:ºãŽ;lÅÇŠ2ÇÁƒÛ°cYZ¶l©ÐÐP>ìÔp;wîÔ¨[Fhäà^ºê²‹ôø‹ÿÖË×ëÃ… {S…Ûi¨Wþù’.‰¾Nÿ˜»Pu|j—nËÏwéÒ^ª–ë–.ûô¬¼î9s^W?o…¶ ’« âAHÄ4ãœ?M¡ÿüû %%ýÀ‡àÖBÂc˜?ü$'ï¯q¯{ÏžÃZ½z§RR2íÅü¶nº)ì´û,Y’\åö?ýt[…ö;“cÄǧ”»=(È_:TªÍ'žHPRRF¹ûú顇"+Õî–-[Ê\ïp84|øp5kÖ¬J}­=zœ´>**ŠjÔp999óûßëŽa}Ñ]o.X¦÷—®Ö (""¢ÂíäååiëÖ­ÊÈÈÐÿ=ø€¶íÍ×ÂOV–†!M ÈÚµzàÎëõÀÄÿÑšrªXWÄêÕkôæœêÁ»nTAaå熙àd«ÀÝ4äR=ùä|n¿äÃcìÚ•­ÃÆ[J IDAT‡ókäk/,,²AHˆ4ÁHœ{YYYT„€U‘J‰óæ%V©mSUqÙ²Š!«z ãí·Ënll˜ŽŠO!0ç/Ÿ~úËÓî7sæ5òóó©Ôkݶ­ìþ¸êª«tFïeLLŒþø4Õ CBBøPÃM:M][7Ѐ¾)u×^½úÎ'zóßo©_¿¾n#..®xÿ~êÖ­›ÂÂÂ4nÜ8]7ìz½ðƇ:pð¼½¼~¿(¬c{Ý4°—FÜ|³vüüs•^óÚuëuÃðaË·nQå‹äçäæiPTo}·q­ qà®BÂ#˜“‡Û·gÖøŸãèÑmÞ¼W6¤ëС£¼±ç¹J\ݺué(8Ø©ˆˆòx&h–ÊJHØ¡ìì¼ í[Õc˜çœ®rc¿~Á•jsâÄ8efæ–»Ïðá]Z©v³³³m¦µiÓF={ö<ã÷ÒT~<‚9±—Ÿ_à6?ÏÁƒ¹Z·n—=Y™—wv~®Zµjwëé233åt:éXœ¾j`\\r¥ÛýôÓm•Ú¿*Ç8Ýs|}ªzyl{ $•»©9}zt¥_kjjj™ëM5ȳXlß¾½‚ƒƒm5HsÔ\Ô_¦MÕ¸[†?òÒ¡Ã9úlU¢nŽ­p.—KÏ?ÿ¼ þ;«víÚjÞ¼¹6lØ K.¹DŸ¯Þ$WÁñó´òò]zU¤®½¬³nv­îºëZþÙgÚµ+½Ìã¤üô“f¼ð¢ny³>™?SÓïÕµW]ªÜ£ygÜ&HÙ§Gýç­¹Z·n= €[rÐpwGŽäkçÎ,·üÙÒÓ³µwïaµmëTP¿¼½½ªÜ–ùÃÝ#<"‡ƒ_ ÆþýûÕ¸qc:ÖÀ!š6-¾Ü}Þ~;QcÇV®jáé…gãæ9å1!H\¬—«ÐVƒ<‚ ©üù6sžîD¡¡¡ :«ïgTT”òòò¨ @ ÷â‹/ª™mµohƒ‰‡²(³xiÐÀ¯ÂmäççŸT‘ºoß¾69dÈ]3DË?ùX{22и¡ K÷ËÍË×àþú]×=õÏÿèºkæJµ|ÔµkWuèÐAõë×·cŽm[“•üã]ÒBwºF¡íÛ×e_óÙPXX¤¦Mœº)&²¸O^Ðk¯½Æ‡àvH<Áí%'ïWQ‘ûþ|EÚ¶í€ví:¤+  ^•Ûòññáó+*BàXA v*%%ó”û¬Z•ªÜ\—­°X™™¹JJʨÔë¨ì1̾æ9åé×/¸ÂÇ≄Ӿ氰f•k–ÈÊ:ù"·‘‘‘gýý4!@ÍvèP¶^yåÝ3â*™j^^^6é¨í£ T¸3gÊTŠÞ¾}{éº5kÖhÀ€***Ò•W^©¶íÚ+c¦š5qÊÄ k×v¨–··v¦g(µxiâl ¿M«Ÿ‹ï¯ù&I?lÝ!¯œ]òñòUÓõqUwµÙ_Á­[È»øuž*'2-/ »@wMzAüã$…„tàCp+!áÖöíËÑþý9ñ³æä¸”˜¸GùÚ+«Ö¯O¨±ª²³³í•_}}}é ”2•çÌÙxÊí&tŸb«GVÄüÿÏÞ}ÀGQæÿ¥B „„iRi¢pŠŠíÔ;쇊 <ëy¶¿ Û)*¨¨(UDEA𔡅BzÏîæŸßãeM 
½n’ÏÛ×¾²»3;ó̳œÌÌ÷ù-Úm*,VDEסóêgJ3iR·r-KC Ï?¿¡Ôy\]eîܱægeœ„ ”v>p–M›6IrâiSQ«+êý> I©’™U±ªÏ...r÷Ýwˆ Lð±à˜DsæÌoooñ ”¸øD9§k¸äåÿ·ug”¬Z·Y–}¿IN'¦HË2ax?ùÇÍ“¤C» qÊ_†3­V›X¬V±Úl¦’¤¾¶ÕPèz"Ú·‘ÐÖ²`Áy䑇ÙI AH4X6[žDEntÛ˜˜%›7ÇH›6Í̈´nn.ì ÍÅTœeäȈRƒêãw–;¤øÃÑ•jGEÖ¡ó–¦K—@3ÐjyÜ}÷JSŲ43fô‘+­-##£ÈëN:±ã;‹Å"Û¶m“wß}WV®\)íÛ¶”–þÍMÐÐÙÙI<<ÜÄfµJnnn…–;qâDYºt©|øá‡²wï^ñòò’Ûn»M®ºê*3½iS/³n­äøÏ—çËâUL°Qœœ¤]»v¦ªä[ –åÿt•žÝÂEòDÒ2²¤S‡¶&©Ÿý_ƲÆèò›xzȤ1ƒä³Å‹å0•+h(ø+ Öñ㩦Jbc¤'µtûOžÌÐÐæ&©#Œ¡|âããÅßߟŽ@&tOO×R+,jÆòÐJ+WFUªå]GyæÕ*—å¡m]¼xO©óøúzÊìÙC«µÏÃÂÂØñ€±~ýzyêÉ'¥™K¦ŒÒ[Nv 6ÕÝÝÝ$;'Wlyyâãí•?§-ÿx-»ÂË3fŒyX­VS%òÏã6‹<%=Ûu—[|I6nûó˜(¨U+9qâ„™G}ðÙ7²mW”4mâ!û—Ää4¹nÒùëÔ1ùóXk¼rr-2 wwyåƒ%rðàéܹ3; Á ‰)'Ç*‡'5ú~È͵Êþý “fFqõóódç(‡Ã‡Kÿþýé¡!È~ý‚K FG'Éž=ñ¦ÒbiV¯>(ññ•jGyסóè¼¥™8±[™ëÓЦVƒ,ËÛo7aÈêâêê*¡¡¡ìx4rV«MæÌ™-Ÿò‘óÌ&TY–GùNž{nƒ$%eUºÿ¼Á××ׄ!@ã¥Ç{ï½W¾[ñ…Ì}|†œÓ5L2³rLåÓ§¥‰§» þ1ož4oÖTzv ‘;vT[Öý¸^üó›’ÓJœGÙwL/>ÞMMÛ4ð¨­p~WydÆTYúÝÏ’”’fokMÒU îÓC>ûì3v @ƒÂ48©©Ù›NGCG”MHÈ”à`ißÞW\\œè”3;vLÅÓ“ê™8Û¨QeÎóÃÑeγté¾§ fªO–>,Ï:ÊšgàÀqu-}üd­(ùüóÊÕ7ÚÞ™3WÉ“O®•{ï wÝÕÏ„-+ÂÛÛÛþ<((ˆ€Fîµ×^—wÞ~C>yíaià+9ÿ«¨¨ÂS Éàë#ÎÎÞ¥Õ#‡_xžüçí·dü¸qRÕÜaJJª<øà,™1y”ì>pTÖm*°læÝTFí-.é/M<ÜÅbµÚ§iJ D¶nå/™YòÓ¯¿Ë¨¡Ø·¡¦dçXdXÿžòß%ke×®]Ò½{wv$'§9¬›u³î¼î9s†ÊìÙCùÇ(AH48QQ ft/ÏfË“#G’%66MÂÂü$(È›N)äðáÃùýFGPKô¸­6F=­ÍöÖ·mâû¨˜ÐP_S­±´âÏ?3U}}‹|óرÙ¹ód‰Ÿ¿ôÒN&ùÒK?Wz:Mç)Íe—u)s{ï¾{e…+<êüZòÍ77Ë{ïM0ÁÎò*„ÔŠ ñŠ“9s•~=»J‡à {€ÐÕÅE\œ$19MZøùL®Å*=º„Ëk.‘Ï¿øB&^qy•ÚðäSOIKo'Ò¿§,^ý³œ!‘ÚK3¯&&Ù¾MK nhª@Zm¶³>ïêê"É)ér:)U|š5ý£Äe Óktþ¾ÍdðÝäóÏ?' @”—7§NÖ«-ÖͺYwÃ\7P_„Dƒròdº$'gÓå“c5gO‘ˆiÞœ ˆêàÁƒ2nÜ8:€Zpå•WÊ¢E‹ÄÅÅEfÏž-?üp½oo}Û&¾_€Ê™<9R{lM‰Ó³²,²xñ™>½g±ÓW¯>(‹­Øi€:4ÔTRÔÐ¥Vd¬Ì:tšÎS­Y]Neiàó’K>§Ÿ.<0°\Ÿ! 
ìÙ³WNŸ>-}{Žg‘Ü?‚…1±§å¥÷>—5?ÿ&ϸZlg„ Ý\]åÎë/—{ï¾SÚ´i-ýûõ«Ôú_|éÿdù å­§ï–Ù/¼/V‹E½ý*Ó êC+Pfÿ/ éìäd¦é@¡ù=/ÿ~k‘ô?¿› tj`²6äæ·õ’çÉ#¯|"wÞy—øø4c‡Ô{!Ñ`h¥Ãƒéˆ JMÍ‘­[c¥U+oS!ÒÃÃ¥ÑöÅÉ“'%++KZ¶lÉŽ@-Ø¿¿¹h±XäÀ ¢½õm›êë÷»víZY¶l™xyyÉW\!‘‘‘uº­ŽÖžâÄÅÅITT”ÄÆÆJ@@€tìØQÚ¶m[êg²³³%==]<==¥iÓ¦UZ¿î)))ùox˜~ª¨„„‰‰‰1Û¡mÒcö   ³ nnnüƒ ¨uT|ì±Òçùá‡èCŠË–í+ñsNÔ¤;¶“¼úê¦J­C§•F«ZFFÖÎu±Y³VçÿMŸ#?~Q™óBººrKÙÞ½{ÿ8n‰‹Õ&ÎÎN’˜*3f¿*ûýQùÚ¯y³³Š,jðÜ®árýåCäŠËÆË#sþ)»íÖr¯733SþùøòÖÜWä?ÏÜ#ÇbOɺ_vÈóýÕ„.³²sŠÌ¯áGÉoCRjºüºcŸœ8yZââ%5#G2²rdôÐ>rÃ_FæÖVk}g±X¥sX;97¢•|óÍ*™8q"; Þ㪌#G’ËÑ%‹‹K“øø i×ÎGBBš›‡Íž={¤{÷îì ,77WÆŽ+©©©æõ×_-ëׯ§=Å8qâ„<ñÄ2þ|B|Ø„CÓÒÒÌëyóæÉu×]W®ãó?üP–/_.Û¶m+v U4HF%×^{­øùùñK¨ýú‹¯¯§$%e•8OAÕGWW糦ýøã‘?w饊QœŠð]C‹ÃBdî“wÊ¢ekdòÄñÒ©{O¹ä’âíí%6›M<<=åÈáòïïsT, rõ¨óeØ€óÅÅÙ)¹’—ÿ_VNÎY•'›x¸Ëw?m“U?ï“›þþùrñb¹ãöÛå²Ë.3ÓÃÂ:Èœï•ÑÃúHSOÉ«åþÓªá!m¤_Pyøá‡dÁ‚…âäÄ~¨¿B¢A8p Á„øP=ôÂåîݧäøñéØ1@¼½þMÄ111悪^X@QƒÎ?>Ü\è-o½õ–Œ1¢ÎÚÒ©S' Ó§O›×¬Ó¾q´ö¨Ï?ÿ\fΜYä½6mÚH·nÝÄ××W<({÷î5!B¥Õ"—-[V$Ù¼ys3fŒ,^¼Ø¼ÖAC´Jä9çœSáö|úé§öç;v4•&KòÝwßɤI“$11ñ¬imÛ¶5!<<<äÈ‘#æ^oP( ÛóÔSOIBB‚Ì;—_\@2$´Ô ¤úöÛƒg…׬‰.qþÐP_éÒåÏëU>Ôϯ\U¡u”Vq²`¹Ã‡‡•k;óòæœõž†!£¢dóæyíµMæyyh¥Êk®ù\vìø›=Xy& ?¶lÙÒüí·ˆˆ9u:I“Ó¤uKsXP ?yúþåÁ¿+Lj‡{é·@æZ,&üwõå—ÈEž/‹–ý K¾%é™Y’™•#áíÛHX»V2¨k°´‘¿ü3H¤†.õ2„«‹H·ˆ9q2A=!a!­Íõ §üÿ¾^»YvI•+WIÛ¶m¤Gdws­£ 9qâòúuòßÅ«åï×M0ë«mºÓ')·?%/¾ø‚ÜsÏ=ìX€z‹ $ê½ää,9y2ލ‘¾Í–-[NHP—tèà'îî. 
v[·nÝ*½zõâK(ƺuëÌO ûi ®.ƒz#ܯ¿þj."kXoذauÚ7ŽÖ ^ýõö×|ÔŠÓ¦M³WxTZ)RŠZ9R«XW]᪫®²!ÕÇ\á ä¡C‡òÿ¦Øb=uêÔçÕöè:µÚCíÓ‡~Ø„#CCC‹ÌŸ››+ûöí“ ˜ê‘ZIRmܸ‘_Z@­ÐŠ·ÝæZjuÄE‹vËã_Tä½/¿ÜSâü…«A˜<9²Ô dEס *MV–VˆÔÇÀ!r×]ýL¸ó_ÿú±ÔvÐÐä3Ïü(sæ -q =„Z²¹Ÿ¿¬ß¼K¦Ž&VkŽ öu ‘_yH®Ÿù¼Ø¬6S±±4ZÉ1+;GüšËí×_ñ¿÷òÌC¯‘èO YjÀQ«Hf±Z¥sX;™8z°¼úÁùû5cM qõú­òÓ–Ýr×=3MRéà¢ï¿ÿ¾ìܹS"##Í{ÜwŸŒ=\&ŽI?Ÿfb˫ݺºM-üó·ûºËä±9sdÈaÒ«×yì\€zÉ™.@}•H'Ô =ÑwâDšlÚt\ŽM–Z>W+222LU­Ç"Ó§O—Ë/¿ÜýhÏŸV¬Xa*<xýõ×åšk®)‚TMš4‘k¯½V¶mÛ& .”+¯¼ò¬e;Vš5kf­AÈŠ*\ R•„Œ—[o½µHR+GjÈôÿøÇY!Håææ&Ý»w—'žxÂ.µZ©VŒôññá—P+|}=¥_¿àRçÙ³'^¢£“ì¯KÉÿ{<¶Äù5ôx&­Üèêê\më(i=U¡ÁÊ+¦É+¯Œ)µ­ž~ƒ©*Y’àà?ú5''‡ €FÌßßO.¼p ¼÷éJIJIW—?q×p¢·WЫ›¤¦gšêŒå¡¡@ DêCª,x®Ë,)¤˜LrëÕ—Ê_§^*+Öl‘%ßn’Aœ#sŸ¸C¶ýú‹Y®rÉoŸNùì³Ïš{®T›6­åü>Êú_vˆ›[ÝÔ­Ðí?¼¿ôè"ÿûm’–FÑ@ýDõš^KMͦ#jÅb“å—_ŽËéÓ jÛôæêN:‰»»;_4êµÍ›7ÛŸkˆqÊ”)¥Ÿpv–É“'ÛG%.LÃ’î,pàÀ"Ë/ÂAÈóÎ;OºtéRì|3gΔ„„ûëÛn»MÖ¯_/aaaåZÞXpóÍ7›@¤V‡ ¶ŒQæ<…«$®^}Ð\w+ŽVh,.Xìc*/VÇ: hp±&̘ÑG>ýôÊ2Ãii92oÞ¶§ë Hžžžf@Sи=õÔÓ’–e•§^[ YÙÒÄÓ]œœòw¬Ò­c{f¬iši´ÙòdÜÅýäù‡n‘7ž¼ÓT¨ìÕ£“äf&Ë‘£Çìó^|ñÅÒ·o_Ù¿ÿŸÇŒ£FËïމ³³Sõ£Ž™yÓä1²eófyë­·Ù±õAHÔ[Vkž“~øAâããMź®]»š‘SŸ|òI *÷²ôóÏ=÷œ cmݺÕ^q®eË–&lÕ§O¹ÿþûÅÛۻζWƒ€úÚ²e‹üöÛoerºÝz³—Vê2dˆÌ˜1#ÿ˜«vþ„;~ü¸©˜““sÖ´ÿûßòÞ{ïyO¿›7ß|³Ö¾_í‹/¾øÂôÇ}÷Ý'ÿûßKœ7;;[^|ñEùè£LðR_èܹ³©†x×]w‰ŸŸ_¥ûˑڣáǺ/ÅÆÆVèwæLÇ7¿/'Ož4¯?ùäyöÙg˰,\ R¿çâªSæææšêgîc:?õ^'ÓGttɃýøãS¡Q¯§-]º¯Äù&O.9T©ËY³VWye­§ºüãä믣LuÊ’DE%ȶm±Ò³gñç.:uêd?Ÿ·;ïœ!ýúõ• ~Ìl/V¿#þÍ<¤[Çù=êˆdf爛‹‹äÕr»´*åEzÊ» >’[þz³ýýˆˆŽróÍ7˺5ßJÇÐÖrÕø¡Ò·g·Z©^Y=Vœ0òB¹÷‰7eëÖm2hÐ…ì\€z… $ê¥ÌL‹;–BGÔ±¼<1ßC\\štèà'­[{W¹âLmÒjëׯ77›€º§¡¸©S§ÊêÕEoèÒPà®]»ÌcÕªU²xñbéÕ«W™Ëûî»ïäšk®)¶¤†ºtYúÐ`—>Î=÷ÜZßf gj8lß¾³oNÓíÖ ›>t›.\(~ø¡tìØ±ÆÛµcÇùå—_Ц!I}vâĉ2ƒÕùýê1\Á÷Z¸âà™4XªÎC‡;}ïÞ½òÏþÓ„·oß^éþr¤öhÀwÙ²eö×/¿ü²<õÔS•Þ6MùÊ+¯4a]uäÈùùçŸËUQ½prРAf’â~Oµ"fÑ£G›ð%õ‰†ŸyæÇ§§¥åÈš5ѦÚcRRÉG*¹ê£†-µ’ãž=ñUZ‡:4´VúE«B®^ýV©óhP²¤ d·nÝdùòåì`ÀèÛ÷ó¸ûî»Íµ>½ÆðÖ[oKrZ†de刻wSS©±6Y¬V9§k„ä.X!Î~LÆŸß¶8™ûÊ‹rËU—ÊÕ.ßfâ”ÿ_®ÅRç}˜c‘½#¥C»Ö²bÅr‚€z‡ $ê¥ÄfË£#Dn®Möí;-11©á/¾¾žõ¢ÝZ R«ã´iÓ†/€:vôèQ8p  ‚) _EDDHjjj‘ ã±cÇdذa-þþþ%.ï_ÿú—<øàƒ&d§š6m*½{÷– .¸@RRRLÈLƒ~:]Cˆýúõ“¥K—ÊÅ_\kÛüÚk¯É=÷ÜS¤êbAuD=>Ñc•Ó§OÛ§mܸQÎ;ï<.Ó°XMÒ°¥§§§dee•kþ²B¤Õýý–‡VÚÔ_AèP«5^vÙe¦µ ¡VcÔàeZØv1š IDATZZ‘ ^M©­ö <ØTÖ,ðôÓOK³fÍä¨ô 
%W]u•=©>þøã2ƒ‡.¦Õlq¾ýöÛ"¯o¿ývþAÔ;#GF”„TË–í3!Å’T–,Vr|ì±5•^‡Òéýú×J¿ôîÝÆ„;W®Œ*qž•8ÍÝݽؕZ¶liÕÁÏo»õ69q2A|›{‹Íb­ÕvhîÒÕÅEfßq¼öÁbÙ¿m­ôèÔ^fN!-ü$7¿=–ZnSéíÍ_/¹bÔ…²xñ¦Ê¦^;Õë•zÍ.99YZ·n-cÇŽ5Õ¹p4!Qï$&fI||á€t¤ÙmÛb¥E / ÷+óBk]Ò“xëÖ­“ë®»Ž/P8 uà 7Èã?n¬@+"Nš4ÉTÓSž›;w®<ôÐCÅ.ë÷ß7Ó BØZ´hÑYƒheÂiÓ¦I\\œ üÝ{ヲBcmT¸Ö ÛwÞ)Vë>=<MC¦3fÌ(2¿¶¿¶¾ßòÒ iA¥M nذA"##‹Ì“‘‘a*Y®X±¢Æ¿óÚjψ#Lê>_@CÁ_}õ•©©aàŠÒß¡:ØCœÆ}á…Lp·$…«Aê¾ú—¿ü¥Øùt´æÂ4 @}£ÁB(´´JŒK—îW×’ÿ–.­dáyJ B–µ5aB—Z½†wÝu=K BîÜy²ÔÏwéÒ… ”H¯ëM™:E¾Yô¶œÓµC„µ*dë–þòÌ7ÛßË͵HvN®CöYN~Û.pž¼½`¹|õÕ “0`€(U»|ôÑÙrÙedêÔÉìd”ƒÞ_sàÀñõõ56€šãL >ÑQ´¢¢èwêTºlÚt\Ò›ç³rç÷ß/矾ò…à ´J †ÀþóŸÿ -jåÀµkשøÊ+¯˜K‹sÿý÷Û|ZAqÍš5ÅV€>|¸|þùçö×Äûä“Oje[gÍšeo£†â4”yÓM7 Al»¶I§Ð§ÚO5MÃZIS%½_ðÐï®¶¾ßòúùçŸíϧOŸ~VèPiÛï¾ûnS‰±¦Õf{4HªaÖ3×ß§O3fŒüðÃ^¦V…, #ë "¥)„9rd‰>µúg&Mš˜ íÔ7,Ô€aiôçž=ñ%N×jeÑÀeiU#ËZǧGÔjß”U}òر”R§Ÿy¾ àLzÿÏñSib±Úê¬ V›Í ¶¼<‡í/k~?µoÛRzt•«®¾Ú\·¼:ÿçE]dÂS§N•wÞyKÞ{ï]9xð ;å ƒ°/X° Vî? ±£"$ꕘ˜TIOÏ¡#ê›-ON’ØØ4éÐÁW‚‚ç"eBB‚©Æ£Õ”€ãЊyýë_‹æããc€Ï>û¬y­'õÿéEæÓ@Ý’%Kì¯u~ww÷שó´‚^Á‰È7ÞxC&O®Ù‘MüñGS¯Àßþö78p`‰ókÕ= jUÈÄÄDóžVT¼ùæ›Ë¬ÄØÐ¾ßŠÐÑj 8B?Õf{t°Í›7›Š§Ë–-+2M«MêcèСòÄOÈ…^X®eêEï'Ÿ|Òþúã?–!C†”¸­›6m²¿.¢,LÃÀû´êرc©U&õ÷sáÂ…¥¶Sßµ¦nµiÈP™7o[¥>«Aʲƒ´*äol®t;‡ ­Õ~Ñà¦^'Ôk†ÅIKãÚ/µ­¡U,ÒëLNžÍåd|¢ú57¡D”N0Ò·‡¼øâ‹2hà…âææfŸvôèQŽÌÉÉ‘S§NIXXpT„D½‘›k•èè$:¢žÉζ˜‘gýõ„¤¤d;D›¾üòK¹øâ‹ÅÓÓ“/1a¹å–[JgРAE^kUº3-_¾Üþ\+ËiÕDzèè¦öïß_ãÛ:þ|ûs½¨XžÁô¸EGb-pòäIsº±}¿Ñ­[7ûs ‘&%ÕíߵݽyACÁï¿ÿ¾„††ž5]+¥jW«Sfdd”¹¼îݻ˹çž[d ªšžiÑ¢Eöç^^^2~üøbçÓ¤­ÐÍíÚµ+µ ¾Ôj–¥=¾ùæ›"Mj‹V„Ô@ceh²¼Ÿ-OåÈÒÖSZEɚشÔé%…$@Íhˆ‹ *˾Û$îîÔ…(\‹E^ÐCÖ¯[#GŽ-2M¯×è}Uééirî¹=é,€C!‰z#::Ù„!Q?iRÿÿ~Ê„#ëʶmŒÄÛ³''êp$Ú*KëÖ­‹¼>qâÄYó2žsÎ9åZwpðŸ£íkø.++«F·5**Êþ¼wïÞÒ¶mÛr}î²Ë.+òúàÁƒîû­ˆaÆٟ:tÈ-úé§:냺h“““\{íµ²wï^SM1<<ü¬y4(Ù§O9vìX™Ë+\ÙQøßÿ}±ó}úé§Eö[ CçÌIŽ9RêúÛ·o_®í.O°€êæëëYgªH¸Qס몌‘##ê¤oÊ BfeYØ@•Œ;V–­Ý&‡ŽÆŠ‹ ·D–ÅjµIÛ Ò¶¥ŸhV¯]³M›62bÄHñôô ³…!P/¤§çHLL*ÑÄÅ¥K||¦„„øH»vÍÅÙÙ©ÖÖ­7D¯[·N¦Nš¿^NzPß´lÙ²Èëää䳿)2ÔFŒQærãããíÏóòò$::ZºtéRcÛQ¸Zµ²¼ 6U}ªY]ßoE\ýõ¦úfAXoçÎ2`ÀH¼õÖ[M•Jww÷ZÛ¾ºl.W+rÞtÓM¦ªâ¬Y³Š„wíÚe*Cj5E O–dÊ”)òÀ˜ß¥Ë:³êª.wÓ¦Mö×zì]oooñððìì?*ÇïÛ·ÏTˆ,éX}îܹr×]w;mòäÉæóÔ% ®Y]áÏUþ€¢VŽÔê“óæm«ðz† uÈ~ öaçU¢×™&O½Z>^úƒÜëd±Zsè”2x¸»Içð¹ÿ¾ûäƒ÷?   
éß¿Ÿ Dnß¾SžzêI: €JÒ{tðÌÌL30xIH€Š#‰z!**Á~³-ê?UìС¤üƒü4 ÷“-jç_oìîÑ£‡ò%P¹¸¸”:] ‡ µÂ]yªÜ¦!0Ÿš»ñ*''GŽ=j]‘ ä™ëSEÈêø~+J¿ËÏ>ûLn¾ùfó³€õ¡ýyûí·ËwÞ)M›6­ñís„öhkUÇË/¿\î¿ÿ~yå•WìÓ¾ýö[yçwLûJ¢ûëÀÍà"êóÏ?—×_]ÜÜÜìó,Z´Èþ·[@@€Œ9²Ô6µhÑÂþ{ªHq¸¸Ê•ªI“&%Vv¯ï€²L™)³f­®ÐgBC}Í£"4pYÑ ¤†  ©“~IK+=ˆàêʦ ênºéf¹xè‡r<.^ZøûšJ¦cc¶i þþþ²dÉWb±XäÇ4×\ôzUM^3 !ÓØõ¾´´4û{C‡•!C†Ð9T‚pxññ’˜˜EG4@YYÙµë”øú¦JD„¿x{×\E­¸+£G¦ãh rssó/þiÒ¤"!ÉâôéÓ§H`yÏž=%!pthìÒ%0ÿïÛør¦2Uµ"¤V†Ôkmå5|xXuÐÛ’P €º×P*µmÛFÆŒ/;÷FˈA½$;‡ dit\Ë@?‰‰‰‘;wç/^$S¦L¡c¨ýÿê|`šÖðcJJй7dÍš5æ~¤Ž;ÒITAH84›-OH¤#¸¤¤,ٲ儴ní-:ø‰›[õ^„ÕQU¾þúk™:uª¸»»Óá4Púÿù¶mÛÚ+.^rÉ%2þ|‡j£V¿ÓQT322Ìk¨¡¼Î¬nÁ—^NãÆ3mÛ¶É›o¾i†¡Ù}ûö™ ßöíÛÍ÷Ó˜ÚóÄO˜ŠŽV«Õ¼Þºuk™ŸùË_þ"wÜq‡ «?þØ„Ôß½7ÚçÕÐdY† f*Køä“OäÒK/e§Ô[“'GÊc­©Ðü¥!H P®\UîÏ Z'ý›f®–¤ fRR’øúúÖéw§çeô¦Dooovd@£ÑÐ*ÝxãòÔ#÷ÈÈÁ½ùrË ØÖ-ü%//Ï\³Ò $¨X¢oß¾2jÔ(û{fi½'ƒ $UçLÀ‘;–’P˜KG4zR-&&U6nœDG42‹M¢¢ä—_ŽKBBf•—÷ã?æ/Ó"çŸ> @#öç ]°‡²µ6l°W‡,Ë?üPäu]! ªÖg­[·–¯¾úÊ üüóϲ=:a=ùîææVæg WzÔõ+W®4Ï?ýôSûû“'O''§rý>Lœ8ÑþZgo¿ýv3 1õÑÀ!ìS®yûõ 6aÈÊÐ`£«»MÕmÙ²ÒoªïÛ7Øü 2çÊôº^]X¾|¹¤¤¤H||<;1 ÑÐk}úô‘éÓ§K¯^½dذarÑETÔŠEõÕ´k®•ϾÞ¬D²46[žøúxIóf^fJTV<“———ùYÞûƒ@éBÂa<˜(VkÑHedäÊöíq²cÇIó¼2´ Ô–-[ÌÕ…«Ì€†«[·nEŽ4\æhÎ9çûs½¹êã?.ó3z!ú7Þ°¿>ï¼ó¤mÛ¶µÖæfÍšÙŸŸ>}ºAì+®®®ftãGŽitíIMM•Ý»wÛ__pÁåúÜe—]f?Q¯.\h~ß ‡7 ‡%Ë2kÖ¬"Çëz ÿ裆ÔK©¾8rdD¥×£ÁF 8–Ç!¡eΓ•e‘Õ«ÊÌ™«¤k×Weñâ=Uî uѢݥÎSPRéy³Å‹×ú9=?²}ûvó<--Ðh4ÔŠE½{÷–Ø›DŽ©÷U!5Ìéáî&nâéánžWç=PîùËónÚÄT?Í/5€û—¨æÿ·ÒpD©©9ËE&èö¦:䉿biyeeeÉ_|!—^z©x{{Ó‘47Ýt“Ú_?þøã7zéwÜQäøä™gž‘'N”ú™'žx¢ÈhôÏ>ûl©ó?÷ÜsrõÕWË­·Þ*{öTý¦5½^`×®]õb_ÐiYŽ?n^8 Z_ÛóÙgŸIÿþýíËrË-·©²Ð·oßr}NCãÇ·¿^²d‰|ðÁö ¬]»v5aÝòÒy{ì±"ï=ùä“2tèP9tèÿ°êò§L‰¬•õŒUü|QQ òÆ›åòËJëÖÏÉ%—| Ï=·Aöì‰7H GVÅüùÛ%>¾äj]ºJh¨¯ýuHHˆ A~÷Ýwµö]þê;™ÿù*YöíO/în®U®x©×ušäï­ZøIbb‚ÄÆÆòKpü¿—é8"½èÐû©M–ILLªýë’è…Ê>úHzôè!t u,;;Ûý¢££M¨IGÕ÷jBóæÍ‹ª´²œÃöíÛWâgôBîüùóM5¾M›6Õx´lÙRî½÷^ûkmÛ…^Xl`QCœ7Üpƒ<õÔSö÷.¹ä>|x‰Ë×mž9s¦ü÷¿ÿ•7ß|SæÌ™Så6·k×Îþ\Cv#ä8uê”Cís?ýô“éç{î¹GâââŠgãÆEªq–·¢#·gÇŽ¦*ãèÑ£eÈ!æ˜X«‰žiïÞ½¦jú‚ ìïþùrã7–{]…+>¦§§›àb©S§V¸í=ô©4YغuëL T—­7@žYTÿÑÁO4”©Û€£˜0¡‹xzº–: ‡+£}úÔx¿hrÞ¼yrøðaóZ¢ZE¯C‡¦*Ÿ†æ4Шm/dÓÊŒ/½ôR©ËNH(:¨ˆÞÐUUÓ¦M“U«VÙ¿ÓʰaÃòÇ|ÍMi¹¹¹&èê(’““M;_|ñEyõÕWeÔ¨Qrî¹çJûöíÍ4 j€® 
¢ö«Vê¬ïíiݺµýùÚµkÍÃÅÅÅ,/<<Üܼ°ÿ~9räH‘ÁEüüüdÑ¢EâééYîu9RìáÄÂ#CW&©£3kî¼óNyýõ׋ì¿?ü°ýuÛ¶mÍãØ±cæw¸¸G W0 .hRÇkÖD—8ÏС¡U^†µª¢Vp¬Ìz^yeŒôïÿNþßß9ÅNì±5ÙÒ+J+MnÛVzU¡[né}Ö{þþþù}×Ï~.jÀ€5òéùÁ… žUR+D€ú/00@x`–¼2÷Eyûé»ÅÉéØëü'%§J÷ÈsdÞü…E&edfÊ÷߯‘ÿ{ñ9¹!£Îé!¹¹•«äm³åI—ð?DÍÈHgÇ8<‚p(V«ML¤#P*½«N[´h*ááþEFÔÕ 8zCôu×]×`F© >ÓTqA% aY­Öšù#ÇÕÕ„ï»ï>y뭷̺4L¨ÕåôQ Œi±64kÖL¶nÝ*ûÛßÌ W4©â\~ùåòÎ;Á*¢M›6Un¯VÿÓª”U+SSS嫯¾²O/ÀsZE°iÓ¦&œ§!Í%K–˜Gq¼¼¼ä³Ï>3áÓúÞ ¬.[¶¬È²õ÷L·¡Û3iHøµ×^3!ÜŠpss“I“&™ª£…i%ËÊVe×ß]m‹V³Ôð£†6Ϥ!j}”D+¦¾ð üã ¨s“'G–„ÔéÕµ ,–dÈЧiÈñþûÊ#|Wâ<×_¿Ø„-uÞòÒ`æÌ™«JGšÁÁ>ÅNÓªzJÃZ¡q̘1æ¼AuÑs4óçÏ7¸Ý'‘ì¼47Üp£,[¶\V¬ùE.9@2³rêMÛätb²ddŸplÚ¤‰\:f´tëÖMþ~ãTy2<ÄTv¬LÐÓ–g“–¾ùÇZ.râÄ v€Ã# ‡rôhŠdg[é”Ë©Srút¦´kç#!!Íe×®&Üpã7VëÅPPy99%_PÔ ÎÊîkÒ¤‰yh¨Q«Ç•ö]öo¼!“'O6*­ü¨á½3ér4ü8vìX¹þúë¥K—.%.S«æ•ÕÞòÌSxÞ ȸqãL•ÇÝ»wKzzúYóhå@ "Þ|óÍ•êó‹/¾¸Êߣ†Dúé'™1c†|ôÑGE¦éàW_}u•ú¢¢ßoYËÖðç¶mÛäÙgŸ5UÏÙ_iàPƒ¨3gΔ-ZT©¥=º ¨jX«¢~ýõ׿fÁ3éw¦ûýC=$S¦L1ý]ºO~øá‡öjºÜꨬyå•WÊĉMHxñâŲzõêbûÌœÔÈ?î×0§þéçJû 6Uò@A:ÈguT„,XOIAH]Ï„ ¥ÿ­ü ÷ßß&QQ ÅN×JGž/ß?]""Ê>Ç“””%×\óy‰U& Ü{oÉ•õÄ äÝwß•_ýÕTiÔ™|}}«Ü_z®DÏ9wÎDÏëhè4 î{ýõ×dòÄ Òÿü®âïÛÌ Ò_¸äÅ'¦ˆ“³K±Ó;&¹9ÙâåßV¶ìØ+z÷¨TUH­éëã-^M<Í2@%ÿßíâRägazžCïË(n¨8½Û1oΜ92{ölzu*+Ë"›67'XÁ¼yÛÌE¿éÓ{Êu×õä rp®rêÔ^éÙ³£„„„Ð!(—9sÖ˜›æÌšÿÿÁ¡tÔ€èèèR+?jðMƒLå­~—••enRÒ cá eyh¹ßÿÝŒô®Á5 ›i¬yóæåú¼V¶ÔÊÓ¥µ·<ó”D+WjíܹÓ,GÃ]íÛ·¯pŸkøó“O>1Ï;vìh–çîî^mß©J÷íÛgڪ߶³¸à^eú¢¼ßoE–­ójV+"j˜.((HÂÂÂLßjUÃêàhí)¼OiõÝ÷ãââL%R­ÖžüìQ-ëÐà­ÞŒ¨ë 0•/«›þ¢Õ!O:eÙÙÙæ÷V¦ú;\]Û@uëÚõUSñL‚Ô`auéÐá%‰ŽNªôz¶m‹•þýß1×kKâíí.~xE©ÁJ]ÎÍ7%›7Ç”º¾áÃÃdÅŠiâêê\ê|ZrÆ 湞ßÒ¢ž‹ª,ˆL‘ÒsPgòôô”k¯½Öœo ¡Ókeo¿ý¶¹^0mÚ´"Óôü3Ïjòw¼²¿“奴Ò#ÕõÍäÉ‘ÅVk92¢Z×£U!ßxcóYïZ®Ï÷ì$O?=\î¾{e‰óh…ÇË/_hÖõÐCƒ¥wï6¦â¤Ò¦2ûüóʬ©áÇÇ¿¨Ì¤>|¸9rÄT'Ò› /^,›7o6†–»t§Õ«W›Á½J¢' A‹ÆV±hܸqòÍ7«å¿_}/Ó&\,Ù9¹ÛVí­d},VÞ\°\ÒÜZ›ãŸÜü6ëñϤIÍ`‘úÐkvÏ=ÿ‚FEEÉÎ;MòÌó“…5J"""ØaF«V­dÖ¬YÅÞïææ&>ø ¹–×ÜsÏ=rá€þÔÂOF ê%™Y9×F=öIHJ‘ÿ{ï ‰‰K+/"¶Ä(q÷”fînòÖË eîܹòü /HxX˜ü²y‹\Ò?RúœÛErs-•^¯››«„µ ’-{c$;;G<<|à¸B¢ÎéÈRû÷Ÿ¦#P­bcÓäÔ©tißÞW‚ƒ}ÄÙÙ‰N xxx˜‘ÔOŸ>mFm×c?MCZýÕçСCòßÿþ×I|}¼å‹7çÈEý{JZz¦©\™“+YÙ9fžk.¿D^{ìïòïû¯“«/»X¬k•Cyùoצ… À–t¼€£à/WÔ)‹Å&‡%Ò¨Q™™¹²sçIùí·8IOÏ¥C@<ÞÉ”ùóç› ”³fÍ’áÇÓ)`äȈ"¯'OŽtèõ6•uën¨ö0ä‚“$2²e¥?!×^{mµ °¦ËÐe‚ 
qÑŠ‹?ü¨ìØsHÞùx…x¸»9LÛòòòÄ×ÇKÆ^ÔW<=ÜÅbµ;†"­6› ,æäæJ^5¬Û–¿Ü–Í%(¨µ©² €Cÿ}O .>œ,99V:µ"11S6oŽ‘6mšIhhsqss¡S@ƒÐ¤IINN6#Áº¸pŒà(´R£VlÌʲ˜Ÿ5Q²º×ämÂ×\ó¹¬^}°JíÒJs玕I“ºUyCCCM€qÑ¢E¦êcehuÉI“&‰¿¿?;'Ј—Èu×M—?ÿTþ2f°´ ð5ÁBGQ\²6899K^žM¨H&Å¢"$êLFF®;–LG VéèhÇ§ÈÆÇó¦æ¿¦O@Ã Õ A8 %fžk8Q_ׇõhrÅŠi2{öÐJ/+8ØÇ*oºéüjÛN 2Þzë­ùÛ8Ôœ+/­n4~üx¹á†AЈiÐï©§ž–fÍýdÅš_ÄÝ:zó”«‹‹X­V±X,ôÀ¡„D9p ‘êŒÅb“ýûO› ‘ ™tjÄĉTCìÛ7¸^­G«9Ι3T¾ÿ~º R¡Ï͘ÑG¶n½Uúõ«þmvww7AÈ;î¸C,>>>%Îb3fÌóÏ?¿BáIÐ0µ”ûï@–|û³¤ed‰S#/ƒ¨ÛŸšž!aaaæ8 GÆY~Ô ž>AG Î¥§çÈöíqÐD""¤IþY@õ)NšÔ­^®G—«•׬‰–>Ú.‹í–¤¤¬³æÓ*’Ó§÷”o<_""j¾ê¢ /ºè"óHHHØØXÉÉÉ1Óš6mšßž RC’ ñºîºëäõ×_—_wí— {u—œ\*!êñŽŽÄjVŒŠJ #àPNŸÎ”ÄÄãÒ¶m3ißÞ׌T TU—.’—7§Þ¯gèÐPó˜;w¬ìÙo®ùj Ò××SBC}%2²e]có÷÷7€òðòj*wÝu§¬þò#ܧ‡Hnã틼ü‡§• õAHÔºãÇS$##—Ž€Ã±Ùòäèщ‹K—|ͨµNNNt ð?vÔУ>꫉'Ê[s_•S§“ŧYSsßPc¤wF¥¥gÊáÇÙ)’g¨U¹¹6‰ŽN¢#àÐrr¬²wïiùõ×XINΦC(ÄjµJl¬þ?2ù¬iyyy'©©©^nff¦Ynnnn ÿÎ)u:”—ŸŸŸµ ‘˜“§ÅÙ¹qßFéêê"§N2×dpd!Q«J‹ÅFG ^HMÍ–­[OÈîݧ$;ÛJ‡/))IÞ~ûmyå•W$!!¡È´o¿ýVÞxã Ù¼ys…—»iÓ&yóÍ7eÕªUÅN_¶l™™¾gϾU!ûW—FÛ:Ø­WOÑÚúGFµ&--GNœH£#Pïœ<™.73ÕL­V‚¼€Æ- @ `*C._¾¼Ðÿ/OÊO?ý$¾¾¾2hР /722Òüܵk—Yva‹Å ]]]¥S§N| ªLƒ_¯Ý,kã ÝÉÉIl6›ƒ8:‚¨5QQ Œ…zËfË3AÈM›Ž›`$Ù!CÄÏÏO8`‚‹jéÒ¥æâØØ±cM`±¢4`,™™™ùÇQE¦íß¿_rrrLÒÃÃ/@•uîÜE6ïØ'ÑGcÅ¥±V…tr«Í&6›†A¹·àØB¢Vœ:•.IIYtê½ìl«ìÞ}J¶n•ÔÔl:Ð(iÐñÒK/5Ï¿þúkÙ°aƒ=zTzôè!ááá•^nÏž=ÍÏ;vyçÎæç¹çžKç¨íÚµ[ž“ü°i‡¸¹6Î ¤Sþ#=#KZ·nÍ ´‡G5N+é8HG AINÎ’_•½{OKNŽ•4:xÔàcjjª|óÍ7âéé)#GެÒ2»wïnB–{÷î•ìì?ÈÍÍ5!›4i"t<€j¡á¿V­ZÉÆ­¿‹Õjk”}àää$i™b±X$/ŠÇF5îèÑdÉʲÐhpôä߉©²iÓñüý<Å„~hL4Y 88X¼¼¼ª´< SvîÜÙ\dÛ³gyOC‘†ŒŒŒggþ|P=š5ó– /¼P‹5a@ 6F9¹s­Öɉë±ÇÆ_®¨QÙÙ9r$™Ž@ƒf±ØäÀùå—ãŸA‡«Õ*«V­2áDooo‰ŠŠ2•«ªgÏžæçöíÛÍÏ;wšŸçœs Z™ÄÅ'ILÜiqi¤³FE—îÝ»K#ÍꂨQ‡%‰ÕJ•<4™™Ù¹ó¤üö[¬¤§çÐ!€íÇ”øøxéÛ·¯Œ;Ö¼·|ùrS½±*ÂÃÃM°òСCfù°ô÷÷7' :õî}¸ººÉ–ûòº4ºí·Ùl{*QºuëÎÎpx!QcRR²%66Ž@£“˜˜%›7ÇÈþý§MµH (®[·Î‡*;w–Ž;JRR’¬Y³¦È¼¿üò‹,[¶LRSS˵l'''Sý1//O¾øâ Sy²Gt:€jÔJZµj%{ÍÕ¸ü×9‹œNJÍïƒ–ì ‡G5&**N@£•—'rüxªlÜx<ÿgŠ sÐP|õÕW& 8räHqww7ï=Z\]]å矖¸¸8û¼˜Ü¼y³=z´ÜËïÙ³§ùc~j0ª›—WSéÒ¥‹ÄJ«µ‘ xîä$¹¹VIÏÈ___v€Ã#‰—n*Bž,Ü¿?ÁTˆLL̤CõÞÎ;M¨1,,L"##íïûùùÉàÁƒÅf³É7ß|c__+ÿr¯£E‹Ò®];ó<88¸BŸ€Š ''§F·ÝÎùÛœ”’&‰)ÀŽpx®tª›ŽŒuð`"’žž+¿ý'M%<ÜOš4q£Sõ’†;vì(gM4hôíÛW\\\Ìk 
L¦§§›ÐdPPP…Öh>O5H5I}MÏÈ«-¯Qm·³‹³ÄÄ–ÌÌlqqáVR€ãã¯WT»#GR$;ÛBGňτ„L ö‘öí}ÅÅʼnNÔ;Å… ¸»»ÛŸ¯]»VÜÜÜdܸqZ~vv¶©<éìì\¤ê$T·öíÛKVvŽX­VS%±±Ä!]]œeÏ#Ò²UK aG8þx¡-Y²žBôîÝÛ¶Ô6±›6çÜ{+,,´‚‚»å¾§miÕV{ùåñ6pà;øàƒí7^wÇw»hÑ¢ºÛ&Ø=÷ÜkÍ›WP1‡ŒˆÙ²elõêMe6o®±¯¿^f TYŸ>•Ö¢ES ¨SVVf­Ûv¶…KVØN]:ØÖ­[sâ}ZuÍV»ùî'lÕ–{íµ×­C‡öÛï/--µ±cï°‘#GÙÙgŸSwK}³ °•‘„DL”YnæÌ•Ūª6Û'Ÿ,²Ê­W¯ÖÖ´)MpÀƒíéñÏÛ5ž–MšÙæ-Õvݳâí¹'±ÊÊÖ;ü]aaÝtÓ¶fÍkÑ¢…Ë @¦*¤‹yóªlÓ¦ ÈK–¬·?^hsæ¬vAÎ@>;âˆ#ìÃ)_ÛòUk\p`6+mZb+VUÙ7?h}÷ØßžyæÙ°A~-[¶$ñ„DT›7רܹk( ‡lݺÍfÍZm¼À–-[O o=øàƒ¶dù{y‡ִ¤$+ßCQQ‘7±×&~lç¼×Nüù¹vÇ´¦MKø€9@HDU]½Íš5kBA9H™^¿új™}úéb[·n €¼òñÇÛÌ™ßÙ|`_|·ÌVdYVÈ‚kVÚÔ–._e×ýáQ{ö­ÏmÜ£µ .¸ÀHòÈ%B"ªŠŠ0 “í¼s+.¦Ê¹hõêM6eÊ"ûæ›.øÈ<ÙFŒn{íµ§|ú/íÑçß°â&Ù‘8 iI±mܼÅñMvíݶÇþ?´ &ØþûïÏ È9¤ùCL ¬sçæÖ¾}¹Íž½Ú,¨²ÚZÊÈ%µu_ê… ×ÚÒ¥ë­GVÖ¥K v…@N;äCì–[nµ®];Û_|io¼ñ¦4hO°{_Û¼¥:#YšÊZùî¤Ïí¿hÝz}Ïžxú94h( g‰`¦I¡õéSé‚"gÌXi+Wn¤P€SS³Í}¿©ï{ee3 9i·Ývµ±co·iÓ¦Ù9çœcß|s¼úJûÃoÿŸU¶ln5[·fÄq*™AqqÓ¾æŸ}ý=õ÷wlβvÍõ£íÔSO³¢¢B>L@N#q)++¶=öè`+Vl°™3WÙ† Õ cô½þüó%Ö¦M™õîÝÚ}ï€\Ó¶m[;ðÀÝ¿{õêek×®µÑ÷Ün·^õÿ¬°°Ð¶mÛÖhǦÌ%ÅŶvÝ›ôé4{åíÛüåmØyçÙé§Ÿn|€€¼@ $¢©ÊÊ2›?Í™³Æe’[ð¼råëÚµ¥uïÞÒe†rÕI'd‹-²Ñ÷ýt±µo_n½zµ¶ÒRšäžsÎ9ǺwïiçŸ?ÌÞúðûí§Ú;ÿþÌf/Ûd5Û möìïì•·þeçýüëµSgÛ¼¥z{ÖÈp ¬I“"—Qríº 6gáRû`ò—öο>µ™s—XÏ^½ìÔ_œ]÷ºçZ×®]øøÿ¹‚”Q€TÛ¶e6o^•ͻƶnÝF¡9FÏË—o°nÝZØN;µt;Ô¹ä°Ãµ>ú·=÷Ü36úÁqÖ¼ek{ã . 
qýúõv÷=÷ÚUcµCîbÿï¤Yqq—íñ?ÿÿ“ÔšU×lµÕk×Ù·³ØÄIŸÛûaUªm=ö°3ϽȆ 9Èý»¤¤˜B H©ÂÂëÞ½¥uìXá²C.Y²ŽBr̶mµ6gÎ[¼xËÙ¡C…€œÒ¾}[»ð mÒ¤IvüñÇ» H)//·ï´ÓO;Õ~|ÌOlÞ¿Ø)Ç µ…KVØ¢¥+mæͶió—ýqÞÂeVm%Ö­Goë×oûþO°C9Ôúôéó“ ,!‘M›Ù®»¶µÎ+lÆŒ•¶ví È1›7oµ¯¿^n ®³>}*­yó 9cþüù¶`ÁB;òÈ#w¸¯k×®¶ÏÞ{Ù‡~håÿžg5[«­K—¬ÚÖZÇní¬_›6vþÞ{»Œ­Zµ¢0ˆ@H¤UË–¥6`@g—9N"·lÙJ¡9fÍšM6eÊB— V"KJŠ(d½{ï½Ï.»ìR+--Ýá¾7ÞxÃ^yå?þ¶ß~ƒ(,’Œ@H4 HµkWns欶ùó«lÛ¶Z È1 x^¶l½uïÞʺvma…… ²Ò§Ÿ~j ,°£Ž:j‡û¦OŸn7Þø;{üñÇ ‚ E )4–¢¢—-nàÀÎÖ¶mä ­[k]ö×É“Úòå(dmÛ¶Ù 7Üh¿ùÍåVX¸ã²Ë›omcÆŒ¶#<’ E„D£kÖ¬Øú÷oo{îÙÁÊË‹) mÜXm_~¹Ô>ûl‰­__M kLš4©î¿µÖ¢E {ë­·êÝ7sæL[ºt© <˜‚ …šPÈ­[7³}÷íl ®µÙ³×XuõÖŒ8®©S[mí§|@@’Xóæ%ÖªUÓ°;ä¥Óĉ³ù@РéÓ§Û÷¾÷=[´h‘]~ùåöÉ'ŸX“&ÿ[~9vìXûñ¶¢¢" ¤EAÁõ¼6¯Íkçðk_ýP5j('; !‘a²ëÒ¥…µo_n³g¯vA‘µµ{LŸ}¶ØýòÏÀí¾ûî·‹.ºÐŠ‹‹müøñvøá‡ÛóÏ?g¯¼òŠ9ŠBiјR7Ü0‘׿µyí}m [‰ŒT\\d}û¶±Î›ÛŒ+mÕªMi?†½÷îh{ña)P^^líÚ•gÌñ Úƒaõë×ÏFi×^;Êjj¶Úðá#lĈ«lþüyö»ßÝd:´§@Z4f¯ÍkóÚ¹ûÚ@¶ ­¼¼Äöܳ£-_¾ÁfÎ\i7Ö¤íµõºú\EE…6hPgkÚ”& ×½óÎl«­˜ï¤ÌvÚét«¬<Ò mõêÅÖ´i…UUíd×_?‘:Äĉ³)@R…‚¬Ð¶m™UV6³ ÖÚìÙ«lëÖZ ÈR;íÔ’ H€<ñÝOnZZ÷ó2i@$ ²FaauëÖÂ:t(·Y³VÛâÅ묶–€H ›”–6qßc䶃îaC)ˆ<7th B"ë””Ù.»´±Î+lÆŒU¶fÍ& Ƚ{Wº fä¶ü ‡ûH†BŠÙªyó¦¶÷Þm·ÝÚYÓ¦EáZµ*µvíÊ(¢è±´'öw IDATë‡jú²Qyy‰uîÜÜe™«ªÚlµµ” ê{衇ìÛo¿µþýû'í9§OŸnS§Nµ%K–Xûöíëê]­Mš4ɪ««­uëÖyS¶zÏÏ<óŒíºë®Ö¬Y³ˆWPPPWþí]FWùd;ôÐC“öZ ,,,ÌØç{î¹çÜou|’áÃ?t1={ö´ /¼Ðí.QZZÚèŸy´r[»v­=úè£öòË/ÛÖ­[·ß® Æk®¹&bЮ¿úÕ¯ÜÂR¹ùæ›í€Ø~¿·«ƒvØ8ýôÓÃ>G“&…uåÕš¤Ð±Çk“'OŽéoo¼ñF»îºëR~LjoŠŠ"g¾ãŽ;ÜïŸýìg_¾ãÇ·«®ºÊºwïn³gÏNyÙdšx7Ùå–­åë²­~qÄns‡~Øí†€~P.¶©xýl껥»Œ2©ÏC¿~[®Ôúl€L1}út·^XµfØOk‰/½ôR»è¢‹­}NöØ{®Íå!µõ¥1æÒ˜¿Kn™0‡|G $rRË–MmŸ}:ÚâÅëlÖ¬Õ.Kd>Qæ>ÑnêX+˜M¿úê+»é¦›\äI'”ðë¨ýøãíñÇ·®]»fÜó­ZµÊþõ¯Yûöí­ÿþI)[í&á], 0 Þ…Nc‰VnºðúÍo~cß~û­õë×Ï:è ëÓ§Ûâ³Ï>k0SèSO=å‚ •ñRï¢tH“&nWBªŽ…Ë Ù£G++..äÄ)¤ uß=jëyäwáyöÙg×ûÛdnˆÉÑGm¯¿þºÛM1„Z²d‰»WÆæÁƒg|ùjYø7HUÙdšDŽ7™å–Íå˲±~(ÜñÇïΑÚ)Ñî@?(ÚŒT¼~¶õÝÒ]F™Òç¡ßF¿-—ê}6©¤›­[·¶áÇS1Òº“ &X=l÷Ýw§@([ðf”W_}Õþö·¿Ùï~÷;ëС ©þô§?¹þãÆmàÀn®Lëp5'ùå—_ºóÏ%—\b;vt‰jb•̱÷\›ËCjëKcÌ¥1—ü2aþùŽ@Hä,eºëÔ©¹µkWns欶 ÖÚ¶mµyU „,..vÿ®­­µ¿üå/®ñÑÏqÇ·ý¾xi GÏ«@ËdHöó½ÿþûîù†êêC2,_¾Üýn×®]Æ|ÎÑÊíÁtAÊŠ© 2/zT6‘,^¼ØB*¸QÁ¤o½õVØrÔó*RƒJ¡eeÅ.C+ µÎ9çœzÿ?mÚ4×Þ«½ºýöÛÓ~<6lpÙŠ7mÚöþ—^zÉݯÀdµÑ©¤ 
4p™Ž²É4‰o2Ë-›Ë!—ekýЦ&:Gj‡2åúA¹Öf¤âõ³­ï–î2Ê”>ý6úm¹V?賩§—555y¸3qâD»å–[\FÄî‹/¾°Ÿüä'vøá‡Ûo¼‘ôçW ÁC=äêâQGEÙf°|þ¬rå3ÌÅú¡ýõºÊÊFv"ɤu±ÊöXZZj÷ÝwŸwÞy;üͺuëì¶Ûn ¼¦8™cï¹6—‡Ô֗ƘKcþ.ùeÂüòÈýJÞ¤Ðz÷®´Î[ØŒ+lÅŠyYêÜžp öØc¹I,Xàv¢ U]]íR$+®sçζÓN;í0©ÆW^ã«¿U”¿^Cñw¤õZZ„¸zõjkÓ¦{>eeˆ÷ù¼¿Ÿ1c†­_¿¾î³íí‚ôÂQz öSÐàwß}çv€ª¨¨°¹sçºçëÕ«WØ2ñ£²"Šþ=þ|—M1– ˜XÊÕ/Yå¦2R€bYY™Ë 邌æÞ{ïuÇýë_ÿÚžy晈·÷Þ{[óæÍíÝwßµ+®¸¢ÞkôîÝÚ¸®€Ì¶víZ—ñX¥÷ÜsO·;˜ŸíÿûßÿZee¥kü4¡ ÝÍÔªíQ;©]ÆÔFÉœ9s¬Y³f®MR;«LÕâ]„û3Ty ÔVê8vÙe׆{ÿ3gδ¾}ûZyyyØçРƒ^_}ÿ’º][Ý»wwí´ŽsÊ”)î5÷ÙgŸz}”XË&ê“èøÔîkS½—DËñ†¾õ]T—4±¥Ýï’Qn¡ý#=ß¼yó\]ÐnzÕ‹HŸ_"eK9©cÉøN¦K.×mä¡Ý¾Ÿþy·iˆúÐè %ÚÒøB´s£÷÷ZŒÑ¢E ·±‘6qÒuý~ûí×àø‚&Q'OžlK—.µN:Ù®»îZïïãékh¡£Î±«V­jð®ï–Œ¾H8ñ–Q´òIeEjÓ<wRÓÖn»í–5ír¼mr¦öÛ4Æ–èuý¶ôÖúl@êéB×¾ºîÖÚ‡|pÇw¸ßZ¼‰Ì¡ FW]u•»ÎÔÚšDé:µ¨¨(£Þc¦S¼Ç“ìÏŠÏ%·Ê¥±êÇGáÆ¨´ Ÿ@HÉ¢±loó ­¥=묳Âþæo¸á†z·Ew÷þ&YóKÙ¶+ÍcLŸ>Ý͵´mÛ6ì}š×hÙ²e½ût-£ ^TÖz¯‰Šuþ4V‰Ì§„>O¢s5©Z»–Èùä8p ›@yýõ×w¸}øðáö£ýÈõ-´ø^Ô¾5Êýxít´² JýŒóÏ?߯çúwžŸÿüçî=%ò¸XŽ×ÿþ5ø¡MôùjÐQUÉ(7ÏsÏ=g_|± ç²Ë.³±cÇ&½ c)‡ uLw‰~'S-ê‡úÖú,þýï»sÏÎ;ïL£ÐJ¸/¤s[´sãÔ©SÝ9îÆo´þýû»¶Ã;ÇÉgœáοþ †4ÉvÉ%—¸cÒfEM(hA‰7±´¯¡ì§žzª»Žoè©ï–H_¤!AË(ÖòIeEjÓV¬Xa'žx¢{&ܼ—™Þ.ÇÓ&gr¿íwÞIøº€~[zë}6ɦk­ Ñú†ÁƒS 䀰c=ÖýNÔÑGí®_µf(S®%3í˜9žd~V|.¹W.U?4¬5nÊNôÞ{ï‘@R<þøã6kÖ,·÷Ì3Ï ôØhãîþ¿It~)×a…£y­s>øàƒÝúi¿Ñ£GÛÍ7ßvRÙ8µ~üÖ[ouåˆXçã­ AæS"=O"s5É^»–ŒyªdÍßeÊÜ],å’Œ¹»dÕ ówÈFB"ï(8«uëf¶`AUÝ xµÕÔlË›÷®Ì€Z¦ÀÀÐDÔ‘PEeD<æ˜c\é€A5nwÞy§ ÐÓb39üðÃ]Ðáßÿþw[´h‘ët+k¡·è¹Ô)T«Î´vÐEv^Ò‚:ÜÚÉ?Öç5¾}ô‘käÕð+°SÁzÊV¨¬…ê xYõ<Ê à¿H€Ê ÌÊh¨]ô˜HÔ¹V&¥º×N+z}eÝ]$TrME¹}ûí·î·R`‹vjPGEÇ­Ž]¸]€©cÓkGÚÅ&”—eaáÂ….R±}úTr€ ¦vã•W^±Ÿþô§n0A;”)ÀþöÛowm­vÒR;¡‹Ueü3fŒ]tÑEöÆo¸ÇkL¸ÊÖ£2ùÅ/~á&µqÚØK/½Ôµ—Tó66Ðíº W»ªŒÂž ¯„.èÕ?ûì³n°Ã¿ ‘6 ЮhÚ BƒiÑøJü´)‚ŽQ“æZ´¯ÌÛ`Óëî»ï¾n²(–² êü£=ôÐCî3ÔBmN ÁÀÐÁxäxõY}þùçnǧ<Ðõ]’Un¢€ f©õâ‹/º~¬×—ÒSz¬^7eK9©c^ß,‘ïdªåKýð6ïÐñ3(ÐJF_Hÿë¹Qm†Æ8;ì07& ]uÞÒu¿®×õoÚ×4ÁªÉ h'Æë®»Îã4FpÔQG:7kó£þð‡îü¨ój¤slC}·Dú"±ˆµŒb-Ÿ íW2 צéßz¼væÔ¢ŸÆ“´Y–w<ÙÒ.ÇÚ&gz¿MÇ’¬ëúmé«ôÙ$“®Ý´õä“O®·a5ŸÖ´x™Œ¥Ì*úœu½Ÿ)2í˜9žd~V|.¹W.Y?4g«@H­±#@2hŽGN9唸3åEw—Dç—²y–ŸÖhkM¸ÖP+cžÞçÍ7ßt¿ì¥6Êÿyx‰lÂÍÝÏÜ`AæS’È\M²×®%cž*Yów™2wK¹$sî.Ñ:!Ìß![‰¼¤ñÌ®][Ô¬Ëmöì5¶páÚˆ p³™‚öÔ€*ØNÙúÔATçP «PW˜îS€ä=÷ܳ=pPi토 
´#‚ùÔ¨}ï{ßs?ÚEDxGyd½€EQ£§@>æyÔ@«¡Vãúá‡nè‹åù´K‚ õœþÝü¨à>=Ÿ=µ«”è6ñôÂQºìC=Ôí®m[»¡èG ]XèqÑ:~AË5åæíL¢ #e¥ôïÖ ÷¬Îß9çœS¯ó¨tÕ*?]….rŒÄ+g/˜´K—uï·˜“ d¨·ß~Û]ük±¸Îûžý÷ßß]ø¿üòËn‚@m•h!¹6˜0a‚x8餓ܠ‚絓¢Dm˜~ÔÑ…®êCÛKmà¿ ö‹õu‚ÐDz_Ú…Hbþ]”ÔwP?I¯msƒ†|õÕW.‘2<ês]yå•n²Eƒ±”M<¦¢>‚·s•6xÐ.P‰>.Èñ~öÙg.sµvß ²p –rmv¡Í<´«œ××SJý2 é3Ž·,£•E,åŒ:ô;™JùR?üƒrè %£/äܨ@=M,jçRþôx½®v^õÚ Ûtœšüð 2Äè´9’&>ƒ¼þÊ•+c:ÇFë»ÅÛ‰E¬ekùm¿‚”Q8š\ÒXŒ6Þzë­·¶÷cµ“¦&ª4NËXd&´Ë±¶É™ÞoÓ†_ɸ. ß–ÞúAŸ H?-,Öâh­sе”úñº–Z³f»öiÛ¶íö¿]¶l™[¡“ºÑÕp´‰µ6ªV& ]Ãïºë®×rlÜ¸Ñ hÍ‚þFÏíéܹ³•——ïð]Óé:\Ǩl9ZTŽœ£ë?2‡è:½²²r‡,!Êä¡cÑqèµƒÐØ‚6vÒµN¸Í»uÌZ³¢u(áÞS$ PÒõ§Þ£ÿ³ðß§u#¡×·ZD¬ìnú\õ^¥q±)S¦ØªU«lŸ}öq›S%ZïTÖZtëmìZõyè5U¶á^S æµ&Få *Éx›‰G `ð>gèšvóæÍnSv•™Öç4´î(ZÝŽç˜-Ûùóç»÷³~ýzwíÏþËñ„–û´iÓ¶sõë×/ìgÏç•Èw>ѲˆV &Iö÷7ÖóTªårýи«ÖßiÌîÁ¬·â¡>¢¨=ðÓúakû)CšÎC¡çž ãîñÌ/åÊ:,µ¹ DÔŒŽWk›E}")ÊòåËíã?v¯ïý¿úb½{÷víP"Œ&È|Jªž;Ùk×’1O•Œù»L𻋥\2iÍ•0‡lE $òZqqQÝ}eÝ…qs›1ce]‡icN½¿ÐTè8PÐ_è æøñãÝouBpÔ€ 0Àu$5¡Žf,"Ðy(^ªåX©c+Çü÷©C A5Ü^#¬Î44 ©Au,RµÓ_<åšìrÓÀ•¼÷Þ{ng¥ÖV‡PcºÀÑ® –6l˜û»+V¸ì ° ’¡ÀY\\h=z´âL°¢¿BiP@Ú€ÀÐÀ—À”±ø²Ë.síˆî×…¶n :pä¿ öKæëøýò—¿t&O<ñD½mH Úi)8ñˆv¢ÒàÁìÙ³Sö9z'¨=pïã"Ñ v© Ú§ŠµÜ¼,Š¡ƒËê?iÀ%‘2NVY$ZÇ‚~'S)_ê‡w ·[úB©ì ‰PùüdàÀnÜFAé^›&I¤…iš,K2û.ZÜkß-ÙmFµ)SZDªlZh®q¥P^x¡»ÆÔ‚Ve߈Õ;ï¼ã®»”1ÇŸiEFíÆS´.Ä»~õÜvÛm6räH—DŸm"&Nœh§žzêö¹u9jÔ(÷“h½ÓX›@‡ÖE]s«.zkf½¦ÖCÝ}÷ÝÛÿßíÿî»ïF̧MÎõ:ÊtÒ¿÷™ù׿œqÆ6nÜ8à´n=¦DÊVeб*èÌ㯱¿Üt¦1}´ÖH‚á>«x>¯x¾ó±ŠVÑÊAß³d}ƒœ§R)ꇾ£wÒøŸÎíd'(½‹3NQÛpË-·ìð÷JT¢ó›_Ðq÷xæ—ri–Ö+ M¡^ ¤Úeµ]ú•‚Ǽ@Hœé>oÝx"Läù][–mëÖ’]Ÿ2iî.™åÏ9$ž:Áü²@òòbÛs϶|ù†ºÎë*Û¸±:'Þ×þð×ÉÓŽO>ù¤¼ÓnjØý¼ì‰ ~ G Ø[¸paàcP žv$ÐÎÚÕJv⌅Ò2{W=úéyÅËF(ÚuÍß¡ˆÔAˆgG“X%R®É*7²;*c¤ŸjpNé¿Ï>ûl7 ¤‹íj§Û '¯œ5Û³gëºç*äÄlÆŒî÷c=æ&¥ý´{o¸ ÕÃ?Ü „©O¡‹bí¦ºéB,4"¡…É~¿SN9Å.½ôR7X¦,µÃ‘ÚZ œ© í¥ç åíZéµá© AÊ_|ѵóÚKA Ð&©x\$êëTTT¤¬Ü´3¨èóó÷«´«[¸˜t”a²ëX<ßÉL«WÙV?¼Ý»S9è €¾P$‘v¬ÔNŠZ¢]¹Cil@‹M4† ±o D iRÙwi¨ï–ì6#‘2Jfù$£çMˆj¬PÞ.屌»dB»¤,2½ß–Œëúmé­ôـƣŸ`¢Ì3'Ÿ|²[§¡Ì¿úÕ¯ÜbH]év- ÖšõõuÝ¥ë&ïºDµøóÌ3Ït×\º^Pf/e¦QЙ6Bò6hÖz)°F×:Zì­ÇÞyçnFàÖ;F-†Öëþô§?uKº¦Ñ±(HS;Z+Ó´iS÷·ÊT¡q]Çø7¨Öº ]W3Æ-œõ²w((Rï_Y”Á+J™wåϤ5*eÐÔbÔ ©u.ÊÄé]ÛzÞ|óM÷[ÁGZ+äÏ6¨k_I4“Ö¸hA¬> ½?­åQp¥þ½ï¾û&üü¡޼c×g¡Í¿U"½¦H+Ëž ëóրƴT±dºÑg¥@¬Ã;Ì?j|GõO›‰k¬JÿöÄZ·ƒS¢e«…Î=ôû>(SßC}Wý³AŽGßeÑ9@Ç:Þî³ 
òyÅóU´²ˆV:–d}ƒœ§R)_ê‡?;¥sŠÎS:Ç);šçÜsÏu›'xî½÷^dåmâtÜ=žù¥\Z‡¥¶U펂Ֆ轪«Û|ª6Xó!Þ¦ú·wÜq Þ©^û•Êç϶ukÉ®O™4w—Ìr‰çO`þÙŠ@HÀ§mÛ2«¬lfóçWÕu(WÛÖ­µYý~”½¸¸Ø ¨ã­ÆUŠôW:éІ>Ò"3oØË²ëंêÔ©P_ƒBjŒµÃTC‘xǨ]VôžBõêÕË $z¼¿ñ"C<åšìrÓÀ[$ÚåP5Ú©N»l©©áªzMÿ¢B/³¤:@º]MþÝî¼rÖØ_§NœL ÃyçxMNø'nü}ˆÐzñÆDí…w„7‘ã Ä…“Œ×ñS¿G»)²&–4)©‹÷ 6¸]¥Âõ-¾Ðj’úK-íP¬±´sæ[o½å&ªúôéc>ú¨›42aÂëÑ£‡;uõÕW»¹ŽD3!»ë£EYZL¨lgZdòßÿþ×-”VFDÑæÊÚ€HÁ/Z`ªkñaƹût}¥ì4ß|ó½ÿþû6tèPw»®Á´hÕ¿({È!n!³‚´. „Tm ­LŠ^F-8×õ—®w´øÒˆ£µ º¦Óó{™s¼1)ëÇÃ?¼=뇎MÂeÊQðš®Õ)ð餓Nrǧñ½Ïx6ØÖñêXtݦÀDfwµND¯i=I$eee.XNï]ÇëeÊѺÒ‰6‰Ò&éÞu–þ_‹X{÷îmýúõK¨^(ã6ÖögÑ"[ÕÊ&ÎW_}ÓkêÚR?Z8¯ ¨³Î:+¦HôªëV‚"õœªKj'½Ï+Öºä˜-[/ˆWAf^–AmîÏBäx´y»ÆC•õ0ÈFê±~^A¿óAD+‹hå 1¤d|ƒž§R)_ê‡?¥þ­2Šýõ×õn×ZeýxtÞT dÐŒmáÄ3¿”Kë°4&¡uàÿú׿lÊ”)ní»æ?öÚk/×—U[ ë e n×®kßÚ¶m[/P5™‚Ì &òüÙöÜþ¾k®RW.éªÌß![ „(,,¨»àoi;VÔ]¯²Å‹×åÄûÚsÏ=ÝNHêªqÒ`…§²²Ò5JúQ§0”—:=ÒŽ!áh@B;1iÐD;5xƒ(hÐyÐN¿ŽK×k®¹&¦Ýã½à×Xâ)×d—›:û ]äx’Ú Ð»`SL?áxQ:6:y/è³kײ¤\ÐR«K—.n1MLhÐ(ʦ£ ]«}Ñ@ž&z¼ ê ƒâeNNÕë„Ò ‰Lôžõï§Ÿ~ÚÝ®-³™ú |˜6mš=Úõ%4A¤ÿׯÉ~\cðv)îÛ·¯Ý}÷ÝîGý íXª]X¼KVY$RÇâùNfb½Ê¦úá „& €¾P¢}¿ÿ»¶þ¿y7ÝtÓöì š€ðvaÔDŽÆzR} ­ï–î6#´Œ»|òýÐ.gN¿-Ñëúmé­ôÙ€ôÑÂyo¡•hq‰vuövšO%e[ÒÂbíNï-ÎÕĉÝnð^xaV•·úÊÆ¢E8ÉìOf³DêA®iŒú¡ YõšÊz¦àb-vô‚ =Z<ªó´²tyA¢à/ç)ØP™6¼@Hïú8ܵŒh‘¥çÛo¿u¿C׃èyµp44Ó„®DYC)èHFZîy×èá!言Ge·Ôã:.=V Æu[¼´hWk/žxâ‰zT‰ÎQi­†ë:Ë „|çwܺý¿®4vå-XÕg¦ûük<™|ÉA IDAT©þ&Qæ›yŒRQ#½f23hѽ?Rhûì³}òÉ'öÞ{ï¹L.AëvºÊVÙrDkŽüÁmñÒz%e :¾ëçô;D2Ê"ßß ç©TÊ—úáãÓ™õ @îò®Ÿzê)·qF:2øÆ3¿”kë°ÔgU ¤úºZw­l#FŒp÷é˜Õ'ÓFÊü«Ä.j³ Æj<¬¹Jm¹¤ówÈVB””Ù÷¾×¶®lQwq½Âªª6gý{:þøã] ¤­3Î8c{çOÚi‘˜v¥ÛÅKìñ3´ã~¨>øÀý¾üòËÃ$‡ÓÐóéµ+”Ž3–@Hï‚À[ ×â)×d—›·óÌÔ©SíÔSO­wŸ²<ª|ÔIQ0æ€ìç?ÿyØÝK4ð¦A¢üàÁ[ÏÿÊy½í´S{N´K–ÎíZ`ëÀ˜1c\мÚ(íüsÜqǹEfjŸµûl¸¶ÉË(ìç]Ìû3Çû:Ah•Ú;MkPAƒbjŸÉJ†Ê&ZôÿøãÛ¦M›ÜÖ»ï¾k§œrJÂKÕñ1nÜ8·ðK»/^¼Øí\¬>N"õ!hYÄR‰Ô±x¾“éËõÃ;eÚ#€ìï ÅrnÔ¦MÊ(àŸ4е¸Ælü×ò¢¬,¢ÅxšÐKw_#Zß-ѾH¢e´|ÒÕ~i!&iµX*´/­,é·¥¶ß–èuý¶ôÖúl@úèû¹`Á‚ÚÌïÿû6räÈ„< ´Jö¢¤;î¸ÃýþÙÏ~–ö2KäýŒ?Þ®ºê*7?”ÌÅû%Ÿm>—IcÔ}ǵpI‰„àQ†®PÞÐáîóÖh¬Y³f‡ût›®S”QR×Þ Z”íñÖM(ÀÏÛèHt !¡ëD´ Z”RÙ÷ü¼u"þ@oóèH€ëzÑË”£…âÊ8íõ5â¥ë2m2­÷TUUåÊM×eZ³£ $BÅCAC >Õ"v—¨ß¦E¶ºM›Dh¼D‹Ûo¸á÷÷ú·h<+õÂË(¤…è骋©xÍHÙý¡@Èpc:±Ôít½Oñ¾øâ‹vÅW¸Ïüâ‹/v›ñöIô=ô6¡OÅûú"e‘ŒïoÐóT*åKýІ>’ ×Yß9çœc·ß~»[¬ B ™jñÌ/åÚ:,m@ 
qµ¹Þy]ð¢@Hݧ¶Ìkg“ÑÇÍ7©˜3Jtž*×\E+—L˜»æï­ ) aÍ›—Ø>ût²]wmkM›fwì°Òƒ«c¨ƒ?üpûí‡v˜û­]áBÏ4X:}út×QÐ.g~^$ÿÌ™3wx--VÿĹv˜ó¨…ÓÐózè¡î·v|X»ví÷WWWoM¯c£N‚u_cˆ§\“]nJ£­Eƒ”õ:þMfêõ”5A[ºˆRÖI]À…þxœº Ðÿ{™*=Ë—k€n•˶È|šÈ•Ûn»ÍíøjóæÍõ&è4 ¦Ý‰4àpýõ׻ݷ´K‘&ôÿ¡¼ÉŠÏ>ûl‡û4Y¨6Z“;z¿ ¯£Œ?Ú3t‘^$jëð¯EHÚ-J¯­‹Ó­¡² ú¾´ÑA(oBôÉx\´ãM}f€5j”M™2Å ¸èÿý™*â-ÇXË"–rH¤ŽýNF¢ZØÚÿ r{>Ôñåè¿ô…’ÝŠåÜøé§ŸºcðcüùÏv;µîºë®6hРí·{Çῦ׹ïÞ{﫯TC}·XÛŒ ý¶ e´|ÒÕ~yÙî¿ÿ~·PÊ_—N:é$÷ïpQå[¿-}ßhåèuý¶ôöÛè³é³dÉ÷ûÚk¯ußSeQ{«$Z`äe2‰—úN%%%.KT2Ycݺuss!é”èû9à€\Ÿò‚ .Èúº“ŠÏ6ßˤ1ꇎWêª_­Ì ‰í$\°p÷éÚWý ©èܦ@] >ûì³;\{ç=eäS€Ž‰þêW¿²—^zÉõK"-×ÔZ,îÿÑùL 9•ÁÒãeðñ"ºæ½žw-/­åÐûÖ¦Öz¢ ž 6¸Å¯ÊÂ:¸ì…êéúJÞzë-·&HkPÔ–èvݯõ&Ê© VÝž ÞFV±Œu$û5ÓÁ \·n]\u;]e«ï‰‚&¼ŒyjÛµðÚ¿>¬±„û¼‚~çƒHFY$ãûô<•JùR?¼s<µH†æÍ›ÛرcÝ¿5^¦óQh²õµÂm¯xæ—’µK‚ÌÛ™o ò¼šTfºÿüç?.3sii©xàî>µ œ0a‚k—•ÙÝ[§ïëÅ#ÕÏŸjÉZ»–ÌyªLXs%霿˄¹;aþÙŠŒ@Œ:t¨°¶mËlîÜ56o^U]¶6ëÞƒ:»š°ÖÂ1í4¥Ý=dÈ!.ßßóÎ;ÏuÕÀ*PO“]ê,þú׿Þa0»ÿþn÷…x ®\æº] ÔÙÖN@Õ®UJ‹¬¤Ôˆ¿ýöÛ.XOáéz> bè95é®@ùä°Á©”èûQÛÑИÙ$Ÿm¾—IcÕeVUU©zsȨ›nºÉ-׿ÌZLëeõRPÆRüm¸2¾ˆÆî¾ûn÷£ûÕÑú‰Ð@­™3gŽýõ¯)뇷Á´6Y Gc?Zs¢×Ñ5–²-*ÓŒÖØ$B× ºÓqêßO?ý´»]‹{¡kAS©Ü䨱›#F¸ûtÌZ“¢,*;ï¼³Û¸û—¿ü%Aqò2÷ùשÛé¤5JÚ˜]×þwÑ8‚ôÿ™–­%èw¾1Ê"ÑïoÐóõ#ñúá-Ê÷Îù(¿«/ ,„:霮¹ë)àêƒ>Hê¹'žù¥d­Ã’ sz±Î·}^õu•SÙó4¯ `HæI|ðAW.Ên¼2ž¹É Rýü©–Œµk©˜§jÌ5W’îù»L˜»æï­„(**´ž=[[§NÍmæÌ•uÜ ™ùÅ®»Ø×Ÿ55øJ³¬@8Ì•——»Û•ºýÑGu ¾7¹¦Ý?Õ)Ðw¸@B †èy„¨ÅiZ`æ¥lÖ ª:šV§SÇ¢€=ë]rÉ%awÆjèùD¬•´ƒšvúð¨#«ÝîÔÑ÷Ó ’!Õ‰ð¿7Ïîz‘éö åšŠrÓŽXZ¸ rÓ¢AQ¦:ZzÎX.À*³%KÖ]@}î. 
ºwïΉ2€Î×jCÔæDòâ‹/ºvJ“tcÆŒ©7@¦‹so7-µ½jc´;ä©§žºýïtq®Åqúѹv“õh M;I¾üòËn"BíÐW\±ý~]×ßoˆ® t>TßbñâÅ®ÿ¡óg¤´Ø[×u:?Æ`äÿ¼…ê¡t}§Œ9ZÇ¡k-B¿ð Ý"ôx7ƒÃ?Üõ“´¹µÎû NÔuh"ÙîDã# Q°Îç¢ñ+Q ¤îÓXŽÆtDï'_xk¥ÔV¡>­ú¢þ+¼ÿþûÛÇã©Û‰S¼4ö¤µaÚ˜A™ô4Nå_G”îãIÆw>e­ýþ=O¥K.×ïŸi²ÛE]äúW×\s;/é¼éËk¬]í…æüóWÑÆÝ#ýM¼óKÉX‡%Açôb]‡ôyø®9‡üøÇ?®wŸ²kÜB´AZ¢¯Ï|GªŸ?Öç‰÷¹“±v-™óTAêS¤÷œèÜ×Hçü]"swɬÌß![ Ä¡´´I]ƒÛ¾îD¿©®ó¸ÒÖ­Û’Qǧ] 4n÷£6mÚØøñã]ÃëßÕW«L‹úÑnpš¼Ö$b¸IhGFmM>jÀAº&ô¼çSƒ¬a Hh¢ÎŒÐÎüáž·¡çóauðõ£FV ÝÔ)Ðß… úTvHuÔ™PÇØ?Ф<xZ`§üÇÕÐs-×T”›sÖYgÙgœážS4d:“.è (MúϺÿÖÖ+c@ãR»¤6§¡ µ9ºðÖÚT-„ÑB] úÛí®=]¸‡Ò Ÿ¨ûû¢vH€Zˆ£¢´;ÿB^»ibP+Þ\×Ñ€‡i°N› ¡AA=>ÈN»Z\¥vÖ¿ÃXC·‹é¾ÐÝu*› ïëæ›ovEh0SUz®–-[&õq oCï?Yå¦ÌÚ÷ß¿ÛøA(ü´ó—ß´k—‚½— åkYD«Ó±Ö±†Þ{¬ßÉHôÝQS×þï~ÐÛs½~ˆ6ú¯@þöƒRÙеÍйK £µÀNc5ZPŽÆ´ £veÕΊšDñŽeÖ¬Yúñô]"õÝbm3é·ÅRFAË'eé1:f-ŽÒd‘Þ‡&ˆ¼º¥…S±ŒË4f»´,‚¶Ë©êûÆúŒv]@¿­që}6 s¨Ÿ£ÅQj˜èÑ¿Õ/Ђªêêêzm³Ý{ßuõ—´‘Ç¿ LßùЬsšÏÑ o.Cíå¨Q£ÜŸÎ‰ÚSvÛm·z÷éœrþùç»Eš óŸo´óvC¢=6–÷£+è’i¾æ7¿ù;^£5·%ÞßqÄ®¿à¿MÓ¦¦ê‡j1~Cå ê£);¶ÚÕp´‹¸•hYh±¸æÔÎ;ï<×— ¥y0=2)hÞ/e-Pv+-ògûVVÕ?ÿâ£TËõú¡>ŽWÿý﻾u,AÎã¢ëï¼¢óé½÷Þö<« CÏ´ù‚®-tM­Í¥Ã œvÚiîü«ì òÓu™ŸÖuø7ËÒµ†þ_×qºÏ?ž£¾Ž²žèuÔ~)[Ž®_d¨ÿׂZ´*À\ Ç•í-]©­ÐqêT¯©í òÜÚ€B O•-Eë8t}æ-îUðÖ¥L˜0Á¦OŸî® µð6‘׋GªŸ?oƒ€Ï>ûÌõb¥Í T¯ž|òÉíë­t­ªë~•·S u;‘c B×ÞÊøçç­ñ ý~¤ãxbé[ùΩS±–E´ròý 'èy*mN£¿ÕX”l(ÈíùP?Ä „ôg7€dÐø˜ÖïŠŵ¹“Σk·N;Ú¸{CÏüR¢ë°$Þ9½hó-ñ<¯‚àTÎwô2Yz4.¢M¨ÔV„›ÿ úzAç;RýüAž'ÞçNÖÚµdÍS©O‘Þs¢sw’îù»Dæî’U'˜¿C6#H@«V¥6`@'[´hÍšµª®Óµ-#Ž+Úb¿h÷«#­ŸX©£ÚÙô7Ô={ö¬w[´œ†žïÿʾUÔL†jÐõ>40¢Á¶mÛÖ;®x¨Cé±Ñž3H¹¦¢ÜÔy‰wpYï;\d‹MíÕWÿשñïh|AvËÔNàþÝúý4€ÖPp]CmOee¥û ¥Í ”¯ eoVûäu´€.ν¾b¥]Ùµ‹QhöäXD z‹t{´¾A¸²‰ç}©}×¥ ‚>.ÒgÙÐûOF¹iðXü ÝB?Sñï0´ƒ”E¤rRÇ¢•Y´ïdƒøMš$åö\®â ÊÑ襲/­Íðηýû÷¹/ú· M|ÄsnŽÔw ×w‹µÍˆ·ß´Œ‚–O²Ë(Úc4i纑H¦öÛ‚”EÐv9Õ}߆¾ƒ±^Ðok¼úAŸ ÈZ<«Wâokµ@C «Î<óL·øH eµ ùºë®s‹<öÛo?·ÐJ V´ÿp»D_zé¥nѪæüÖˆñkð+à@ýŽ[o½Õý[ÙS´ãçRИ›„öÇþøÇ?º´CA™:WjgmýDí±AÞvÓÿüóÏ]–=½'mÒ*ÜyQ L–ÝÎSQ:³Îç bôÏqiêúk¡»ú‡ÒB>m«ãPP¥ÚŸ—^zÉe W`¡‚;‚ô­‘õC ˽ï’á˜cŽq}e+#šÚm- Wp®Éüç:c¬§óS(-U ‘Úú z~µZ «sž2†ha§²æþýïw 6½Ç¨ß  ´´uîܹÛ3úꔉCmʲãõ-t¾|ûí·ÝæKzn/¨F“Z¤z衇Ö;ž†¨Ï¤ãWG´ 4œ Ï}ì±Çº6Q‹RUvþ±e=Ñ5·úL Î7^Ï{ "Õω²Èèº_(X@¥üÚ‡»f~á…Üuö 'œàª”)Ym¹ê¥?è HÝN䘂ôÛ•%zèС®^h¬B‡(OzýtO,‚~çc­SAÊ"–rˆõûNÐóT8ʸsõÕWoÿ^{¹AnÏ—ú!ÚÐCô~ 
U4·Ë91–9†dÍ/%ºKâ™·‰e¾%Þ¹Bõm#Íõ5´+ž× 2ß‘êçú<‰ï6ÿ} ÔV6£D®×ѹH@ h÷ÿ­¼ÖÔÔ¸þ‡îSñ}ðÁ.¯Y*HÛîR ·²5êÜ(Õ¯—ësŸÌßåæïÍ„’õejRXwòom:UØÌ™«lÅŠ JÐ`¤v]Ó€ÉÕ±c…½òÊsn@wĈ M"j·r-f 2§ ]ddzs¬FÉgœ‘qå‘ÈûÊeš`?~¼›DÖ¤–~<ÚùM;í*Û„·ë]c—c&×1êÇÿèœCÿÒÓw£C»©]¦ÏFýˆÖo£Ï4žûï¿ßíb½hÑ"·@VAúž>þøãõþ.Ò®Ý eY J‹ŠýÁ]¢ý ðò²Rz4)á!½E2Ê|Kðc²J‹ŽµQg ·håàe7ñ|ûí·îwèîâZˆ¢…Ë¡Ÿî²øå/é!Ÿxâ‰zÁ 8ePlˆù䢋.Úá>Gja¶æÿÒ™õÃûN…ËP 7©MUæ/@E”ùPëᲯè:N™ÁÂe˜Púö Àñ («—²*ê¼£óŸw¿Î5Z°-Êb«~ˆºo¸á†zÏ«ìk ~RÖ4m€í_l©~‹^W? J›7ožËz«p§ÿøï»ï>w.÷Æ7~ö³Ÿ¹ üÇí¹æšk\`œw­¢vgÉ’%î\ª"‚P6=>RûÏs«]ÐBa¾‡fôQ[© -Tvá>¯ ¯×P½PÆÝç}žÉzþ ¯éQ–eŸVýVö@e‹%›Š¨ªïùõ×_»¶ß¿ñGè¸W,u;–cŠ÷}†ºùæ›]ö<û*·^#Ò¦ñOC÷yñ|çƒÔ©XË"ÖºíûÛÐ{zž ¥zµbÅŠÎÕAoÏõú!^v"ÖÈWñ®Ãòú6ñÌÛD›oI÷|Pª_/×ç>™¿Ë?Ìß!› $YYY±í¾{{[¹r£Í˜±Ò6l¨¦Põó(³—^z‰‚H²¢¢ëÕ«µÛñKMÑvŸ ”&a4YTÿþýÝO<´¸H¼Ey™$‘÷•ë>ø`›4i’ÛmjÁ‚n"L‹²4ãŸÀË„rÌä:Fýøú¯“{¼÷Þ{õ²)årß-žv9›Ë±·ËôÙ¨Ñúmôـƣã=ZÜpÜqǹl8Ú:Bhñ©Ç._¾ÜPÊ–-[âêo„jß¾½û­…¼~Ê"á¦_vÙe.ëŠÎ%ÚÉZ‹5Ž9昘'òØPZ,ë‚ûxËÁËr¢(z=ÏäÉ“Ýïи ’Q§œrŠ«S:¾ªª*÷Þ¬ñüÃ(@£!Z¤-=ö˜Ë2ê§Eä’΀½|¨;wv¿ ¢]ü›Èù5tŽ w.wGºOÏz膙9s¦û­ÀpÔŽøÏmá´k×Îý4ä?ø;×) I×%Ðq7”¡Ø`xÓM7¹€/Ób¬tüÊn¦kµáÄûÜ*ÇHI fŠ$ž×k¨^„¾V²ž?Èk†Òæ‘6ðhˆÚöXÇ-¢ÕíX)‘÷zìAú€ñOC÷Çú>âùέSAÊ¢¡ºË÷7–2‹õ<Ž‚)“q{.×ñ!ýÙ} ŸÄ»Kâ·‰6ß’îù T¿^®¯íbþ.?1‡lE $"••ÍlàÀ.uB•Íž½Újj¶Q($–´L÷î­ê:2ÿ›Ø¥CˆW<‹„Á@IvÓ¢.oaW¦¢Že~ýТú¯‹& † Bß-‡ËÙÑo£Ï–ùõƒ>ÐxÞ~ûm·p[Êji~E RµèA•6nÜèÓªU+«©©Ù~2x xCŸÏ Úð"ý ä²>ÜÞzë-—ù®OŸ>öè£ÚàÁƒ|½D›J‘2_pÁvÏ=÷¸ì.ÊÂyÐAÙ›o¾é6Õ{Q–¨x%£,T'Ž=öX·p\Çtæ™gº ÿüó.¸eàÀvÄG$<¶¡ }Ëõ¼A2å|òÉ'öÚk¯ÙÝwßm={ö ôºO>ù¤Û @í¢—a8YÏT¿^ºßrÿ;ߘu*Ú÷™Ñ&L›6;øâ —1ÔŸ%ò ë°Í¨O‹ù;d!ª;ß[×®-¬C‡ ›5k•-Z´.i“¿@c)-mâê5È 抔ÊOÙXÆŽk‡rˆ ´ò²Í)`Mžµ "•`'óæÍ {¿Í(àK‹aGmùË_ìè£vÿß±cÇŸ;‘Ǧ›2+Jß¾}Ý¢pý¨ìúÓŸÚŸþô§ˆr±JFY(øQ.žxs IDATýë_Ý¿Ÿ~úiwû/~ñ‹¨U–®9sæ¸Çî»ï¾Qæ¹^?¼àSï;é¢LãÇ·«®ºÊÆç~<ÚpAYx¯»îº˜ú)Ñ 6Ìn½õVœ$R‹=|sâ‰'~͇~Øý>ãŒ3’þÜñHõë¥ûý ÷¿óY§¢}‘m‚ÎéÚÈgĈ „Ò ¸¸ÐvÞ¹uîÜÜfÌXi«Wo¢Pµúô©´Â €<ô·¿ýÍýVFÈwÞ9êß{‘ʤ’ ^À™´‰4üqÛ´i“=÷Üsöî»ïÚ)§œÓk4ôØd¿ŸxiA°²"Nž<Ù/^lË—/·^½z¹ì)É”HY~øáÖ©S'{çw\°à믿î²(Æ’­RY_>úè#—Ù%S!s½~xß©L ê>ø`›4i’Ëøµ`Á[»v­uíÚÕÄDË"D‹-lÉ’%׿÷xÀýŽ”á$‘çŽGª_/Ýï'QÚÈã½÷Þs›0 
3¿óY§¢}‘mÂW\á‚ÛÉPä‡BŠHŸŠŠÛk¯ŽÖ¯_;—UÈ6­[7³¶mË(òÔ–-[Üï3fl¿mëÖ­vï½÷†ýûöíۻߟ}öYR^=öpÁfË–-³Í›7×»oêÔ©;ü½øÕºuëŸ7ÖÇ&ûýÄKeþÕW_Ù¨Q£lÊ”).ÐMÿ¿hÑ¢°¯ì矾[HM²Ê¢¨¨È~þóŸ»cUH}^§Ÿ~zLïï´ÓNs¿o»í6[µjÕ÷ë¹¼ºÍ-·Üb7ÜpƒUWWÇ}{®×ñ!EçQtÐA.€;™AþöI?é¢÷CUæR2dˆ«oÈÍï<ßßܯº>$ÈDb ]»rkÓ¦ÌæÍ[csçVÙÖ­Û(d< )$È_ÇsŒ}ùå—vñÅ»l}Í›7·§žzÊ~iajmmm½¿?à€ì™gž±áÇ»¬€kÖ¬±Ë.»Ì4ÆC™œ”-P˜sçε¾}ûºÛ·Ï>ûØÐ¡CíØcµvíÚ¹,"ùË_Ü¢YG$A›ì÷¯‡zÈ;ì0»ñÆw¸¯G.€ðÄOÜ~ÛèÑ£máÂ…v衇ֻ=Õeqæ™gºcùÏþãþ_‘±øÑ~äêÚ+¯¼b{íµ—  T6IeƒQ`ßßÿþwûóŸÿÜà{‘o¾ùÆ®¾új÷ï#<Ò øö|¨2{öl÷[ï€LD $ÐH ¬{÷VÖ±c…}÷Ý*[²d=…‚ŒÖ©S…•—SäeÎÐfˆ………1ýýõ×_ï²1>òÈ#vÕUW¹Ç)øKÁiÊÔš‰ãÜsϵþóŸöòË/ÛŸþô'kÚ´©]qÅî>/£G¸ì^¶¦p÷)ÀK¯¾úª]rÉ%î6=ïoû[—ùpâĉõþö®»î²ŠŠŠˆï)Ècã}?žpO9(Ø®¦¦ÆFŽéÊ}ãÆ¶zõjûàƒlܸqvê©§ÚÎ;ï¼=OEÁ„ IVYxúõëçêÇG}dûï¿Ø×ôþ_|ñEûýïocÇŽµ1cÆl¿]Á° R<ðÀ£Ö׎;º¿ß¶m›í´ÓNqÝžõC ß}÷ ,îÞ½;'F@F*¨û©ÕdÕ¨Q£(  UUm¶3Vºß@¦iÒ¤ÐöÛ¯‹Qäe»SÇÒÒÒ@[·nË §`1ŽÉ¦M›\P–¸å·råJ—‘PÙèücz½v“&;îá«à­pA^ 4SfÃÁƒ»À.?ü)HRÇ¢×jÙ²eÌï)Ècãy? ýMr˜:uªËÚwôÑGÛøñãwø{ã)[âwÞé2w*QeõÃþÐÞ|óÍ´•…çì³Ï¶‡~Ø^pÁ1—‰Ÿ‚oçÍ›g­[·vz±îŠUÇCëeÐÛsµ~ÈM7Ýd×]w nö)ÿHFH C´hÑÔöÙ§“-^¼ÎeˆÜ²e+…‚ŒÑ£G+‚ ÈAÊXwõïß¿Þm SVVVºŸpÏI³fÍÂÞþƒüÀ:wîìü,X`]ºtÙ~ŸÂvÙe—¸ÞSÇÆó~ú› å0sæL÷[ÁzáTUU¹ß^f?¹•——Ûƒ>˜Ö²ðŽEÙ TxÊ)§*¿víÚ¹ŸxD : z{®ÖyöÙgÝoeŠ S d˜Ž+¬]»r›3gµÍŸ_eÛ¶ÕR(hTeeÅÖ¥K d¤]zé¥6|øp{þùçí’K.É«÷¿ß~ûYYY™½úê«vÎ9çØ!C¬wïÞ¶téRW j8p qÄöÉ'ŸØk¯½fwß}·õìÙ3íÇúä“OÚúõëíØcµ6mÚPy3¬~È´iÓì‹/¾pC÷Øc ±„2PQQõêÕÚ:wnn3g®²eËÖS(h4}úTZAå2ǰaÃìÖ[oµW^y%ï!»uëfãÇ·«®ºÊÆç~<ÊÊyñÅÛu×]ç²}–””¸À·O<±QŽõá‡v¿Ï8ã *mÖÑw¨°°ÐFŒAá2šB[j¯¿þz5j¥d¨Õ«7Ù·ß®°õë«) ¤U›6Íl÷Ý;P ãlݺÕýV†È|¥, ,°µk×Z×®]]\qqqÆßÔ©SÝï½÷Þ› ›¡õ£¶¶Öª««]Ð,™Jñd„²@«V¥¶ï¾mÑ¢u6kÖ*«®ÞF¡ å ¬OŸ6ÈHùéiß¾½ûÉT@f~ý((( „²„& :wnníÛ—ÛìÙ«mÁ‚*«­¥\:]º4·fÍh&и ) »4iRh}úTÚÀ]¬²²‚”())²îÝ[QhtBYª¬¬ØöØ£ƒí¾{²ö!ézölí‚n€ÆFôåÚ´if••]mþü56gΫ©ÙF¡ !%Ö±c9€Œ@º/ ˜uëÖÒ êb:UÔý…‚¸õíÛ†:€ŒA $CJJŠl—]ÚÚ€¬e˦kß¾œº€ŒB $ƒ**Jlï½;Ùn»µ³ÒÒ&bk ¬W¯Ö2 R@Sv¿¶mËlîÜ56o^•mݺBAD;íÔ’ÀYd2B¹þ%/,°=ZÙ A]\`$NÓ¦EÖ­[ ‡@H O(Ðm·ÝÚÙÞ{w´æÍK(ÔÓ»w¥Ñ$ óõä™–-KmÀ€Îö½ïµµ’’" ®N-™ª Eä§Ž+¬]»r›3gµÍŸ_eÛ¶ÕR(yªOŸÖ2!¤LXÔÞ^ôǶmQ]]T 7<Ü55¾gÒVRàí …B 6GOOCœ<9çÎ]Œùyu©4ÙС¡… y&BW•M„Ì&C80OФ²ŒŒ´GUUA!Hž $ðžkbÏž¾Øµ«'Ÿ"HùË‚­]] @Y()°Yp®££>Μ™‰S§¦cvv^QÊP¡1:Ú¡” !¥7ŒªBlÚÔ‡ E__“‚”¡æ|Ê'” 
AH`ÙjjбsgWLLôGkk‚”‰êêblÞܦ”AH`Åš›kcß¾¾ëŽÚÚ¢‚$. AfaH('%%®UOOctvÖÇéÓ315u!æææ%1510Ф”!UQ,Vå''ó`$iíˆB¡ ”AH`UÕÕ•bl¬;öí릦I@WWC´·×)eI¸.Z[kcb¢?vìèŠšš¢‚¬W“¯*ÄÈH»BP¶JJ\/…B!úû›¢»»!NšŽ³g/ÆÜܼ¬¡¡¡–¨¯¯VÊ–‰ÀuW*UÅÈHG<8õ ²F²IœÃÃm @Y„ÖL}})ÆÇ{c÷îÞhh0¥ðzÛºµ=ŠÅ‚BPÖ!5×ÑQŸO‡íȧE²úš›k¢¯¯I!({%%ÖC¡14Ô½½qâÄtüõ¯¯Äüü¼Â¬’,d •À(6`]UWcûöÎ8p` ÚÛëdôô4FkkBP!$46VÇž=½±kWOÔÕV»RÅbUŒŒ˜ @å6’ÒÕÕõqæÌLœ:5³³óв 7ÜеµE… b˜ ¤×˜ª ±iSk:4}}M ²DÙ$ͬnPI!dÕÔcçήؿ¿?ZZjäغµ=‘@%„’—… ³0ä7vGmmIA®¢µµ6zz€Š#Q”ÞÞÆèꪩ©™8}úBÌÍÍ+Ê[ …BlÛÖ©T$!²R,VÅ–-m199ÝÝ ò–¾¾ÆhjªQ*’ $P–êêJqÓM=±wo߆–JY0´Ý @Å„ÊZ[[]LLôÇöíQ]½ñZÚðpkÔÔT¬’å®P(ÄÀ@sôô4ÆÉ“ÓqöìLÌÏWþºëëK14Ôê ¢™ TŒR©*FG;âàÁÁè訯øõŽŽvF¡à{ ² B§¡¡:vïîññžü~%Ê‚žõ¾l*^I €JÕÙÙ qöìLœ<9W®ÌUĺ²)ÙäKØL„*Zj‰C‡†b` ù­Ê~Mƒƒ-;éþ› $°!TWWÅöí11Ñmmue½ŽÍ›Û|¡l‚À†ÒÔT{÷öÅM7õD]]©ì>ÿ–-íQ*iÝl%%6¢îî†èì¬Ó§gbjêBÌÎÎ%ÿ™«£¿¿É—À†b¬°q`U!†‡[crr z{“ÿ¼Û¶uF¡PðŰ¡B^mm)n¼±;öïï––Ú$?cwwc´µÕù²Øp!þ- AfaÈ;»¢¦¦˜N£®*ÄÈH»/€ ©¤ÿ©¯¯)ŸÀxêÔtœ93ssóëúy††Z¢®N»`c2à*ŠÅBlÝÚDWWú}Žl2åðp›/€ Kà=Ô×WÇ®]=±gOo46V¯ùûgaÌ,” • $À´·×DZ}{gTW×ä=[Zj£¯¯IñØÐJJ°4…B!𣧧1NžœŽ³ggb~þú½ßèh‡¢°á™ °L¥RUR=³³sW}]¡PˆÑÑ€ÿb"$Àõn´U…ؼ¹-&'ó`äÕô÷7ESSbÀ„X#µµÅëŽ}ûú¢¹¹vññR©*¶liW ¸ AH€5ÖÚZý±cGgÔÔói‘ÕÕÚ1\MI ÖGsôô4EUUA1à]B¬£bQÞK•©„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!€d BÉ„’% °Ï– IDAT$KH– $,AH Y‚@²!€d BÉ„’% $KH– $,AH Y‚@²!) 
W®\‰“'OÆË/¿ü®¯™sçÎʼn'â7ÞxÏ×=òÈ#ñûßÿÞz sss155Ç^x!^y啲¾.*m=ÀÆV)}J¿€òVRR—ô£ÅK/½Û·o/}éKïxMöÜ}÷Ýÿüç?£P(D±XŒ»ï¾;ÆÆÆÞñÚ#GŽÄ±cÇâ+_ùŠõÀ:ʃG'žxâ?B)Ù9?99·ß~{~î—ËuQi먔>¥ß@ù„$YY(›Èó»ßý.ªªÞ{xéÃ?¯¿þz|ó›ßŒúúú¸÷Þ{ó°á¶mÛ¢ººzñuY1ÛÜþ¾÷½/¶lÙb=°Žþô§?Å/~ñ‹èì쌉‰‰hjjÊ')>óÌ3y˜0;×?ö±•ÍuQi먔>¥ß@ù«RRõï|'žzê©|3ú§?ýéw}Ýììl<÷Üs±sçÎhooºººØ¿<µµµïzÜW^y%ÿûÆÆÆÅÇš››óÛ×^{mñ±x íg?ûÙ(‹×´–lªP¶–7ß|óªÏ_¾|ùϧ¼X뼿¿?öîÝûŽ×íÞ½{ñ:ÊÎñr¹ÎS_°±¼úê«ñ /ijÏ>333ËîÓ)ô©w[Ãrú³~ •¡¤¬•éééøö·¿…B!¾þõ¯GGGÇâsGމÇ{,n¹å–¸õÖ[óÇî¸ãŽ%7›ì“É&ù,XØßÐÐßþñŒ?üáqûí·Gooï5¯%›ìöè£Æ®ú9³ÍôO?ýt¾™~||<ùõÀz]çW“ýí‚ššš²ºÎS^°1d“|ðÁ¼¼Ý®]»ò‰ÔY p)}úýïÿºõ©ÿµ†åôçíÛ·ë·PL„dÍtvvæ›ê³IDÙæöûÛßâØ±cÑÖÖúЇ–}Ülºbö·ÙÄ ìØ™çž{.ª««£¯¯/Ÿjôãÿ8†‡‡ãæ›o^•µdñ3ÙÆù…÷\påÊ•øóŸÿ¥Riqó}êë”®ó©©©ü¶»»{18XÎ×y*ë6†ûî»/õmÞ¼9îºë®¸ûî»ó~vüøñ8qâÄ’ûôzö©ÿµ†åôgý*ƒ‰¬©øÃù¦õ¿üå/ùíM7Ý”o>Ÿ››‹;ï¼3ß´¾ýèGãþûïo}ë[Q__çÎ˧úd÷ï¹çž˜ŸŸÏ7Ñ¿}2ÛµÈBCCCqæÌ™xþùçcÇŽ‹Ïe›ë/_¾cccùæûrX¤rgçöoûÛüþÞ½{Ëþ:Oi=@åËzîéÓ§ã†nˆ/ùËQUõ¯ÿçÉîÝ»cbb"ººº–Õ§×£O-e MMMËêÏú-”?AHÖö„+•âŽ;îˆ{ï½7~øá¸páB¾Ù}|||øš;88˜ÿ·àïÿ{üìg?˧eS„²Íî?ùÉOòÍòYࡇŠ_|1>õ©O­èý²ãfÇ{öÙgãÒ¥KùÔ¡7ß|3ŸD”M-«õÀz_çSSSqäÈ‘ü|¾ë®»¢X,–õužâz€ÊöÒK/å·===«Ö§×ºO-u ËíÏú-”·*%`=dï Eccãª?›öó½ï}/ߟM<Êþ}ôèÑ|Óÿ¿øÅøüç?ŸO zê©§âÕW_]Ñ{dl³|65hazQ¶?Û„¿k×®wL1J}=°ž×y|ùîw¿›ŸÛŸûÜçb`` ¬¯óT×T¶l²c&ë#×ã÷ØZô©¥®áZ~‡é·P~!Ys³³³ñÓŸþ4ß ÞÔÔÏ?ÿ|>½g5ýüç?óçÏçSزMî333ñÚk¯Å¦M›_3<<œo|ÿÇ?þ±â÷Ù»wo~ûôÓOç·ÇÏo³ÉBå¸Xë<;Ÿï¹çžxýõ×ãŸøDìܹ³¬¯óÔ×T®… ãRÂ{+ù=¶}j9kXéï0ýÊ $kîW¿úU¾©üСCqçwæ=øàƒùŸÕ055=öX~ü­[·æ]¾|ù_'üÛ¦UWWç·Y`¥²ÉAYxàĉùš²AGGG>Uiµ¬åz`­¯ó,x’…§§§ãÖ[oÉÉɲ¾ÎËa=@åjkkËoÏ;·ê¿ÇÖªO-g +ù¦ß@y„dMe›ÔùË_æ›Ö?ò‘ÄŽ;bÛ¶myhèÑG½æãgÙï¿ÿþèêêŠÛn»mññæææüöÂ… ‹-Ü_ØpŸùÍo~<ð@\¼xqIïW(ò©Cóóóùûf›åÇÇÇW­^׺Hù:¿téRÌ^ÿÁ~0n¹å–²¾Î×k= ÆÆÆòÛ_ÿú×ñÆo,>žõ¯cÇŽÅ©S§Vô{l-ûîR×°’ßaú-”¯’°–~øÃæ›Ô>555ùcŸüä'óI>?þx¾™½··7üèÑ£ñòË/ç÷6£Ÿ?>~ðƒä÷³MìøÀþãø=ôP¾Éþ«_ýj”Jÿzÿ{÷ GyÞü™Ùk:µ;õŠ@ ¢Ë ‹"° L±Á1œ€IqbËÁNìÿÓ Á±;ŽmŒC·˜ D•èB$„z?µë»óßçÝ™Ýwfg÷öÚê$¾Ÿd½»3ï”wfîöå4¿}úõë'&L%K–ÈäɓͶ/^l*iå €†tùC9${#~GŽ=öXs³~P¹Hûg_ôèË?ç¶ÑŸD"ažï¾ûî¼u3ÆTVÜ~Î÷U  ¿›ôwÕæÍ›å§?ý©L™2Å„—/_.[¶l‘Ïþó•û÷n©}èì8Œß·Àþ $ÊFo._³fLœ8QŽ:ê¨ìôúúzS9íé§Ÿ–'Ÿ|R®¸âŠl{½áݶk×.yã7Ìk ÙÁÁwß}×Ì;묳dÔ¨QyÛ¿à‚ 
äÞ{ï•_þò—Ùå/¾øâP›T*ež;sÓûðáÃeüøñ¦oÅn˜ßýúòϹVðRÆY±bEìúÚÚÚBïûòÏù¾ê40øGôG2oÞ½GŽUwûðsÎÏ9€/ nÛ¶Íü¾†;ó{z_þÞ-Ö‡Îþ~æ÷-°ÿÒü#!QV…nºWUUUÝZ·VÒGGjkkc§?ûì³RYY)çŸ~§¶«a­®äºn¨²Rwu·??çüœøøÒßWZ-±»¿§÷ÕïÝŽúÐÙßÏü¾öo.‡Ù°aƒ¬X±BÎ>ûl©««ëÔ²o¿ý¶´µµÉ¤I“¨ðsèß»ü~`£"$–H$äsŸûœL:µÓ˾þúëæù˜cŽá@üœzà÷.¿ŸØBi#FŒ0®8ÿüóÍó¨Q£8?ç€ø½Ëïg6‚@7qã=ÀÏ9€ßÏzË!}AHÐg„}V‡Ðµ47Ë‚yódñ‚²hþ|ÙÝÐ [7n”d{»Œ7NÖÕÉñ3gʉ³fɬ9s¤vÀ€¦ïÍ­Í2oÑh“÷Wµ›±V±vë6%Ó㧤¤Rñóv{Ò–>–k]q t=•n·i[Jš[=:¸xI@÷ÍŸ?Ÿ $`ßÒ›ô¿zÑEò»_ÿºÇÖùÞoÈ’E‹döÅ÷é›õõ&ý‹þù"ùõs=×÷7V¾!‹Þ_$ŸzqŸ½Y_oÒ¿è¢ùõ¯›{®ßo´Ë¢EmrñÅ5}úf}½1ÿ«ÿÜ ¿{®çúþÞÊvYò~›Ì>µ†0$€ñã«ÝøJËo¼Û*·¦:l·j}»4ìê8$ÚÔìIc“'u ‡!ÕìÜ’‘C C@o" Øç´RQOÞ¤XûᇲeÃ9ó³Ÿí³}×JE=y“~àÃMʆíä³'÷;k¥¢ž¼I?ÛaCJ>ûÙê¾{½ÿxW† ³×û¦¤lÙž’3O®æ— €ñã«ÕøJ+Av‚TZ ²”d@«o·'E(žpljñÒm=1$Á/è%„äûèûÌ‚yóä;îèµõÿæg?“'î¿¿Oö}Þ¢yrǽ×÷Ÿýþgrÿó}¯ïóæµÈw4õ^¿Ö$÷ßßÜ7¯÷E-òÀ½×÷ßü¾Ižx¾YŒ¯_1¾ú¸Œ¯ô‹ ô !:²k'ÛR^ÿöô2Zñ±#²Ü´5)€ÞC°O´47Ë×^ÛëÛÑmìnhèS}onm–koíý¾ë6öö¾77{ríµ»z¿ßém44x}ëzoõäÆ[{¿ïºÝ{=0¾b|Åøê@_%Sž,ý ­Ãv)Ï“µ›Ú»¼5’émuÜîm¦‚$ w„ì?»åÙºqc¯oGoÒÿŸï¿Oõý–‡n‘;z¿ïz“þ÷î;}¿å–FÙ¸1ÕûýnðäûßßÛ·®÷‡eëŽÞﻆ ÿçá½`|ÅøŠñÕ>¾Z½.i¾l¢#[¶¥¤­­ëÛÑä–í'5¹j]»zAH@Ùiµ¢rÞ<¯¡€¾RµH«•óæy ô…ªEZ­¨œ7Ïk( ¯T-ÒôËNÔÐ%U!€ñã+ÆWòøJ«A–:Ôj¥„;²e{ª¤ªΤ*$ô‚€²[0o^Yoœ×`€n³/˜·h^Yoœ×`€nsŸ÷{^KYoœ×`€n³»Ö¯_/ž×½ý^°¨¥¬ÁD ^ê6Œ¯_1¾:PÇW[·§J îÞíI2åt{ŸS)‘]{:NBj@³'‚—€|!ˆ––Ù°aƒ¬ZµJV®\iþF§àçÀï]ôœÅ ”}›¯?ÿ|Ÿèû‚%åïûóËö}ß,h+¿Ÿoíö:yä¹õÖ[å­·Þêò û‹—”¿ï¯/kã+ÆWŒ¯ÔñÕŽ]¥-·§©çú¾·É+qßRèy!‹Þ¯7æ766J2™”T*%MMM²nÝ:innæüœø½ €²hþü²oóµ… ûDßç/)ß.Ý÷}Ÿ?¿üÁ¼… »˜6mšìÞ½[zè!ùÏÿüOyûí·;}Ãþ¢%åïûkKÛÀøŠñã«u|µ}giU÷4ö\uƽ¥B@¯  X¶mÛfnÎÒtÑyø9ð{=cwCCٷٰukŸè{ÃÞò÷}ë®}ß÷†¯üýÞÚý›ÐO=õT¹þúëå´ÓN“={öȃ>(?þñeÉ’%%ß°¿{oùûÎ øÀøŠñã«y|ÕÖ^Ú¶’=—ƒ,y]¥î s*8@Nkkk—æàçÀï]tÎÖ?ÛŒ³qÇÆÅ6óöacrl³g€µµµræ™gÊŒ3ä•W^‘—^zIxàyöÙgeÖ¬Y2uêTq§ðµ·£ü}ߺƒ $0¾b|ÅøêÀ_µ¶•˜lïÁCTjÀ±¥Õ@Ï# tB[ÊýÊôÿJÊËT2ÒÂHùÏ:Ï~m·K‰ßÞL·Ûùm¬ù©Ðôè6­×¡e¼¼íÓrûy·Lx½Z¼ÉÓ‡çd_gžÌrÙùúÚÉÌ÷üiA[kÙÌ|'2?x^g°¾ðvœÈ6­u&ím;áý°§‡–u"ûP¨oñËæõ׋LOæú×רéÖ´Øc{ £Ç¸@ßìéÖ²¹×æÊè¡G²×Õ³ÛxñÅseøð~¡@ÐÔâÉ7~¼CÌ­,éÿ îiq/ó>oºõ;ß‹Ÿï?çÍ“h;/¾½D^G–Ï>ŵµ×-ái¡E ÜÏãtó{E&z1<Ýb0ß /ãEží6¡g/·Ï^ÎË-Ÿ}íeÆ#¹×¹Gxš“{Š´M9‘÷áéö˜(;*4FŠŒ SÑq§5^³Û¤’¹ùÙ¶ÉÜûì#éXóüùÉܼ`]Ùe“™o™Ï¶ó_'ƒ×Ö2Éô뤹KÇ<íþX)î¹Ô×ŦxÞßñ 
°ª««M墓O>Y/^,/¼ð‚Üÿý2bĹæšk¤²²’ƒÀø Ð B–ªª*ijjŠW™ž`ÿ÷þû; !«ø9€²Ž¯vìI˜À dÌ ; ?Ïvø1¦]Þ´ÐûppÒ^Oèut}Ó>vtâCŽ…‚±_lî}[”]&’ˆô¼Èúü€¢c¥$CÇô'Œ Gæ‘^dºjEz&èH8ð B_H¢óH°ÑñÂHÉM× ëø_"áøÅôt×¼wÌq7ÁE¿sæ &ÒÏ®d¾TÄ'Û§Ìtý‚ÇÌ7ÓÒ3]×ÉÎwüeRþ±w¬u˜f®g¾ÔÂqýùžd2×fŸÌ|Ïz޾–I¯ÏñœìÏŠëf‚’ÁÖÃ)aºÄ´ ÷ 5J6­][ömö£êGÉÚ­k˾Í}ÞïQ Y»6Yæmº¶Ñÿfyøá‡Óc©pu£ã?^¦L™»ŒÞâ‰'J{{»<óÌ3²yófill”ÁƒÇ_{õ Ù´µ¼}Vïò‹_1¾b|uÀޝª*’*/V$J¯äØ‘Êï°©®âïjÐB–¡C‡ÊúõëóþAFoH2d8|ï{oÉÿþïR[[‘÷s®¿å_iàì•åµ&¸ebv«Å´·ž $CË^é¥dЊŒ[sÕ ¼éöòáÀ£H†*CZÁÈØJ‘ñ¡È¼`¤Ý&åXÓcª@:v2|LùAÄP%H $ZÕ!MPÒôÀ1ã|7Û­ôòN&蘙îW–”\åG×Ë´ ª7áÈ ÈèºþûlàÑ AúAÉp5È"U!Õ £mœèE×Ùê….¶¸u”z!PÜôÓO—K–”u›ÇÏœÙ'ú~úQ§Ë’ÕåíûÌ©û¾ï§Ÿ^%K–´—·ß3+;l3pà@¹þúë;l×ÒÒ"¯¼òмøâ‹¦ÊQ}}½œsÎ9rôÑGû_JQäz?ªJV¬.oߟZ)ÆWŒ¯_¨ã«!ƒ²§±ã¾¨MHsKÏTÍì_[ÚMð…Ð;BÁÍúLµ« €ýÞ²e ò‡8?ýª-ýó}#Ê8¾úÛÿÚᇽÂá­N ;ªþXjø1P,|ì°:¤X ´óG–y÷î8Å«@:N(A¤ê£áYÓí¶^nƒáð£ä…"sÁG/ò>\ÙÑ‘hèѳÞç;ôè9¹À£—£gN®:¤S†´‚–~WLuH+ø¨ÓSéõ»)_Å :jÕÇTPÒEF*CеŒãW~ÌT´þú4™éY¡Å`]z L;7R1Ò³®çhHÒAŠU9R¬õ¦Rq¡ÇRª?:RzeHºïÄY³äž[o-ë6›1£Oô}ÖQ³äÖGËÛ÷Sö}ßgͪ”2Ÿr™1£ªÛëhnn–—_~Y^zé%󺮮.{ƒ¾ë–v“û‰GUÊ=–·ïÇM©ã+ÆWŒ¯ÔñUý G>ÚÐq»ýD¶î虾÷ï甸o! 7„P6Ž›‰›u5lô§ LÆMË_O~ÅÇR‚ñ¡ÇHàÑɯ~Œý‚s'öe—„B‘ïøˆ«éXoòÃV@2TR"!ÃÕ£Õ"ƒðbf]VàÑn—²B“N$éëð2ÓýufÂŒ¹ ¤©ð˜òƒ&°˜ÙŸ\;zlLåª:š¤øU“™ ¢V›LùÕƒet%™i¹*®kO÷ƒ’þÓª•Áº<;ð(và1~+)AJÉŸ^z5H‘®½NN ߬9sd`]ình(ËöªkjÌ6û‚9ÓçH]ÿ:iØ[ž¾×TÕ˜mîó~Ï©–º:GÊ3f¨©qÌ6»ë׿þµ¬^½ÚÜ öÙg˱Ç[ò úÙë}zµ ìïÈî½åé{u•c¶ `|ÅÖf=æ IDATøŠñÕ:¾6ĕЄH{Åt$áz’Luï/¬º{ƒt¼ בáCüB€^À×Ð(› ’yX•íB¯5,éWtý¶®³lvžç/¿îè:²ïƒíè³ÝÆZ&û:º®DðÚ3ë1ë ^'Â˺‘׿}"ópÝ"k~Âí m „½žDdý‰Èvíùqω\¿³¯ ­#».¿mðˆé§ëŸ—„ÿˆ]Wv=Ñc•™îÄíst]®„ö!wn#ëõ¯ìuá/=·ákמ煯Gûºfó¯_‰»žó‘’ [?cWìnÕGB€ž¡7Îÿá×¾V¶í]=w® ôzãü×>[¾¾Ï½h® ìó~×8òµ¯õ/_¿çÖš`@w >:^(@ LÚ}²nd¿ =+ }8Ö#&ìäÉ®;hÌõ3 )&ùÊüÀ¡rôòŽyï#Û-žt½üõØÛ³¦;Ñõ†B‹^~85aM³C“ û|y™óh£×S4È™^tcNá‡]Õ *Œf+Dæ·“èëÌO›ôlø1à˜FÐ5zóü°Q£z};zƒ~9C¥Ð›çGÕ÷~ßõýr†:ì÷ÜZ5ª÷ÿiBoÐï©PÀE]$'œpB—nÐ]ïÕʰúÞï» Ëº0¾b|Åøj_¯4t¨_8Ñ‘áC]©¬ìúvô便„)5YJ8°ÿùæ7¿)7ß|3b?÷ØcÉ—¿üeÙ´iÐ'¬_¿^Þzë-yï½÷¤­­­W·åyž¬^½Z¶lÙÂì×B(›üªxñH·Äd4ø_a2~t¯Ãðc¨d¤r¤[¤Òc¡ ùí3ûáFÃÙÐc~Éhدh›b!Ë„ÄCÌ„W´âcÂ_&/Y Bc¨zclµI//ðèäU–Œ DÚÊØJ^Ám9‘ªv°3s]äÂÁûP¨5/TéåW8u#•c+ z±¯ÝÈσĆó’"q1Çž?¼¦ˆtžV-ºñöÛ{};º¾R­( U‹nÿJï÷]·Ñªeû]ãÈí·êý~§·ÑÕŠzôz¯r䯝ô~ßuTƒÆWŒ¯_}ÆWZrê¡'µ*丑](Ž(©䑇UR 
ŠhhheË–É®]» ¶ijj’7ß|SöîÝÛgö{þüùò½ï}O>úè£^ßÖöíÛ妛n2½žÖÒÒ"/½ô’¬X±¢`›Ý»w›6ëÖ­+ئ±±Ñ´Ù¸qcŸèWg$“Éô˜æv¹óÎ;ùô:×,\¸P–/_ž7OC‰÷Þ{¯ÜvÛmòàƒÊ=÷Ü# ,èÕýÑ1Ø]wÝ%÷Ýw_Ÿ>6ì #!”M~%¼òJ @Æ#ÈPø1¯:dLøÑªúèF+GæUo´*:q%sÇ Ìbtb*:qÓ ;zÄ*ó‚‹±aÆH€3SEÒª ™HZWüryÛ¶ƒ“q%í@dÞqòbž^8è~ôƒŒvõOǪ ™=ÇŽºn¢aÅlPÖ‰©üzßqUH±æÇ… ƒj‘"á˜cÐ&óEú…N‘G1^Ì{¯È|J7kιäškzmý^}µœsé¥}²ïs¦Ï‘kÎé½¾_}ÖÕr錾×÷9sªåškúõ^¿¯î'—^ZÓ7¯÷éÕrÉ9½×÷ Ïê'ç̨ã+ÆWŒ¯>.ã«áC\7²ãôá Ž ­ëü-2CÒË ØñrcÓû0r)H(fÚ´i2uêT?~¼,]º4¶ÍwÜ!Ç{¬üÇüGŸÙïüàæùòË/ïõmÍ›7O¾ñoÈŸýÙŸõøºßyç9å”Sää“O–T*Ûæ[ßú–ió©O}ªèñÐ6ßÿþ÷{¤_N,íW]]AH@Y¼ÿþûòÔSOÉ£>š7O+@êxhĈrÕUWɾð™8qâw 9Š›ó~JC@ÙÄVŒ 4–€´C‡®U½Ï @lï ?F‚„N¸âclðѪ¨ =ºÑp¤t,ZÔíª6™4,ö(Ta²P(2Za1S­ÑnËFº…ªEê“—ß&‘ˆŒ¯|Lx‘ê—®ði…&Ã×™9?^äüy~(2rmd+殕Bá] ½·~&ü`a¦‰—ûù±Nl–1.Xjر¯È{;IEH@×}û'?17ì÷´“gÏ–¿íC7ÎÅùÉŸþÄܰßÓf3[þ㺾Û÷Ÿüd¹a¿Çû=»Jþã?öíëýO™@d_ïÇTÉß^7P`|Åøêã6¾šrh¥ DvDʃ–þ7²µŽŒÑq¸qÈ`W&R)€â‚ ‚Zè/ÿò/cÛhu$ûy_Û´i“ ñixsÆŒ½¾½SO=U.¼ðÂ^ BjÀtذa²mÛ6yýõ×cÛ<ýôÓæYC“k×®móûßÿÞ<ŸuÖYÝî×yç'UUU&tPº­‹.ºÈTÅ|öÙgù¡ô*?Lžüè•0½PèÑ~â Kò£‡2Õ…zŠ®K×Y;`@Ÿî{E¢BúÛ‡Lu¡ž¢ëzèï’5}·ïéS.=Tgª õX¿ÓëÒuàôíë==ÖüÑßÖ™ê=v½§×õ£¿«“Ú‡_(ÆWŒ¯>vã+ý›Ø±“«Lб£v©0U;¢mWaþþWŒnó¸)UfŒèبQ£äC1aºßýîw}~J}´bÐç?ÿùôçHïvØaòðÃËßüÍßôøºÝô‡Ú9çœc^?ùä“yó7oÞl¸6MMMò /H¿~ýäôÓOïv¿Íñmnn.Û9 *{RÐÛ† b>wâ¾LA¿B 4è€í[[›ùr‹öööN›ë~:ñß·å’W2.éæ·s‹†!½p˜ÌÍ_Î mÇË MºN áÇ :¥Ì‹ >& ‡íÀcš‹9–V!Ò‰«¶Xê#Q d4(˜ˆ. 
$ãB’‘ê…B‘yU £•)‘@ddýN\ÅG«:d¸ g¤êc6  C:Ñ0kt»M$|ëz¯ã¼€£øQؤx-—ý¹òçwù'Ó+0Í+Ц£0$]£7ëÿã]wÉ­<"õÆuy=ëêäæ{î1ëêë7éôfý»¾z—<òíGdØ ®÷½®Üóõ{̺úòMúÙ~WˆÜu× yä‘:6¬ëÿ\QWçÈ=÷ 6ëêë7ég¯÷ôØó¿:HnývÔêzßöw保6ë" `|Åøêã<¾Ò¿‘yX¥ %VU:EÛ•0!G=^y?7éÃ6al´)‚¬HçŽ>¢Òl“$”No:ÿö·¿m^ýë_—d2Ù©åwïÞ-Ï=÷œ©Ò¸fÍš¼ù¬Ój‡[·n-8oçÎyó4ˆ§ó¶oßš®á=õ¹Ï}®×¶aÓPà[o½e*QÒÒÒbÖóàƒÊ›o¾iÞ—êÜsÏ5ÏO=õTÞ¼ ˜ó3sæLóþÿþïÿòÚ,\¸ÐlïŒ3Κššì>ë~444˜÷ï¾û®üú׿…*£ýÒðÇòåËÍñR«W¯6ïµR£¶íìy/u?Ô™gž)õõõrÿý÷Kkk+?”°ÒÏýÌXºti^U?ý<Ð*ÔÁ磆Ý>üðCS}xïÞ½®[—ÓÏ%m÷yµgÏùàƒä½÷Þˆíq~öi{ýºÏúåÁòú^—Õi6l0A½Bû¦}+õóK÷_÷K¿ì ÔjÛé¡óô1è‡~6ë{ÙÕ¿£Ç&˜®} ¾$AÏçºuëÌvŠ?=&+W®”%K–˜ýkÛý èõ¤çhÙ²ef?ã”Ýé 4å’ )š7áWjÌ |EÚ‰€ æK íƒJz±m%òl/+^¤"_\ɦвíì`šâì6qí£ó M‹¹‡§³_@ü}3o1Ïzò"“={Y/øÿÜ4Ï ­Ûó²Ír¯Ó/r¯ìz½Ð#ÓÆKeÚèiÖUç¦yæ}*å˜e5h¦§ê³ ó>ý2¥óÓ¯ôèg^ûœ™&ÙÝöÌú‚›Ré÷®??å'7½`ÊLê|G÷-sj3áG}o‡q=ë9ûÚn§áJÇŸï™s?Nñd¶Ø£þöa§à%Rìjè`Z±*ö4‚€ž3kÎyrÍùÙ-·È7Ý$þ±1ªº¦F®¸þzùãn07ëïæLŸ#kî\#·ý¹yS¶j¡nçå—_6¯§NšmÛ“Ûˆzíµ×dúôéò©O}JüñÐ<½Ñþ»ßý®YÞHhuÆ›o¾YþüÏÿ¼Ãª•º^­ ùüóÏ›°….ú¢û¯aA KêMû®õÍZÉS}æ3ŸÉÛgí“VüüÚ×¾fŽÝàÁƒ³¡Äh¿þîïþNn½õÖì:.¸à‚ìk d~ò“ŸìÔy/u?TEE…qÄòÒK/ɪU«äðÃçöú¹ô裚ßùöçÂÑG-_|±y­á³Ûn»Í|N1„èíàÙ1Ç#^xaèó-£h5dýì·×=~üx¹ôÒKÍç‰M?Gµºµýú9|ä‘GšõWVVšÀœî‹VFÖ±Ò`¿~±@àÈü ¦ºÚ|¾ÍŸ?_N<ñD3Öˆzì±ÇLè_ûª}.D¿hàÞ{ï5ŸsýlÔÏábc´RûßÑyÐñÂ+¯¼’~Ï=÷d_éK_’ &ÄL׊Œ:O¿¸ 8z|gÍšªJ­çAÇ€´¿`#‘H˜/wÐëÀÇtu¿´Ïznt eoGÏóÙgŸmÆ ö8¬³ýtAHeã䅭ב bxžD‚“^p¤ @ÆÎ³B˜z.=üX0øX,ôèä‡ív¡¶1Ç0à -Y],/:ßË_Êóò—÷¬Ì[(L‹†#­ £ã•ŠŒ"¿MJ÷Ç7úHý"X­Îi– ¦ûÏšx4ÁG -H 6úû£ÁFýsvÊÉ„ƒ€d†tì°cúÿ\¿?)7óÞÏ)æÂÑÐcx´‚Á;óªBJáJÁÏD¦÷‘Ó=A]®éż/µòc¡=Coº¿î[ß’«çΕ§~X/X ¯-\(»dÓÚµ¦Í°Q£¤nØ09~æL9ú¤“ä“sæt«ÒQ_¡7Ýë²oÉÜ‹æÊÃ/?, –,…KJÃÞY»5Ó÷Qõ£Le£™SgÊI‡ŸdnðïN¥£>ÑïG¾õ­þ2wn­<üp‹,XÐ* ¶ICƒ'k×fþQyÔ(×T6š9³RN:©ÒÜàßJG}æz¯räºËúËÕÕÊÓ/·Èâ%­òÚÒ6ٽדM[3}×›òë¹rüÔJ9úðJùäôênU’0¾b|Åøê@_iXqâø SÙqËö”lß™’†])ikiiõ²c°Ê G]!j3ËÓñW|;1c±Á\´,VqPœþÛÞ˜®a¾óÏ?ß¿ð…/Hÿþý‹.§7à?òÈ#rÉ%—ÈŸüÉŸÈ€䡇2J Íiµ# èÍîîÓ€V)4hPvzs¿Ò›å£?½™_wÞyÙiZ=JtºŽf§÷ä6Ч(­J©*5`qÝu×™›õ5€ðƒü@þâ/þÂT3ÒãY̰ôØNÃÐ Æ9眓§ÏáÇËi§&ŸøÄ'äÅ_4ë×ö ’dPYÒ¦$5 ¢!J hU¦BýºòÊ+M°â§?ý©9Î×_½Œ=Ú/;ØQêyïì~Œ7.{Ž BÀþCC쯾úªù‚ý¯÷bèïù¸ßõú… \›8q¢ ?juC ùiP«Û¡~¥ÁAý\Ñuëg_UU•©ú÷ /˜/nøÊW¾bÂô_ýêW¦Jñ!‡bÖ¯ŸaZEQ·;mÚ4ºû\×ÏqGÕ¥u[Z©¸¶¶Ö|6ê>ë:tÿtÌÐ/DÐjǺÅ>»t[?ÿùÏM 
T×wì±Çš¾¿ñÆr÷Ýw\®3ýïè<èñÐàäâÅ‹eÇŽròÉ'›±”¶9rd‡c¥•¢u,¢_z c/­­c/=>cÆŒÉ=&z¼t›8Ô1¥VÀÔq~¹‚~æOš4©Ûû¥ÇG·¥áÚN8AêêêLØQ÷QªÈÔpcWûè‚ÊÆéJH'> X0ÐØÙdhžUeR:~´*>v\²HEHÉí{±Jq#K:þ…fXéF/2Ý –ó2k°ÿÖgÃ#­ £ä*Fz„"ƒ@¤ zb…ƒEª@êr’ ]f«>êùHi˜ÑÉ„#M{ϼ7!Hñ+Ït¢Èàó@?[4H÷ùÏ>ôy±mÛ6€Óe´êŸÒꃂԠ¡†ûƒ/>ÐÏ2 É +ò¥Rº¿úX½zµhhR¿ÜÀÞæÚµkeÅŠ¡0¥êZZZÌ8"Øï8ÚNC:Ð*ÜZ±0øìýŸÿùÙ²eKÞ2íGçaìØ±æ¡û¢ã£ãŽ;®ÃñIÜùÓÏ|dÿ¦U]mÆ&ê „“'O6ûmi…Oh5PíW„ìê~iÀS~aÄÿñg¿„AÏî‡^SZ)RϽ~iCWúè¾.@Ù„BˆnP-/R%Ï æECc^Ávnè½gnt=qí\u?×õ²Õ³Ïn®mv}®µŒnË<ç·qÝÜv³ëJX¯Í#³|°}7¼Îl7º>7ºV4 ×•GèØD¶ìoîufÍ#ûÚšŸˆ[&××첉Èvì6æxèMJ釽>óÚßnÌrN"¼M'{| ¼Žo +ÇZ¶òcèØÛ׉g]áë&{½êu‡®];øëÅ#ퟕ¸o®Qè©DqUK©ü(æ¥"¯:¦H½I]|ZQ§_üâæY+E7•kµ¢À…^hžƒ ŒJ—Z¡1-j/ u^°\@ƒ*„ìÉm”Jo°Wý׺Ñ_iPQC{öìÉVl,&¨æT¯TZ•Hi(Di°!Øo»ö!®¤ÒJGZáÑqz¦rrgÏ{gö#8§qÄ}—VRTZ°#ú»ÞA* Ái`¯µµÕZIXiX0JCvJƒ ×)ý"»ú³Ò ‘ÑÏêÎÐ Žö>4|©¢_µtéÒìø A* è]vÙe±Ët¶ÿ9]¥çϪ 4¨»í}‰;Þzž•VÃì.­~©N9唼JÔZÕQ¶zMÅ}ÁB©ýt!”M¨Ê£_±Qœ˜ "¡°XütézÅÈH[‘ª?©ü˜_2fž„+=†*BÚ¯¸c–›èttl»|R$TéÑæùë5U#U=¿¬£g½w¬Ê‘áŠbU„ô¬Š¹¶ÙGÊË®Ëñ2UC"u~z¿ƒ÷©”VxÌU‡4ƒ}×¶þ^5/=ÿ ›åty CZ=K™%³}×_0eWvŒ>Ûת_•1®²c¡ë=[ÔɯèèÄœø®ÿûqGU ƒçŽªAz”æøã—+®¸ÂÞ´"äm·ÝÛN+")­bôÛßþ64/¨jdÚ´B¢†4ˆ¨ÿ¾¥ÿÖ¦¡?ö½ï}ÏTV|ì±Çä»ßý®i¯¯Õg?ûÙкƒ›Óƒém=µRÕ)ƒJLQZH·_J…C Fh¥*­œ¨ÇO«iÅIuæ™gf·S__oªBjÀRÃAÈR+JÆÑŠLÑ*HÝÑÙóÞ™ýÐЂZµj?ˆ°Ñ Ú²eËL5=­ê§AD •EÈ*X h@ý†]»ve§_~ ŸAÈ1ÐØØ(­’¨FŒÑã}<ꨣäñÇ7ŸýZRû¡!;}¯U©ƒ`b!Á~Ÿu6;iëlÿ;sº*îüõïß?´O¶ææfnݽ{·™¯Ï*™Lv{_‚ówLƒ±¢VÉÔ*“Ýí 4!”O$éø++–”h{/»ž¢m¥H2ZËγ—)~Ì[Fÿ7&øX ôX,ðŽ‹mÙÙꀞąë2ÁBÇÚ‰hXÒ„=/ÛÆ³‘¹ c&œÌ Eúm²AH'Df®óì³óSž‚tL˜Ñ„"Es™Pc浈ô{¢aF}o ZúÝôÿ¤ÜLøÑÍϤ$Sæ¥g¶Ÿ­ü¨ï]ÇCúóì`¤gUõûT0ôëÄ]ƒ~(2æºÉž±N§!K­iO+öHż(Ý?ýÓ?É}÷Ý'wÞy§©vð¬”Z»v­yÖjEUUUyë˜6mZ(œ§½!_C|ZÁGƒâÓ K‡z¨œzê©òÜsÏɦM›LP«j0P§Û‚×ãªõôÔ6J]G -ª ÈPŒ†´âãÝwß-¿ÿýïåòË/7Õ,õæþ#Ž8´I$2{ölsntÞùçŸoÚêû³fÍ*˵ÑÙóÞÁ9Õ~öZÑñšk®É†ÿ5?dÈóEZ¸555æYÃ… ©ŸÛqŸ ú¹oƒ*ƒLìiºê× 6Ô±…† ÛÚÚÌëŽ>»ô ì~–¢³ýï‰óÐqAK3j sñâÅæUTT˜¾kë蘲«‚ãSè|2›ššºÜ@ç„P6Ah1®Z^8VjH¯H%ɘ*þ¶ƒí @ªþX¤òctŸDâ«CfÚ:¹ùb//‘é‘v±mŠó"ó¼¢mœp´ÍËU‡ …$­€¤]=Ò»"¤g…áȠò£— Dú¡ÈT.,àúÛ U‚t4(éL¨Q÷.•në¦r1=×/ë˜JäÂ&¨èÏwÄzíù¡Y+ÐhÂ⨾v¿²£#¹ ‘bM½Žœƒè5º.œŽÏyþßp£Õ  K}¤ ¼Nò tÊøñãåúë¯7U¿õ­oÉé§Ÿž×Fo¶× ?¿üå/Mà°^x¡ )>úè£&<¨Õo¸á3ïÜsÏ•gŸ}ÖT[ÒêEZåçꫯλñ¿®®Î<¯Y³¦×¶QªQ£FÉæÍ›M%D=Q7n4χrHIëÓ¡!5À 
U4ÄðÅ/~1ÔFÃ’„ÔpAÂÐJ˜…*lõ´®œ÷R!ËàöZ}磌®’­[·š/Ð*†¿úÕ¯ä+_ùJI•‰÷îÝkž ”6pà@’¿øâ‹ Vý³éèzôÑ“ÕÇsŒ Bê—èg°¾¦w¤³¡¼®ô¿'ÎCOÑñ–ŽÇt tÞyçe¿4BC?ùÉOB_JßUÚ=×ZÁѾnAø4øb @ï#R l … n„ C×äM÷BÛŽ¶sí÷ÚÖÍU¬ÌVöó_»öûìt/·œµ®ÐzMðÍ1a¾ÌÃ_—qùR5 IDATÿìš €Ž©fèŠcµÏµsüІ¹iÑuæ?œ"·¤‡Ä<¬ù×Öï­À<¼ôûà!^¦]BÂËF—KDÚ‡¦gÚ†ÏU¦c-çXÛÍ^#Ùeüùþ¹´Ÿ3çÜ ?ºR¼Ê£DÂŽ‘Ÿ ;äh‡ »Â‹yÕõÊ © ºî›ßü¦©šøÀÈo¼‘7_+,ª·ß~»äu^pÁæYCŠöSZáPiHQ=öØcæ¡´‚Q”†Ušëm”*84ˆLŸ:ujIëÓ£V#Ò*–ZñQqÆ¡6çœsŽyÖj–Z ÒîWO  A(¥»ç½TÁ9 Î1`ÿ£·‹.ºHŽ<òHinn–U«V…æëgKP0 _ÐýÑG™×vhM«* Ï•"Xvݺu½Ò7ý Ôp¢öIƒ†úe ºãÆëpÙÁƒ¿• £:ÛÿRÏCð9oWßìiï¾û®yÖ/k(T9»Ðø£Ôý Ž~ACœ`ºŽgåA@Ùä… ÝhÀÑ ‡É ¯à<×0:‘d´­]§—7/’Œl3XºV¨1´N+(áà£#¹àcP0.ðèijÉL;»ïMÛ7Ëö•ïËε«D’킌?²M‰†‹…$ Í·ƒ‘‘P¤xlݵEv¯[.›ÒûíµDºv˜27=tLH^øÑ±Û8^î±æÛïíséÚG; 놯ÓB!È‚ÁÈ ©Ú&†hÊg†&Xoº|,5¼è<­¬óï|ǼþùÏž7ÿþàÌó¿þë¿ÊŽ;òæ·´´äÝÈ>eÊ™4i’,^¼ØT>¬©©‘™3gšyÓ¦M3•(µÒá#}º ƒüô§?5Ó¢Ëêþk°rùòåòßÿýßfšV’ìI#FŒ0ÏZEª'Î{©‚pH)•µ}dž ò¦UVVšç b@«%ë—,ØaÈ×^{MvîÜikv…eýÜV/¼ðBl%ÅöövI&“Ù÷Áh%B»½-uÑPfgé—è8D÷ýÁ4Û×÷¥öMÇöøEÇ3Zé9Ngû_êyÐÊ™ª+ËRûµ}ûöì4=n‹-*¸Lg÷+8ö/½ô’ìÞ½;4O«vêØL¿\¡ÔÊÜ€î«à(—¼¤yS iUÉ‹†%óªCZíró¼¼ÐYl•IÉßvh«}жмÌüÌ›hõ¿`ªx˯脲p¡\œ—“ okù³ÿ'ÛV;ö¨ãå ãO.ùü¤RIiܱMÚÛZ¥ÿáRQUm¦{Ù­e^y©”¬}ýe©\/é3èYï¼éÙiNæÀéÿ—5Ï?);×~˜Ý¡G'æ|ÂÌË<<³œy-™iú§jמ½q›$Óû^Q;<=³Ú„Dƒm§RŽ C¦Òí’[^§ª^ÜSÍzÏ3AM] xo⦞ Ñæ®E'ïµÄ¼–U"£'ڱÑNpÌc’^Ñ·‘©½€ÔG’_n Ë®»î:¹õÖ[³U}lŸþô§åüóÏ7Âc=V¾øÅ/Êa‡fnDõÕWå·¿ý­ÜqÇr饗†–»ð å–[n‘ ˜¢è»í¶Û¤¡¡ÁTv¬­­ÍÛ®4µ"“VaÒÀ †£º»Ri¥% *>ýôÓò‰O|ÂT—Ô ¢†4ä1`Àù÷ÿwœ(•îßË/¿lnêŸ0aBìÍûZ9réÒ¥²páBü<øàƒ{ô¼Ÿzê©ò¿ÿû¿ò7ó7æÜk8å¯þê¯Là «ç½A@åøãç‡öÆÓð¾~Mž<Ù|®jEF Ók…Fý\´i O?Ã4¤¦ŸaZ QßëýZÙ±nôÑÏ—#Ž8BÞ{ï=ù¯ÿú/ó9¤U5H¨¡?®ŸåAÈPƒƒÔ ~ÖëþèúôË4pøùϾÛýÕ°þóÏ?Ÿý„Rƒº/úE›7o6Ÿ“Úw­Ô¨U·uz[[[Þ2ég΃¾^²d‰©œ­•-u?N9å9rd]ºßÚW­À} 'Huuµ©&­ç&‘H˜û¢:»_z uœ´råJ¹ýöÛM¿uœ¨çfÙ²eRUUeÆUö5è]!”•H,‚ ªFŠ[m¯”dáe¼¼@cw…óBŽNàc^èÑÉ«ýóÙöVšdmÝ9xúLñ´dE¥DÿÎæD– b‹Í»wʲ§•¦] f¿ôJ'Íœ-C'j…ì2Ë®zýeÙ°ô ™zö…~¿ƒ¸¡˜„£çHî}v ŽU›0ŠlX·Ê„ k‘QÇ"©ödúœW˜Ð¢ 8¦üçôñ7¯ÓÛÓ/ìÓª™÷ž´îÙ%[ß|LÚ2=LïûÀI³¥ªþÐL`Òߦ†!Û6½(íÛÞª šz ¸&™ÞÏÉ…t=ë9˜çùU!ƒvvÛB×Yè½—=þö5Ò™jž}ö#9Ç $êyÖÄHLµ{ÈTäuJ E0lz“¸>âTTTÈ]wÝ%çž{®©þ7qâÄÐ|­†tóÍ7Ë¿ýÛ¿É¿üË¿d§ëMèzãyP‰ÑvÉ%—˜öZhΜ9¡y,Ôà‚*VÐp£!õæú¯~õ«½² 
[PM)î8=þøã¦r¦þó?ÿÓLÓàåé§Ÿn¦~øá:âüÇüGhˆî»ÝæG?ú‘éßç>÷¹NïsGm®½öZîüÍo~c¶£á…¹sçvé¼—²êý÷ß—?üÐ[5 Ø?èXá´ÓN3•þ슋ú¥ú™ýý?fÌ9餓dÞ¼y&´¨lÆž‹ºì²ËLðP«<>÷ÜsÙéúÙ¤ŸtPvšÞOó¥/}I}ôQ„Óe”~1ÁìÙ³Mp.h§4L‹›§4´¨½5kÖȸqãL0±ºÍ«¯¾Ú|†jpïÙgŸ5ãýRsÎ9Ç|ñDÜ6Kíg΃~á€îƒ~Ù~ñ‚.«_‚PìØ;fŽ/“=OÇA{÷î5AϧžzÊ´Ñãöå/ÙŒ-ãÖÓ•ýºâŠ+dþüùæK(^yå•ì±Ð@¨Ž£†Ú­~:Gïžôn¼ñFóÉòeo&Ö§½èëÌ Ü)ÿÙT¦±^ÛíÌ-Â~Å›ð2~k~*4Ý+¼ýÐ2^Þöƒi¹ýŒ¼Î[&¼^½Á'ÕÕòΓ˜PäôÏ]-nEî;tvn\'Kþïa=å9dúŒ¼˜x¹è£—}Ìò$ø²µ`̺öÕçeË{oËø“Ϻƒ3ߤ›­ŒW¼ÈØÌwéëM¯ýNZ6Ɉ/IT˶7çI{ÓNzâUéµUfÇ?m»ÖÉž÷#•ÃŽ‘ÊQ3%•ÌŒ¹t~þk}vb_'ƒ×ÉÌôdÌôàu²ÝjšîOkÏLKúÓ2mr¯ÛÛsë –Éͳ–Ñyþ{žžë?Úbž;šÖVÂ<{Z¸"$ã+¥U…ôßÖŠÕôÞ­TT¬VZÒP€VÒ [±*ˆZáG×7pàÀ¼yMMM’L&Mh¡ éuÖY2cÆ S±7¶µgÏX¨¨(\ãB«3i¥I ?vç&úÖÖV³ÿ,,v 5,Y¬¥ìs±6Û·o7U•4LPh;¥œ÷RöCßßþö·åßøF(\ Ø?h8_?7ô³©®®.TYéç‰~ó®¼òJÓ^«þi…ȸÏê8¬Ó ’ºnÝF±JºþmÛ¶™Ï% +FÛêg­~.E?·t9íC±1~QÀ믿.çwžLŸ>½ÓÇJÇ'ZmYCzÁ~é6õu±ñC)ýïèêõe:ç_Žª ξUñ1æšÿ0øm‹œ$Ï+m^ðe+výÇŽ«=vTõ±XÈè—SÜ”^LGAI5|øpó(…Þ”_èÆ| DtdÖ¬Y¦¢”V’Z·nŒ;¶Ç·UJhrôèÑæÑ]ŪtÚýë‰}.ÖFƒ#Uº*å¼—²÷Þ{¯yþ¾À%ì‡4<6lذNµ×êŠÑ¿ó(uýÅ>Ÿ }ÎêrÅ>ƒõ $Þyç”;òÈ#»t¬´ZrôXû²€Îô¿3çAÇCqc¢Bý/¥ÊtÜ2ÑóÜQ_;»_ Ô–ªíJ?%~¾s”M$€hWlŒVeŒ«‚4aµÒCñ!ÊÈÃ__î}ðÚ1A¹Ð2’ )Á¸ GgžÌtWb¶“]Æ1á>Çÿ4'º¬YÞñ§eÙíh.¯½Mšwí’¶æfÓÿöæ&iÙ~߸×$÷îØ&­{2ë±é¶M Û¥­©Ñ,£iºª~µÙù¨TÉ–æì´/.Ötû)Ÿ<[ 7»?®¿¿qÛp£}ñÛkÙ¶=»¤½%³ßÉô>´¥÷;Ù´W¼¶iIï›—j7¡@õÌ9Ñ/I3_”–jK·Ý–Þç¶ôÿ7š}OTõ3ó´]EM­Y§×ÞìOÙ»úÙôbR;ñlqÒûîøër/³Oþ{sîýsîº^þµQ$” =Fƒ´yÝ A…ÌÌûáÄçK@¦¤x²X2˜p`ÒðÁõ×_o¾Àóþûïç€ìçÞ}÷]yûí·eöìÙrôÑGs@}–~^iEÁI“&Imm- !”M8æe«ñ‰UÁ0zŒ„ ‡Ð<«Rc¡ ¤— -J¡6©©âçXÈÜ.;¹õZ} U …ßò+?F—‰_.~jÍ›‹å£·_˾_¶à 󜨬”/¸L^ûí½fá“.þ¢ôT—m÷Á«/ÊGo½*;]šv¼™¦ƒê†^²ÍÿŒx©”L=íL©¨ÈügáaÓO‘%óŸ—î¿ÛwmÝ$“>1SªkúÉk¿{È$쎚u¶$\;ìè¿Î%!ýP¤ç?;y#3±;G¦÷Y»Óû­AÈá—ÃGeÛ6Rö¤÷aφ52hìÁ~¨Ò‘›>’T{› 7Q*ª«$Ý >uºlXüŒ¬}ö×âVVKËÎ-RøÉ’HÏßþÚf…u“Ï …eCáG'r ]Ö5P°¤uä.ŒpEÑ‚¼È³ÿ:›}Ì#aÇØ`¤cU„,¥êcG B$ Â@ÿ-$øŸÙ&LØïûA€ÂB(+ǯ  øå‡~Ä.Z´Û:¹ÔX¡ê‘ÅB†±Â”1!ÈBU ÃÓK@†ª? ?ÚÙ9Ç:HqÓãrv®YWn[nE…Lq†¼úøÃòÞKÏJóžÝÒ°iƒŒ>ô>ö ìJÆv„ \'[×®/åÉä“gJý¨±²ú7dÛº52íôs¤và Ù¼êÙ”~¤’ífþ¸©G‡A@2GzÙÚ‘ÁÙÏTvô"}ÐSâZ•&‡6Ù!w¬Z!uã΄üÒçkçêL‹!7Õµ¿ƒš$UýËžMkMÀsè”S¥ªn´ì\õ¶4o_+õSÏ’Š~¤ió‡Ò¼u¥¤RI©è?Fª†•^§›« éZU!«Ê¨Ÿ'Ì«èiŸ"ç$ÄËi‡C3CUÃaÇì´T._ ²PÕÇR*>&‹<B€ò!  
lìJ©â(‘êŽ" —9vÒÉ@–‚tí°cñdñ*N(Ìi·ín2.ü’´ëC†×ëFŽÝðñɘÎõ+Þ“÷_Y(UÕ2å”Ošà¡­~ø(óìÙ±=Ýþy5ñp;iЬ^ò†,{qŒ˜0QÜDBÞM¿Þ³}«L=í¬ì2¹p_iÌUÌU‹ÌUŠ´» Bš¤#Ã>L>Z´Pv¬])··‰[Q)íɤìÞð‘$Òû?hÌx?Êç™þö:BjêG˜ ‘lÞµCÞIjG*FO’]½%;—?/ÕCNo«B?zN’MÛ¤füé&\˜¹¾2U!%R!Ò\O’›.Ž“_ Rì ¯[ Ò ½q²•³¡G±ÂŽv{/ÿᥜLRÂaÈ®»ò(—C œ¢Õ ÍS6Œèå…£aG)€ … %¦dWBN¸:¥c‡ ³Ë:ÙÐ\0ÏuœÐúâæ¹‘}Ë´ÏüŸëO Úd×çsë’H¥J¿dŽ¿oc›œ=õ#GIMmmÞ1G/%o>ó¸TV×È´ÓÎHÖ”|ðú+2lÜ9áSçËq³Ï5áÈuË—I{s“Ù'×ßß ºcøáϳÞÛaAWÂ󫪪¥~ü!â%“²síJÓ‡]kWIª½]†|˜¸®›i›^0óðLÐÕ5a×”l~ã)q+kdØ‘Ÿ4ïw¯zUªëÇKý‘Ÿ‘Á“Ï–êáGHËÖwE’M¹ÊnÌõ=×1A];àZˆç§=ëu0ÁË+ Lw¬J¹GÊ~Ÿr²Ï)“O,5iWv,õA5H°o„P>…ªAÆ4{^6Dè…BhÑÀ£H~•Éüù‡ 3³HÀ0W£±P»¼°dd4Hé–~Ì®Crm\?8éZ1»¼Ý¿TRÞ}é9q\WªkûË–5«eëšU1aÅÜcù«/É®m[ä¸3>%Õ5ý¤¥q¯´67ÉÑc²m†Ž+^*%;wøÛô˜Ñð£HL(2Ô Ž409ò°)fÞÖß7Ó¶¯ZaÞ=äð̲Á1ˆ„!·½·XZwm“‘Çž!‰êjI¶ì•T[³T×Ϋ1aÏdóŽl¥ÐÌú¼p%Që:Š^¨Å¡°cÐÒ ÏÏäÜ{»*dÊ?Æ"£áÈ®U‚LvâA5H°o„P6qÁ±¸jÑ£l W@«jŸY‡HlH2ØF)!HÉ A:ÙÈžS¤ d^;±Ã’…ÑÀd±ð£ã8ùË…ŽM¸r¤>>xc±ìiØ.:VŽ>í,ÓnÉÂg$•l·‚‡¹GÃÆõf™‰ÓŽ“ã2Ó¼ö6³\B+1úûTQQ™Ùh*•Wé1´¯ö< W…ŒVµç ;^*ûÕÊÎë¤eWƒì\ÿ‘Ô ,‡Ê?+ ©æíeûò7¤~â4©16SåÑk³Ž¿D…vSÙ€m^øÑ¿&È5•½ì¯èõî…ª;ú9ÈHÕG‰†SþÃ^În“²Ÿ3mSþ2«îØÕ€$AHk×®•·Þz‹ ìvíÚ%›6mâ@A@ùD+4–X Ò<9áj­z„Ö¢áÀbï¥xÒ±ç[û/%W ‡%K @ºþzJ ?†«LæªUºÖò;·ËŠ×™JGL?EÆr¨Œ<èiܽKÞ_ôR^p2ÕÞ.¯=ý¸ ¨"Gž23»þšÌú›öìΆ›÷î6Óú”­üøÑÒ·ä…ÏHkSc¤Â£äWŠtœPÅM׌šv®Œ<ô“ø[±ð÷¦úä°CÏ…­0¤¶—T›¬[ô´T¬“áG” ¿VÔô7ÛH¶ìÉVLµdö=Q30;Mìë¾ü¬éq×m |tb+?fÞ8áªþC¼˜êVÐÑ~¤Bóÿ½.­™ìæ#n=íü[ßüæ7åæ›oÞ§ûÐÒÒ"<òˆ¼ýöÛœ “{ì1ùò—¿\r°¨½½]¦OŸ.§œrŠ477ïóý×~î¹çdéÒ¥%ýŽêl¾N&ß{ï½Å5J¥ä¶Ûn“;î¸Ãô»/üþY½zµlÙ²å€<=Ù?@ï#  ¼ì ŽÙ‰^hbl5È`q'>0;-`óÂU"%^  W›ÌA:‘dfºi^W®/ ÃaÊ\ûðqqòB“Ùÿȳ++¦ç¾1ÿ)I¥’2mÆ,©ªª2óŽ>í I$*䃷_“ÝÛ¶æ‚“éÇÛ Ÿ‘æ½{eúìÏHEºM°þšê6fœ¬[ñžl]»ZvlZ/«ÞyK†Œ-ëê³û³üõE²zé[Òž?:™íXÁN7wàý÷v`2ózô¤)fþžm›ÍóðC·ÚæÂú~Ãk/H{Ó^{Ò™’¨LˆëfÖQQ]-5õ£¥qãriÙ¾FÚvmL¿~G*Ž”ŠšÁ‘k~(²‘"¡0d~eÇüG\õÇTÊ FÚT¸d&€ÙÓ•ã]§7¾¿öÚkrï½÷Ê< +W®Üoö}þüùò½ï}O>úè£}º€¼à‚ dîܹÝZÏöíÛ妛n2A)pl>.Ç8™LÊí·ß.wÞygIíŸ|òIÙ¸q£\xá…RSSc¦mÛ¶ÍãË–-“¶¶¶^Û­NùÉO~R.»ì²’~Gu¶¿@_·yóf¹çž{ä‰'žèûÓÔÔ$ .”åË—÷øº?øàÙ³gLž×„ MOŸ5g¶QŸÝÏü_¦WÙ  ?-‘Þ_•¨Èì·—m^ÿº!2hÄhÙµyƒ >JjÕIʤs^]ÓΓ]kVÉöß“QGBj뇚 9ÖZí1Ý~øÑ§ËÆ×ž­oÌ3Û«0\>;zÔ-;N‡×bˆç7Ìu*û:` ·qÂÕc©¼Š~0Ò¯™Ò*Öô®‡;Ó küqùË¿ü˼°€†zô¦ó#Âÿ6¥žàߑʡ£íýà?0Ï—_~ùq>æÍ›'ßøÆ7d„ ²jÕª^?~}Q¡}îécs 
ëêyßWÇøSŸú”ÔÕÕ™` VOìÈÏþsó|ÕUWe§uÔQ&YÈý÷ß/—\rIÙÏEÜï¨Îö¶¬s IDAT@ç¼ÿþûòÔSO™Ÿ³ë¯¿¾G×ýæ›ošçcŽ9f¿=>ZÕÒu©ÙõqÇu 7„P6¡ŠNþ¼hEG;ôhÞ:ñµBˆB–Eªüå‡$ó«1û3rÐá“cÓ º:9ûò+¥µ¹É¬¤ÊÿÖK?)Û6®—–¦F1n‚Ôž GúÈL[ÏÏfúzÂìs¥-½ß‰ŠÊp2½°ë/Û¿®Þ!Gzx¶¤†]­šèdÖTYÓO&œr¦ÔM8ÌÄúLxVÃ’™|£T,cg|NÚ››%•rÅsª%•Ôœ³*@f—=î±×ŒÎ5f'ÛaF‰ 8J®‚c.à9f*=:¹6~(2S2˜Þqȱ£°c{ ÓºF«^|ñÅæß²4 ùÿÙ;8¹‰ûíÎ…ncÀàÀ@À`zï½0Ý„> „ú†€ájH( !z/¦80„bº16¦Ût°é|·»Ò{Ïo5Òhv´»ÚÛÝkÏ×YÒŒ4M£‘în=›nº©8ï<ôÐCêî»ïV“'OV#FŒˆŽßyçE8 wµaÆ5¼|•òûì³ÏD¸µôÒKKÙ{›l²‰¸ÜaÝèö는+s=Û¦'Ó‘ëÞYmÜ¿µçž{Šë×ĉEˆƨ{ï½W 2Dm¿ýöQøìÙ³e a¡Ë¥mÝu×múµH£²Ô—’Üsplĺž´¶¶ª7ÞxC-¸à‚jùå—ï–msÓM7©·ß~[}ôÑjÑEeg饰B…„B!„¦â¥¹íyÉø4Ñ£2e ‘æ A‰îÒtƒt‰=ãèH X¥2éÔXA(i¥•æi»?šÎ”æ±ýû÷3J\¤ß<ý“u7þïׯâz þÉOe±.‰Ñ’Űyç›?FLŸüœêÓ·¯Zo›í£¶ÐçAEÚ²¥Eþƒø¢x°=Þ+ž…ÿs¹VõÙ»o¶§×¢†,?Láa¾!†„ر¥}½ÐâCÔƒ‡ˆ€²(~ BØâ¾§úôŸW©‚'"HSäè¹*lÖÏl½¶„Ž%%‹áþhŠ M÷G3—@20’áÚwºAê´äURésà"„B!„B!„BHÍŒ3Fýøãêøƒ:묳¢ðC=TMŸ>½Ä ò‡~÷˜¹sç6¥|•òƒXñûì³Oü1ÍnÎ +¬ î¹çž¦´_W¤\™ëÙ6=™Ž\÷Îlc8&B—ÄrÂÀ»îºKÆ­#Ž8ÂézyÆg¨yæ™§K\‹rcTµõ%„dg‘EiˆSô´iÓT.—Së­·^·uÒCù1Á ½öBH£ ’B!„Òt’¿zJ"’½Òxó¸T‡H‡«¤ç¹ò÷,GI¯$„(³¤éîXÉÒ@–”Ã%€Tn‡Éd“y)áÆ¾Wþ*%C‚’+øÕ¬ÏÔ§ïÏPël¹­ZpÀ@à 2<†WR‹"ÏØoÿçG©C[Âã>{çMåçój±e–Sýæ·˜n]EY”é‰ûcè3Ytt Å-¡+d`:?*!w-÷÷ê°ªZè(‡02(&` Ý‹ ½ÐÕÑKˆµ(Òv4 +¸Ü ³Š!mÁc!eŸB!„B!„B!¤v¦N*ë5×\³$NJ8°ÁÙ +0sæL5ß|óÉßÂàˆdŠæÌ™£^xáõù矫Ÿüä'jøðájðàÁ%éCGÊ¡C‡ª…^X„—/¿ü²Z}õÕÅÁ©šü´`ë¿øE”.Î[ÒXl±Åyê8¤1pàÀDDcpÐCy  è(˜Xþâ‹/ª¯¾úJ­³Î:%¢Ò4Ð.¯½öšZb‰%dqµU¡P´¿üòKgÚY®WVà†!È{ï½§~ö³ŸI?q‰ÎPFÔc¹å–S óž|òIiç 7ܰ¤OTSfWÛÔR¶¬íiòá‡J¾ÿþ{µÆk¨W\±©÷l¹ü«môûnÕUWMmãZÛ ‚E¤÷ÁÈ5‡‹£fÉ%—T ,°@´¿Í6Û¨Aƒ©;ï¼S]yå•òÑ^×_½¬<ðÀšÛ±#}¼ÿþûÒnèg«¬²Jêq®1*k} éŽàˆ1÷ÖRK-¥æŸþŠÏ–O?ýTÖ{ìg´ \xÇÀ˜‡táΨYh¡…¢~ãŒ1:^æ¹´‘¸ççw^>ùä§ð¾bŽGi¼òÊ+²Æø[k=:ZŽo¾ùFòÀøáz¿*ׯxGCû¯¿þZÜsñŒ@Z®J lü±<»+•-Ë5¬gýÐ×fÍš%õÁ9x÷s¹› p p.ê…÷³Îè;èK®ú"ïfýìkŠ~ç5ÚÂîÿx~ã‚~‰¼Ë•µý©–~@!Y ’B!„Ò)x©;Žc½2N’ÊvˆT wI—¤gæï ³Ü!M÷Æ8o/QÖêDHK‹#KžQ§›dèb©JêׯK»&^•W/e‹í»}ûôU›í´›Zj…a*–?j?N•tÔ®"ˆ,¦×ÆJ„ŽaøGoÿ0¾äŠÃÃv(Š[$óÐÒ³Da†ž-˜5Cãv¢ö­„)€´Ž"PT¡û£ï%µoŠá^(t,ºUÆ~P°Ü úx(‹è±Ò~ÞÚ¦’B!„B!„B!g™e–‘õرcÕÈ‘#Sû¿ÿû?uÙe—Eû»í¶[´ýÄOˆ›ÇwœºõÖ[£ÉÕ‚„Ñ£G''Á”)SÔú믯N:é$Gýîw¿“¿Ea’7N•òñÏ?ÿ¼„™B¤Ç{Lí²Ë.jë­·V>úh"ÏsÏ=WsÎ9ê€P7ÜpC"î /gÌ .¸@ÊÔüqµß~ûÉäu€¿™Á¥K%t»ì°ÃêÁ,i«wÜQ\®0?-íj®WV 
À5Dû˜×B»?ÿùÏê·¿ýmâo³/½ô’”ùü£ZmµÕĽO—tÐAêßÿþw$¨¦Ì®¶©¥lYÛ@Ôpä‘GJ™ýâWQW_jÕ䟥 ]÷Äim\K›ÝqÇê˜cކ‹N8A]tÑEÑ>úÂJ+­¤ž{î95cÆ 5lذ’s A]ЧÖ^{íšÛ³–ú€/¾øBí½÷Þrk $¿øâ‹KŽM£²Ô—îÖãÆÁ—¾§ÖZk-µë®»–ñ1¶=üðÃr¯ÆÄ|Ì÷š-üÂxàr¹Øxãeü‚ c¸Übœ:lÓM7•pˆÍ2n¹å–j«­¶J­zwB( Y­õ¨µ¹ß~ûí2^h0Ž£¾ÕðßÿþWMš4)Ú¿å–[¢íC9D„á&ÈeÓmV¶¬×0¬õócñÓO?-eÐôë×Om¿ýö2ÆÛ¢>ˆÑþ÷¿ÿEa8Ø}÷Ýå\äÁ=>ä±ï¾û–ôË/¿\D‰x&»®)><€vC^ºjÇÎA‹-&Kž !‹ÿk‘£Þ‹DÊpˆmc8nõÍ·“õ‹ . $áüèiGÇPü*©ƒõTKûF¢I¯(”tŠ ËˆµÀ1ªDä 饻=ú¶øÑ ÅŽ^ìöè'‘"j´D’©n…Ø Ò—ß¯ÖâúXnßÞö9þB!„B!„B!¤ÃüêW¿R×^{­ºé¦›D”1 Ë}ÂDL¾¾âŠ+ԻᆱŽ?þx@Р]‘à|áÓÁ,Â"àwúé§‹.€#FŒ(Iû¡‡’ épGÛl³ÍÄͯšüá\|àÚ£¸§žzJÜ¢L—L*xa½)ȸÿþûe½óÎ;w¨M'Ož¬¶Ûn;©Ë™gž©>úè#èa{½õÖ«:}sâ»YFÇ0Ùmš–v5í—8ÚáC,0jÔ(™(1`»ÁIBRˆ (ƒ(âGˆHþö·¿‰Èù±µÌvÛÔZ¶jÛ\rÉ%j̘1j¯½öaoŸ>}DÌ`‹mE5ùgiC×}W©ÿei3a1¾@Ä¡5D#p½á8…s‘¯ œ³ôýíb¬BÙ:âYk@¾¸·á¹É&›ÈXaæu×]çÛÒÆ¨,õ%¤»÷ÀÛn»MYw𑈹0&ÀEïö3NÍ ãžÃÇàÎ ÁØ5×\£Ž>úèH0ÁÆˆÇ PÃÂH¤ç>žÿú寲·ÞzK=ûì³r,Îùî»ïä¢:8զ݋3‘žËE;K=j)òÅsXÀ8a)Dy0.VÊç.ÞSp]6Úh#›0oÇvYÆuD~(DtåÊVKÝ]ïû쓸/ྠ¡˜v?à”·ÓN;©•W^Y šƒë,Df‹.ºhUB.¸¿âùoŠ2Q&¼3à/'„Äx‰1À$k=j)Dn ¢~ø¨\õ¸÷·Y³fU¬7\± -¼á’Ö^¸ŽÕ”­ÖºÛd­®7ÒÆÇ;~ó›ßD} Ï” €S$’Ìê÷Ñg ¦×Ï <‹pÜb‹-ÖáþrBX÷°Î:ëˆ ñí·ß–ç.ž)6ˆ=áâŒv‚¨~Ùe—mJÊÒ!¤ZØ„B!„Î hpú®¿£DBB¯zA¤Ë 2:®Ä!Ò³â“Ί¥bG•*‚ô,a¥Tjq_Kt\é±-^QnÙí"L/^ZŒóp¬>®%Z¼*û¼ä¹:/YT|N|œÎ»XÇèܰ uÙûž¹mºpzé®^àzF†‘ë£%vT¦Ë£2Ä¥®‘ˆ1<Övt9<ºÜ #çGÓ ²PAJÜ óeö]"ÇJKŽ!„B!„B!„Bê Üåžxâ )}ÿý÷"‚ ®rYXd‘E"H &XLòvQòÊ"Æ—ÈH ¯´Ë#xì±ÇÄR #!Ó@d†8}^GÀ$tST ÈpClTÚ3fÌhX ü¿ÿ÷ÿJœí0¡Ûm·UsæÌq˜ „ˆ¦`ò>Äèo'Nì´²eiOlœ›:ƒzç_ë}Wm›A`lw-ˆ”ËõW}?Û•Bf8bAtG8¢DsÀ¦£÷„âàÈ#ŒD’»îº+ÓUM} 鎠O›"H†÷ƒ¶¶¶Ä}a!€ØÍ"0áš"0` Ö ´p`­¶Œ¶3¥‰¥¥‡DˆÜàôg:M×RZÊqL?×´HhwÌF\ÇjÊVkÝm²ÖïÅ_”õÆo‰ 5p?DŸ@3Eæê÷[,ëšæÜ›µÝ´Ònhµàù«û.‰ÍìO„ÒHèI!„Bi:Ú90= >x™â½ ±^ª 2á8i‰,M×Be¸<êƒÓDѹ†³cÒõÑKä‰U©ûcÂIÒ8ÞLËY— -×2ðâHdh¸@b+ˆ\"CÈHí ÷Ç¢hjB_Ú©x¦¸>F×#(m_ÃϱXï Ý 2ÏÑâF ‚ýX ©E‹Ñbˆ$ã¸P i¸=~Òý10\ #÷Gc]p®Q¢J"ÇBÆm{!„Bé\ðµåwÞyG-¼ðÂjñÅgƒB!„ðýªáuã;(!„487Â9çñÇW§œrŠzî¹çÄÅÅá™…o¾ùF„• Àiñ“O>‘pLHwW'íÖ“=©Û%¾„ e‡ØmøÛÔÃ?,a矾8Ž?^uÖYr<¶Ë1.+¶@èg˜qtÅ´Ë¡íÒऄöu¹ÚÁíјàöá‡vZÙ²´ç 'œ ÆŽ«N<ñDé/pÜu×]UŸ>}šrÖ;ÿZï»jÛlèС²†3«vnp^¶@R p %á àäUˆ maL=î)-dFŸ²Y`dm 
KËQÕÔ—îHÚ½Á0Þ¾ýöÛ(L‹…á”§Ejš~ø!qüLp¬)†Äû¨ÖÑÏUF}ë|mPF°æšk–Äe­G-åÐçë1ÃÄ6ò:ê²Á-²£uO{§«¶~z|v¯Ç]üÁêsù{W»Á©<-N‹ê[[[›ÚŸ!¤‘PI!„BiÚu/)LóTãý!Óñª /q~LÚnæy gIû¯fd™cŒ´tfI“ñIa§çª®C@YþÚ‘®P§ë C_èE±â¨(N‘¡ 2TRBé‡y·„ úf;©¤ 2j üu ¬íÈÙQy†Ë£±ø–û£é©ÅŽÚ²à9ÓÕÑ7ÄŽax!<¿PH®]âHÈ,lLs}t…ù¨!„Òé|öÙgê–[nWˆ8€ B!„Â÷«†×ï „Ò\¶Új+õÌ3ψòÊ+¯T§vZÕBH!Òúç?ÿ)“äçw^-äóù(¾žè ß®Éà\ÀñçÙgŸ× 8ðÀp­µÖ’g Ü/Ÿ|òIyÎ SîœFÍs|)ÞÖŒþD! }—fB!„BšŽþ}•W!L‡{å© ϱ¥ªvD´+…%¥%ñ c%¤Ó²œÒ<ß(G¢žžçC*U¹A‚8 ó:Év„‘ž!„ Ad ZÂí ö‹T~(†T¡8Ò‹Ü ÓD± Öt‡L–3,_(lŒ k,‘È1\üÀíé;FÛ†Q»>ú¡à± K—BɺèÙ1d®BXc!„B!„B!„BšþžñÒ˜1cÔçŸ.âŸrŽfš³Ï>[]tÑEj›m¶”vûßÿþ'J®ÉÞA‹‘>øàgüî»ï.BÈûï¿_Dro¿ý¶:ùä“%nĈjâĉêÁTÆ — _ÿú×#¥0dÈé ³fͱ‰Í§Ÿ~*ëå–[®ê4‘èˆp¥QeKcíµ×1ßôéÓÕ¹çž+…;S죦³óÏî;°âŠ+ªË.»LŒ{íµ—ºôÒKSņZØj‹ q¯âãõ8YÖƒEYDÖÕŠj+QåêKHOC;Ò™.¬ -´ˆ!nNsö3ÑÀ¸!ˆÄ‚qe•UVQ;î¸cÃD_xøþûïÅÉbH›¬õ¨—°¯+P¯ºg­ž¸&èW.g_-¬ÔCÒã,åž#ú}µP(të6%„΂BHB!„BHçaˆõ®Kh$µË_‰pÒ‹-+'_–¬Îɰ2n® 7HÏá6Y»Ò:F7‘Ci‹ízš2Ѳ¢HÇ5ô"HeHcQd‰"ƒÄ¶9A†gZô®½¤Rw »¾%úG‡ðQVÚíÑÜŽÖ±ÛcŠá…47H//Æ.Žö¾¹.8Öë<-†¬^ÜX­ø1g…B!„B!„B!„4~ýúÉþµøèÉá˜DnsÏ=÷ÈŽÖƒrùiá—Ùì¶ÛnjôèÑ"ÈÒÉ·Ûn;YC‰¸ñãÇ‹Àì±Ç=î:–k¿,ÀE‚V¸pÁUÓábˆáæg ßð·I}ÎòË/ß¡2w¤lµ‘ïõ×_¯æÎ«î¸ãõÄO¨}÷Ý·i×µRþõºîáßÿþ·Z}õÕÕ /¼ bÔÙ³g˵®äÄ¥ïg[Ø Ñ'8ðÀ;­NË,³Œˆ«áÌi;ZºÆ¡JcT¹úÒ]0 Ц(c>œa)LÃû„Âp¾«FðõÒK/‰Þá‡.B7ä…ôô;K£xå•Wd:¸ÈZZ€‹2òÀ˜a@@;f}70Ý9k¥^uÏZ?ä‹|¸¸ÑMž„l»í¶V§½÷Þ[Öÿú׿b˜×_]ýâ¿móïà•Æ¨´úÒÁ=~×]w…ó(âçîàÎl:÷âY žyæ§ ž¦+Ò„í±Ç“ñBHì÷Ýw «ço¼ñ†¸ô¥9 g­G-h1?žiæx‚ñ‚ø,h¡„v¥^uÏZ?Œ¯ÂtûúããèHš×LçA»YVŒÛ(ÿŒ3d¢L8„£/ãÙ¥A{Ýzë­ ¿‡šÑŸêÝ!ĄބB!„¦¡ï•ì„î~ÚÍÏsP&L…ÂGÏŠªÒ2%ɪðªˆ÷ÒÂ]ž×'œp‚LHßu×]Õk¯½¦Ž9æõꫯÊsû–[n1Ò ‚ sËå7`Àq„ðNS=Úì¾ûîê/ù‹8æAèˆw+ÍN;í$õƒ8î‘®çĹçž+ψ¯´ª;Q®ý²ÔoÏ=÷!á£>*ÂE¸gBH Á/è+ûÛß`8ᘕVZI9R„_wÞy§ûÇ?þ±äï•Ê\ϲe¹WÖYgµÕV[IŸÂ{ÅóÏ?/.…p9D™ÓxóÍ7Õ)§œõ9-üÌž5ÿ¬mØÆŒ†8³ IDAT#÷®±Í²Ë.«.¼ðBgŸÓBÔW ÄHÔBœR‰_þò—Îë}ÜqÇ©-¶Ø¢æ:¡o­ºêª"èÜxã¥?cü¸æškdü„(ˤš1ÊU_Bº3bO›6Mž-»C¤†}Œõ[o½ubÌ_a…äÙ¡!Æ£ðÌÀ˜‡ŸùŽç³°aï,x¦ÛàgÌŸÿüçuuÿ(;~^Å=žöÉZZ€0Ú?Oãch[Œ‹™"ÜÔ—ÏH¼¯=üðÃòû&¤ƒ1­œKbõª{Öú!"Ç÷Þ{Ož78c.úÄép4Ç3Ôìo¸†Bø‡÷?œƒø·ÞzK—û쳇ßU øÁµ×^+nÓëQò³´u-4£?Õ»Bˆ …„B!„æaþÝÑ;†Û‘˜1\Û‚Æ pŸ%—¦hŒÎ¿€rîO¡(/3NûG/í%*jœd¨½Ä!¶2¹&‚ÔõN ½„ø2NÏ+)¾çU®žÝtÚÍS‹ƒ0sìûáÑ-a¸²ˆ¶¼Ò 
Ò…˜NÃPßKŠ}ËíQ;AúÉu‰³£éþXHwzŒD“–2rÌÇî¦ë#öóyíYHâØ‚è«uvÌUyŒ¹ø—!„tð‡7ü¡qÓM7•‰E=ô„ã…£GîÑuÇÁÇ/“«]l´ÑFj‡vè‘uÇ[á˜`Ö}íµ×–IžçñÆ „Bø~Å÷«ö~…Iq<ð€L”3Ý=0îZ˜˜_m߯XLfüÏþ“¨/œp0¡/m‚)&ŠCÐ0gΜ( ùn¹å–u-'!„ôàÊt÷Ýw« .¸@=õÔSjâĉј qÙ9çœ#“ÄM;ì0œÝ{ï½êÒK/•÷“OšöþëË:¹]°·}¬ã¥¸ßþv`†c»=0Š µ¨Pös*žÏ…kì笵o—Ï…éë’ø0Ÿ´c£²XyéãrmžŠ‹mŽíŽ® ùŠïW„B¾°ŠÉÌøã2¾¤Š¯°âp˜Ø‚¯d÷TàÊ„zã¤ø£(œhðGQ| Åän´COúzª¾Ö¨¾>‹/šc²®5êIì=Yœ@!„ðýŠïW½ùý N ˜0ˆòA,ë·LÇÄv8ô˜.7ºn¨&(šaÚéq˜PjÖ®G¸þv:pÇÄ$Wô89|ûí·213ï¬å$„RŽq`‚5Þ=*90ùå—2Fc²¹-‚`âuŒÙ?Œá˜„m§‹cñÄäëZòÃø7C|@Õ] oLf‡C¥ ž x–¸„lp ]Lއ[N5”«òªV éJ§#i»Ú¯–úiðq<¿!dK^@˜°þúë‹hö@;Cxa\+‘vÍ+õ™jÊVk{¢µ¡O¡\pp¯ˆuð7W»ÿg Ïš­m˜_m›ÁE ‹wÞY>|a¡&¿àÔ Y „…§Ÿ~º©Ï;ï< ÃX×K¼Ûá=¯žtäžB<œÍÑÏ´ïóø»¾yN¹1ÊU_Bº3mmm2ná>€`.s¸Çô»@%ðó ~ÎÃ9›ÂØ~ÅWÈ=‡Ÿélî»ï>CâgÉ 7Ü0*îoóC;®0 ÞLÑž%“ãwGqDÕíP®µ”Ã÷6 í|ð¬Çyxÿî¬þ”Ö!¤ ¤#$!„B!¤yŽ}ÃR‡E‡yE@eì'ÒñÂÍ”tÌs¼jÊæÕWkš^J˜—a‹MAcyd© ¤-¦4SöRÚ°ÒïãKâE—&½5c‡HO>ÞÑŠ!Má¨vƒ„]d´’Q,° û°?Òa.%Î.ÈÈé1Ýݱ`® ±¸2r}Ì«x)LãØ(|rív¬×RàxD!¤Û‚Iú˜—Kä–?„öi@¤A½v¬†rõ)—W5ét$mWûÕR? „ŒÕˆM0ßîµ\óJ}¦Ú²ÕÒž¨ƒ~OÍBšà0kxÖükmôøjÛìwÞ‘uñãÿ¥@ÐlÄÛo¿]Öp‹ÔÜxã’N%7ÈZèÈ=…xÛÝÎõ¾^nŒrÕ—îŒ)¢‚ð ÀÉžIX\|õÕWeÇÍÄ\®ò” ÓØâs8Ì"?Ó¹¶£õ¨¥®xü~¤šçF¥qÌ5Öu¤lÕÔ½µÔ»p}ð" ôÏÁƒWUû¸´²”k·´ßçà÷•~×Óèþ”Ö!¤VZØ„B!„fQt«ö w@{)ÆÅ'˜N‚^òØðü↗8'ZTg웿°ŒÝ·K—RÕ¦£>Éÿ†çØräY‚H—¤r;P*[ˆèÅîšøáÑpÔlñ¢#Ú¨,]$Ü3.澦ë%:uðJ— GIÇZ;žnرóvè$î…Œ6^©à1o ±(26†ÇBFßCæS…ŽV¶[¥R"±º )‚$„ÒsÀ„¹]vÙ¥×Ô_Tø¬ ¾Pð•Øž &옓ôÁRK-¥† "_žÅ×Ð !„Â÷+¾_õ¬÷+LŒsMG²Ô×AL‡hn®ú¢=L$€ˆw=ËI!¤ëaØñÇ/g¼óÎ;ë–.>DðÀ¨ .¸@>NÐÓèéõ# >^1ÿüó«ñãÇ«C=T]wÝuâ†8vìXqrƒn¡¦ëùôéÓÕ«¯¾*¥¦èèæ›o–ûÜå×Ǩ´úBÒ†„ ë­·Þ÷G8ÄÂÉNÃwÝu—š:uªƒìÔ Ü£ÌeùP!„ÒÛ¡#$!„B!¤y„6âïg*µe yœçYŽAPzŒªà©÷=ÃE²ÔÕ0*`œgÅE»A䤘VUå(’$jg³Z<ÏqŽ)r´‚½ØJ2!‚L¦åYBI¥"gH•U&³õŒúɺAÄêiGÈ¢¨Õo¿¨MB´h:@–3北 v€ÔÂÈP éëuèè˜p~4E¾!\4‘°ÑOw}ôó±(2Ÿw»9–s{, ãí(\q¿c"ȶ ë<Ç!B!Ý|¡µÜWG{&€7ß|3ñuZ|ÙØø{i. 
˜\þé§ŸF_u'„B߯ø~ÕóÞ¯à´1cÆ õÝwß©~øAÍ™3G …B‡ë ÇÏO>ùÄY_—K‚vE@9QNB!݃Q£F‰ oܸqê¸ãŽ«KšxƒXkï½÷î‘mÖÓëG:¸ºÃ‰{ôèÑâpŽÅ|<æ˜cÔé§Ÿžx¿Ãý ÁÑÉ'ŸœH "Jˆ ³:võ1*­¾„ôß@ýÈ#ˆ˜‹}`o±Å59#¦±Ç{È}šÅiBéíPI!„Bi",ô•òloú„ˆÑIf´ •C8i–ˆ$8¯è `Qèã ¡e|ª`‰1ÅŽqˆ$­¤Ì¯‚Ò‘eÕxÊ®“CÄh‰ “âHÏ:¦TPYš§WÒ6EÑbQé«P©l¤g_ð¤cgà^CL‰´dÅ"·ÇØÕÑr}4ÄŒ±Û£[¸XÐbÅ\ì™:Ǥœ#‚Çœ!†Ì¹ø\¿Ð^èLÂF{]î\Š !„î¾Zþ /¨‰'Š«ÌСCÕ;ï¼#_óÆ—‡áXÓÛГ—0éœB!„ïW=ïýj„ ròù¼L0ÅDv¿ì«s}á‚Y ˜”Úå$„Òµ0`€úì³Ïêšæj«­&KO¥õ[sÍ5å½ïq¤÷·óçŸ^}þùçòa|œïóIÂÕÍæÄO‘ ý!üŒÐǨ´úBÒƒña‡¦¾ÿþ{ùx~nÄý…‚ézÃç!„’ ! !„B!M#0Å„z£œ¨ÑåôX‰ÝÐ>²Œ»¤é ™ëEbÈä•\!q¬§•.Y WâüèW¡,0>ÆKË_eI&ÒnqÚ^¨R‹ MH-€t»CºJBÈbý#¤åYz±õù¡¤Ãù±(|4·ÃÅ?–u},<†‹W(uw4±ˆ1Þ·îŽZ ™ÏâÈ\q_“Ï‚IAf6ºÖt‚$„Bzo½õ–¬áL4iÒ$YÀðáÃÕN;í”:)»'£x´›!„B߯zÎû„ Ï>û¬L>1bDäÚ‰IÝÿú׿êZ_LdíÊå$„Òõh„‚t ˆÝ6ß|s6D/gñÅ—¥øyOÚcTO¯/!d…B!] ! !„B!M#ÐÖ~Ûý@ &s¼¢Â­%%.Õý1mßÎË©ˆŒã”âpŒt¹B–¸K&ŒÅŽ‘:.0ó3ŠUVáhˆ4½,V‘^å`-Žô ËG[ ¤áÙÖÖª¾ýê+ù2ŒøãÊ€A‹¨>ýúNFZÖeÑ‚G\þä:°¥ ‘ $¶c—ÇÈõÑáîXNèX(¦¹8º¡2gˆsñ¹ú‰Ä‘ÕŠ Ë ³C$!„Ò™2eŠLâÁˆçÌ™#“¶ äü¢yO_\në|ð¬Ñ„B!|¿êYïWpæ;ï¼³ˆU;Z_84ÚâÖ™3gv¸¾õ,'!„B!„B!„êhaB!„Bš…¸ùùá:Úöâ}Cç\$c ÷SÓó­ó}+¯„Ë`Ì?‘Uo[΄z?Q´Àul`¥iÔÇ\‡;‰¼SÊ•8I'¸¾L\ˆ—fFÆâEÃ-R¶Ã8ˆsmmjö§Ÿª¹?þ¨üB¡½Í}Õ:wn{Ø'*ßÖj ½’B¢½|k: dÁ7ÂüâZ–BPêöè=Fļ!ZŒDŒñv$\tó¦#d•"H¬s¹r"H×â=¦ãZ(‚$„Bºÿ;t fÍš¥üqõÉ'ŸÈD}ìcÒ~OçÓö÷Ê;ï¼S&¯k^~ùeõõ×_‹ãÎOúSvB!„ðýª‡½_ð ½v¾üòËÄ5{á…jªï]wÝ•¨/„°ß~ûm‡ë[ÏrB!„B!„B©:BB!„BšFdÈ9;êmúь³]!Ûãü½Ç%$uA—êé©@¥9C:.ŽiqžNÃ%+4ÒJÒ³ó¶]!Í4c·ÊÈäÒ+Óàž§¼J×Äpst‘HÁ3B ·H]•o¾ü21±(Î'—ÈE—žDùc —ÒW±dÑí±} eß‘¦RÂÛ*ä qaäèX*hŒ] ×Ç,"È* Kó5Dá±¾L˜Êâð˜%®ÀA‡Béìºë®ê†nPO<ñDIÜ /¬¶ß~{µÊ*«ôȺéiÚ´i"PXyå•Õwß}'û`ë­·fç „B߯zàûÕ°aÃÔ矮xàY÷ïß_½öÚk"j´+1ß|ó©×_]]vÙejøðá"€D}áˆÙÑúÖ³œ„B!„B!„BªƒBHB!„BHÓ‚ÁcÄѹaz%ù¨„¯(&¬Ÿ25 ¥¢8Ï%†ŒHIß\Çlj¬ÓP&òVvÒ¥åJÕPZ{®##ÇHCH™Ëµ¥^ÿ|¨ØéÑt{L:&\Û·óá~^o‚„ 13VAæÒ mñc"2bÊÒ4\y×¾¡b#mag'„Bz}úôI¬{ S§N•Ml¹å–jèСíï[95wî\õþûï«—^zIn]tQµÄKô˜:ë‰ã«­¶š˜l4ïB «¶ö±@̘ÓkYüö}?>bd¾Aâ¸\.Hˆs¡(1g  b>HFHSÈhˆ s–¨1!ˆ4Ä”Qºm^"ÏHhi 0#%¾o ӄ޵ÆB!¤§±ÔRK‰ûÌ[o½¥î»ï>õÊ+¯¨÷ß_½þúëê®»îRS§NUK.¹¤Za…ØX„B!|¿"„Bº<˜` ÷åÛo¿]ž½ï½÷^ª>ºpÁˆˆ¨+ðøã«óÏ?_Þw:ˆ¿vÛm7uâ‰'ò²¾½’zÝͺ^…BA3F]}õÕ¼x¤Kñù矫[n¹EM˜0¡K”nO=õ”üŽ¡Þ¼óÎ;jΜ9jå•W–ít6p§¼æškÔwÜÑã®c½ëGºç}Ùý²‘c!=¾lB!„BH³€`Îó!Šk_ZÚ—ö-¨ŠŸi*Ï —/vGzFœ+=ÙÖqEÁ`‹ 
—¬¼°A1àx<„™’„kc_ëX<+/\i„õ@D¥ã‹Òþ_qî3 Ë%éé‡e œ=—Ã.—çÊÛˆ‹¶ÃºzFsêfðŒ:HŽ}ú©ƒ—Ps¾þJr9_öé×Oõ_h *´Ç ~Q4é—|\ëí¼lû"¸„82 o(äcqc!».æÛà ‘ cûÆÂ8×9H+—3Îk+éÉ1‰ýø¼¼qÄŽQºQ¾ÆvL;A¦­kËsp!„Bz( Pûï¿¿zä‘GÔK/½$‹øÞ`ƒ Ô–[nét¦!„B!|¿"„Bº>ø :öØcK&ún±Å2é»&â4ó™\üþóŸÿ¨Ñ£G«¡C‡ª3ftz›_|ñŲþå/ÉØM¯!ëKy½ÒƽvØA-¼ðÂ"„„«,!ÄÍ›o¾)¿_ÀýrüñÇ×5m|¸ ¬¹æšÝ¶}àjéú:é]4»Tº/»R¿läÂ>Cz:BB!„BšF U$Bô!òƒ(Ò…„Z$é‡ÀC çÅâ;¯%ö¢G?:ß Ï¬ôÂ8ˆü|U'ùÅq~(',æ$EŽZôhˆ ÃZÆÛ–è1Ði(C ˜=ši”¦Y*VÔéÛV¹ªDÊQZØh$ã0e Ã0¯ä Ý&ûôUó-2Øp« …¡Ò×ÂG‡ R‡åÃí¼Ÿ,±bŠØ1_NØXY™·Ä”Na¤)À”4’ÂHó¿PP;¦SàÀB!„ôp–]vYu衇ªï¿ÿ^}÷ÝwªµµU&ð8"„Báûq0dÈuÈ!‡¨…ZˆA!]¸¡9R!!†ÜtÓMÅy硇Rwß}·šCzüœ!!„B!¤iøpm,xí?°zâ)K ¢mß÷”ß'Çé%ÐÛ^¸¨äR(ç×W0ãŠÛ!àóEÈÝRöÀÜ.î›Nˆ±0ÞŽS¥qѶWˆ„„Žc ¡a!<Öçû¾*q_ÔÛÚ­±(BôÛ×¾Êé¸ö%g,mXÚÃÒª—¼±/¤Æµ¥-mJµÍõT[«W²Îé}sikoÕç¨öíp_ÖÅE›ÜmÓÇ…Œ:^¶Ã}“k‹A“áÅí¢h±Õ±ÌMYWs¬^!„B!„B!„BºS§N•õšk®YLp‰|ë­·D fΜ)ûp`ÇdΜ9ê±ÇS·Ýv›8–Íš5Ë™7Î{å•WÔ×_-û^Þzë­R¦JùÍž=[½ôÒKQ¼ Ü¥µÎëÿûŸ¸1¦ÿä“OŠSã|P|òM‹ûæ›oJâ dBÜ—_~…iL='m IDAT±Ô/~ñ‹†åQ+˜ØüüóÏ‹ÝçŸÞáôìkŒ Ôºíz¢Ÿ@$úì³Ï:`i×ÐΣP(¨I“&¥Ö¡­­M½üòËêý÷ß/‰Ã¹Hëã?îÔº¦Õ7k]Ñ'*Ý'h 8²g!Ëý×ì~ul¨×=PþYí8»Í6Û¨Aƒ©;ï¼Sú3!]ÜKï¼óŽzóÍ7SïG8-¢Ïãxט`‚ñõ£>×TœóÅ_D‹¾'0ï÷#ÞI4ûôÓO£q÷ÒÁ=Ví8ˆû¬±Æ5×££å@ºp¥Ä‚´ªeC¡ýÆ%ìcœMKeûðë*[–kXÏú¡¯}òÉ'ÒЮՈ´Ð/Ð?‘Æ^»ÎH'­¾(—]?ûšb¬×ma÷¤‹²â}³RYÑŸjé8m캗uœë}F_|t%í¾lt¿4ß9P«]uZY³\›zµ™îº\ˆ÷Ýw%ßjê^Íýa÷!¼K½öÚkÑ{J-׈ônèI!„Biø½¹éYt? 
ªv‡'E/tKôëÃ)ÑJ¯EÎ óÓΉc¼dzÚ1±Åp{ ÷C?Ȩ í¼ù>Æõ¬ä©”ÃÝ1鿍Ì}í<9?†.’^ìö¨”QÃá±$>L xL±$Q|òW¥¿¤0~Y!×7‰Ún±˜Óe¦:CæCgGíÐh¸<ÚëÈå1ŸtgtÅÛnޱÓdíçØ.“~¡ êãþèZó‹W„B!„B!„Bé^h§Þ±cǪ‘#G¦÷ÿ÷ê²Ë.‹öwÛm·hû‰'žW²¯¾úJwÜq"f4'÷ïß_=:á8 ¦L™¢Ö_}uÒI'©!C†¨ßýîwò·­ª<°l~ÿýïÕÿøGõÛßþ6qœæ¨£ŽR7Þx£ºá†ÔåµÃ;ˆIÁÄÉ'Ÿ¬.½ôÒÄDÞ7ÞXê¡ÛÂÎ]vÙEm½õÖêÑGMäuî¹çªsÎ9GòA~&^x¡¸m^pÁROä‘Xe•UÇÖ+ZyüñÇÕ~ûí'¡þæyÆgÈR+æ5Þn»íÄYG Ûæ›o>uÅWȵþË_þ¢N;í´h’÷K,¡î»ï>µÁ”¤e_C3wÜQòз]u˜1c†¤ '¯W_}U­¸âŠQÜ©§žªþüç?KYþô§?uZ]Ó꛵®ÿûßÕ™gž©Ž8âõÏþ³¤Ì¸Пp¯üêW¿ªº®H·Úû‚½föë,eC¾õºêÑ?«gAß¾}ÕJ+­¤ž{î9éÏÆ ãÃŒt)ðaƒqãÆE"ôõµÖZKíºë®%.ŠŒ=üðÃòl4ŸÃK/½´Ú{ï½åÀKxàRÅIx~ã>„ðçÊ+¯·V}¯ë°M7ÝTÂ!&6˸å–[ª­¶Ú*µ^;Aø·ìŸüä'5×£Ör@Puûí·Ë}¯Áûê[ xw‚[sË-·DÛ‡rˆ:thâx䃲é¶N+[Ök˜FÖúA †qûé§Ÿ–2húõë§¶ß~{{mçmˆËÐ \×à˜UW]Uí¾ûîr.ò¿þúëÕðáÃÕ¾ûî[Ò.¿üryvyä‘ÎkºüòËK»i!ÒÄsyæ™gäY¨Åh .¸ †˜í©§žç Di`24€ø “ŽÍ‰È˜T vÞygY£.pQÁù -´P¢,õÊ£&Ož,â=´?„sp‚ Ûë­·^‡Ò˜´}ÑEÉ5ÞgŸ}dRö5×\£?üp\wÝu±Ú×â8ý˜×Öu Ív@˜Ä¾‘VˆÆN<ñDuÞyç©£>Zú€(çcÂ:‘]¡®iõ­¶®Ý! ÂÜ'%›÷ îYLr·ï“JÀÍ´Úûy6³_g)[#îŽôÏjÆYÍRK-)B’®uá ñú->’ÁœXqïáýÀãžµø8îŒp.ƒ` c'Æi¼Ÿˆuðá<«1nb a$ÒÇ8ƒç¨þ€A¹{N«päű8±£ ZrÉ%Sï)¼« =—‹v–zÔRä qDQ' ,…(®¾x‡«”"-Œw¸.m´‘¼‹`N„}öuD~(fåÊVKÝ]cgÖú!_ˆnñ¾¹îºëª…^XúÚtüøñ"D„@ÎÂ481âYöÀø g=ô£ÕW_]Dæë‘òGúW"/ô}<ëñ~ GQ„ÿìg?“²£Ýð¼:öØcÏÀFö§,ý@ƒú@@7m|È÷²Κï1̧óeæG'ìökT¿´A»C‰kqÈloSÑÞhS; ûZW{mêÝf(7ê ¡-Ú ÷Æ>ô)¼Ëí´ÓN¾?P.¸`¢ìhc\Zû éÝPI!„Bi¾ï¹K„ +8G*ã¼Ä9*Rÿ‰3c‰ 26æyU "ãx¿¥¨Ô.7Ç‘c‰³£ŠÝµ³£R©¢HeŠÃx¥ã ±£éöéOnA$¬Ô”s Œ--„ LÈð—3‘#¤2¾EjA¤J8AŠè1ZT²(ŽL #µ¨1!f,»ŽÓ°Ý«qL:@&‘âZY@m;âôXiM!„B!„B!„Ò=ð‚0¿ ЂÃÊa‡&.‰páÑÀ= \t ¼û‡-Ì &›.I›o¾¹L4†È®w.!$&ðî¿ÿþâÒfN.—&óbR.Î…¸ Î&ˆª01âF—ÜM01eF½5˜Ü‹ ÎL]}õÕâø3ÿüóK[áx¸¤@@ 0“‚ÁìÙ³Å-çëý^xA&»cò5Ày EL&õÊ£à\7ÏK.¹$ ƒ`í÷¿ÿ½Lêî¨rÚ´i’ÜÁo~ó™Ì"úÜ G%qÒÂ… mÇœJ?L¯¶çB¨ƒvÆDqˆçÐG1e ¦'Ô‚ô8â¾01 ÷ ê^­[—fµÕVËtÿ5³_g-[³îj®Y5ã¬ÆBÒ•Ðâ±4î\uÕUâŠp-BÿÅó¢IÄÍþýÅ_ˆˆ"28—álÁÏÊ+¯,aÍA¸‘Ù¢‹.Z•(®¬oLQ&Ê„1 »rBH¼§@0g’µµ”â(ˆQ¿ƒ:HVú] Ï–Y³fU¬7\± -ŒÁk¯½vj{á:VS¶Zën“µ~¸ÞHïÀsV÷)<÷P¶1cÆÈ‡ ƒ`àã&¢Ï@x®…pgqÜb‹-ÖáþrBð®Ÿ=묳Ž+¢^¦‘åÚÔ»Íp}ñsÕ¶Ûn›[Ñב'ÆV]öZÁø—Jóg²Zú éÝ´° !„B!ÍBÄp¾.*¹°¤Äéó_0ŽÎ5Î+(ë˜Òó X|¬½¸<e-ž±$ãäü‚.º储ÝUìx¨Ã}-T 7D?H[ðÍt Ñ ¹Ý¾ÎáövÞ׋ŸØÏéuÛ~1¾}ÉYK›^ ñÒÚ~N«¬­%_õÜB!ÞÏûÅE‡…Ë\ÄåÚ·ç*Õú£—\æzjn¸ÖûmFœì‡‹¹-K«c-‹R9 
S²ŸkßG¼¬Ãcr­®óÂð0,ן“k+¦Q€*RÍmÐB$!„B!„B!„Bº?£G‡”M6ÙD}ÿý÷"ÔÁä`¸’e“vM¤“e&y»€ãò2'ÜVà kÚLô¾ŽOã†n5œ[lôk¸»h ôÚ©<öØcâd§Å]0i |Cœ>@l\BÈzåQ ˜Tn ´€vÌ›9sf‡ûÒ×Â@v®+© œ˜È]:̘1#¡#‰à„NPýë_åZC §ÆžT×_ÿúײ¶µn½õVYC R Yî¿f÷ëZƆfÜÕ^³jÐcH–~CH3@ß4E: ïmmm‰þa!€ØÍB#ášB#` Ö p\®¶Œ¶3¥P¥¥áDnpJ3mk©G-å€È@D§E‚@»c6â:VS¶Zën“µ~/¾ø¢¬7Þxã„Ó€3 úú›)‡ À¡Üt¸®¶Sw­íf ðÍv€ÀL‹ Þ{ußÕÎ{ÍêOµ Ý2õG=ôó å×"?íf´Ûaµ.›ºwMôÇðqˆdÍø4²^›z¶ênŠ ĉzl5ßUj¹?>Œ±ë®»fþ™Œ:BB!„BšD„±ûbèôØb8/z–+£vBl ݵCdèæ»8:ܵ3¢gŸg9HFaFy•(“²Ëc.¾g¸PI'I/é ©,·Ç„ƒ£ŠË¢BÇb™ƒÒxó<”I{G†õ):=zQ:Jçïñvª d‘„û£Þ ”±] ‡xSo¦4Ü/ÂPv†ÂUqs„ˆ4\GN·#dÉ~.Þ×Ú9R»<ÚÎyó<íöh¸@ºÜ ]’Õ»@Vãô˜ׯƒB!„B!„B!= 8ŒÀäñÇW§œrŠ8ÈÁ!Åá™…o¾ùF„• ÀÙí“O>‘pL¸uW'Ó¤ZöÛo?q®;v¬ˆÚ0q.+pž4hÓ}Ò“œ†àVi¢‡L‘DKh¶‚ ø7¿‡~XÂÎ?ÿ|uÏ=÷ˆËÞYg%Çcì±ÇQz’¶K0Z¯Ÿâë \PPÞG}TM˜0A&4CÜ‘œ÷*MäýðÃe w×±pîÑN~á"ôì³ÏŠÓ \}yäµÖZk‰ƒ&5Ÿ|òIõÙgŸ©Áƒ‹«&#\£'1§‰ê‘G½èÛ·oC®›F»1¹\gÊÅÕR‡4´À±¯QâˆÎ¬+îA8+ ñî»ïV|°1an‘µ h²ÜÍîךuTêŸåÐcD„tðNÌ"háî}W_ÆØ¡]ÖÀú믯&Mš$ãÄãC‡G3ˆšpß§¹-g§]À¡Žq;†^Ÿµµ”cΜ9‰vì tÙÌ1±^uÏZ?¯Úh¡)ŽÓH;§Qèç»ë9ï kFª|´÷!¼ƒÈ÷ß!Cä£K/½´¸¿ãZBœ¡ÚáÍî—ià£Ë.»¬|èåƒPâCŒKë®»nÅgjÖkÓŒ6+7¶f¹?©ë;.›€B!„Ò, †K¸@šÅ–0.HD†çó¼ Å3ÜSÜ%-Q£*‰ ã}1m”¼qLK/ÇûA,zl‰E‘ÊL¢H[P„ÊÅèXù-IRYIø(›¦k¤2”)„ŒE’‰ð ˜¿²Ñ¿¿Iˆ•ŠD¦ø10„„~Q숵8?jñc´Å•t‚,XâÇB쉸|¸Nì§8C&„çG§Òv†4Òž§ ;*Œ¤ $!„B!„B!„Bzø[Ú9眣ƌ£>ÿüs™¸›æ`hröÙg«‹.ºHm³Í6êÿøGäf¡!”:*ôrQÄN·Þz«ˆn¿ýv ?à€*ž ÑÄÌ™3Õ7Þ(¬j€  b.ˆª Ô‚Ã œç\æ&Nœ¨|ðA™X 7Í̉ÒÚÍ ¢•©L‡$ÄhßÁ­N„•œD»#?B‰¾ŽmÜ/àÀlÚý×ì~Ý‘±¡; …ÜzL!¤«£ÝÒLwÔ…ZHD½#GŽLu.3k€€‚H,x·Xe•UÄݶž¢/ŒWR¯±ÆN!uÖzÔBW.Õ«îYë® ú•ËqW +á¬Ð ç`)çD®ßW ˜ÕÛ´¬´ÒJò‹{";8$n¶Ùf7E¼Wã~ã4® >xШû²VpC q3Þ9¦N…7âÚ4ºÍÐŸí±µ–ûƒzÒÂ& „B!„4 qôµ(.Êt˜^¼’øÈ5a¾unÁun‰éBG‚Ÿ7Óã yûÜØÑ0ìãýH´g.*Ú)CÔç……Zè¨|!P¿¸äÛ!èí Üöíb¸‹Ž‹_–œ,Å´±®´´™KA/~´´Û²ä±.¨Ö|1NÖ¹ö¥-P­­Jµþ詹æòƒ§Zh‰öu|ëáv¸Ž{_/s‹ë¶öu›µ_²=Wo«âñáZ‡·µ—³­5Üo ã[õv{x«>.\‡çø>„Š?†ËÜ Ûs­íJaAB!„B!„B!¤w¡]â0႞®'ÝšÜsÏ=²†#¤Av”rù½öÚKœPî½÷^5{ölYÁ!JO0.œèÀ«¯¾ZuyvÛm7Y#Ÿ‡~X¶·Ûn;YkÄtXÀ{ì‘80@‹˜‘©ŽóÎ;O\ÄŽ=öXé³à·¿ým$ÖéIüüç?1óc=¦¦OŸ.‚C8Á³#d¹ÿšÝ¯;26t6•Æ=s Ñc !]Œ¡pP4ÁǽázLá~¿€»Z5¼ôÒKâ¸vøá‡‹õQG¥N=õTµÏ>û8Å>õâ•W^‘5>êà"k=jaàÀ©ïÚý-ëc:ÈÕJ½êžµ~:_ˆÈ\èp|è@£ûÞG}T¶,L-ëH[wf›6¢@Ôà¢÷B°üòËË¢>‡dùy žý²puÅG0Þxã ¯ PÄG–Yf™†\›zµÞ ²Ž­Yî®tH÷‡BHB!„BHÓ(.Â-Ð×ÃXY°Ä‰åD",Lˆ% 
Qc¾ôÜ(>oˆóQ¤ï6Æ¢F{)/ŒÌ‹¡Wt —¢P2GŠëXàhƒ <æC1cÞ6êsDY(.ùB¼­-lLìû:<(Š 1dRô¨BÑ£Š„Œsm£Åý …‘ž%Œl‰‘sKÅæbž§—Ra¤J†Gq– ÒÚ·…“X#®»ÈHèh.sUÇ…‘Xç98B!„B!„B!¤Gq\×àBbsþùçËäÚ 7ÜPÍ3ÏŸpåCšpª†¨ûã?–ûû¸ÏÅܹsE0'¸å–[ÎyLÖzÔ\/ÆPóýïwÜqG¦´´Ð¯B»zÕ=ký´{ßsÏ=WrýáDŽþ¡¸yÍtp'6ËŠ÷]”ïúùç&ú2Þ•5h/í¨ÜHšÑŸjíÎÁ¹í‹‰@P¨„)£í öƒ¸UÑiv¿,Þ?à‰~†$är9iójÞj¹6õj³O?ý´dl2eŠúæ›o$sl­åþèJ׈tú² !„B!ÍÂEù™Þ dݾ*®½â} kS¥qúøè\ÏŠoÿÜk Âôq²žÛ¢Œã¼øX?åü(Ì8_…a-^±¼^œ‡y~”‡*ž'ûÊ8N™ùy%aJ©¸Í”JÄ+(×ïH*ýâ¿`+ “äTm{²–ÅÇÚ‹¶}lûáv´ö,ÇOÓéÓ‹®¾!zÕBդ˦Å™a‘À4ÜÏç“áÐ*J\΋ĦQ\¾4<ŸKž›·ÓB9 €U9ki˰_i»ÀAB!„B!„B!=L¾îºëÔõ×_¯6ß|s™Ü‹ ¸O>ù¤zñÅÕ‚ .¨®¼òÊÄ9›l²‰ºí¶ÛÔI'$Îr˜|{ 'È„Û]wÝU½öÚkê˜cŽ‘‰¾ ÜrË-"æAº®¿ƒU¢\~¼î¼óÎh¢<ö«aÇw”2CxµÖZk‰H .y˜0Œúßwß}ꪫ®R{ï½wâ<ˆæþò—¿¨'žxBÄZ˜P­Ùi§¤Í ø€ÞüóÏŸ8ŽU˜à ±(\´ŒMGòùa’ó¶Ûn[RþzÐèô úáa‡&"׿þõ¯ÒOÁßþö7õßÿþW]|ñÅÒ´À¬;×ÕäàƒÑ/D&åõÍrÿ5»_×:6t6ÕŒ{Z°³Î:ëðaFºòN›6MîU ñLÅ>æÊl½õÖ‰93xæÂ- BÃýë_ÒÇád†÷ˆÐŽñ@ ذwŒ!6ptƒû­>¶^ ì7á©¥Åíq•µµ—8´-„Ÿx7AÛB¤ ‘)Â!æªÏñ¾w^ýÎÆo,¢¬¬Ô«îYë‡xˆ¸Þ{ï=5fÌ9ïXèwøØÍñ\1û®!lqáyƒs'>.á, ðÔ¤×^{­¼#âÙ„º,¹ä’™ÚºšÑŸ:ÒP6-Å;-„}fÙñ ÑŽÓóÍî—•@›âÞž:uj´ßÈkS6ÃG"̱î¤ick-÷GW»F¤{CGHB!„BHÓÐŽÚÍÑåô(â6Ã%2±äÃsóñùÉøÐ2Ó¥;Ej§ÇT'Ç‚—îôÇh[EŽùHh§‹ÇraÖmáZ»AæcWHYÚBažqlëöð\Ÿ Ùn÷%,çÉñ®¥­µ¸äÂóÚZ=Ë±è¢Øf9.ºœ[mGÇc·G§‹£vˆŒœ![Rç&Ü[*–C;Ašm¦¤#<ŠO8D·smò}ÓÕ±Ö¥œK$E„B!„B!„BéùÀ9äî»ï×ǧžzJÜÖ.¹ä™t¾çž{ªI“&ÉÄYÈ f‚°áÒK/¡#\sœô=ôP5sæL5zôhDLî… &ݚ艾vxµùi0‘ÓÁFm$“mÒò;v¬:çœsÔ÷߯Î;ï<õ›ßüFüñêÞ{v¬3Ùk¯½"1çLÐn=¡ÞF;Ý?>µÞÍC»Ñ`ru9Ê]´3W\=Ò/§Ã̸´ã³ÖnNO?ý´Ún»íÔ~ûí…/¿üòê´ÓNÁ hY×´sj½^`ÕUW¾OÒêSm}³ÜÍî×õªiÓjÒ©%ýJã\£Þ}÷]R:”3Ò%Ð÷öj«­¦öÝw_K!üÁ‡à"†0ŒC6‡¸â2|ˆÏß|PDÀ¸×µs€nh[n¹¥:è ƒÄÙyäÈ‘j½õÖQ„ÏÚ©L—Ǽw\a‚oÇigÖJî«YêQK9o¤õå—_ª‰'гDz‡xÏ•ž ¨ñ~ñãЮZUKZß·¤¾^ˆë몫3Üs¶³ í6N©›nœoKϨÓR¨cZÌ#×^ï39ÐBHuæ7±sbøYíÎ蹜µƒb™øØiÑå™~¾2Ï·Ü•R¥ÎÆ1Ê™¾vp Jâ¢ôÌð(Ý ŠÖf¼þÉÍ8&Ú/ù Ïñ…ÛÀ+y¿Uáa¥.*v4 c'H×ûNüŽ»@z©n~Áí )ÛyϵÆn‘~¸m;AšN‘±Ëcçr†ÌÛñ9FñýÀ½´U^n¿µwü Å÷+B!„B!„B!„8€ã 1Ñ"•\0I®"˜L çH“9sæˆã &Ã}À1“kítq,\áLg”¬ùL FZ˜œæØT)/¸}ðÁjРA298-]L,Ö“¨M0¡åq•<úè£2zÓM7•ÉõõΓ£‘6&õñ¥åÚùØB­z¦á ê`·5æJA¤ ûO¥´²Ôi#_¤íš¼|qàøÎ¬kZzY¯— „¾W_}µˆž:ê¨÷,÷_³ûu½Æ†JmZïþY͸wöÙg«ÓO?]çpÒU€Ó.žó[!X„cú¾k|s`¬Ä9px4Çh¸­]qÅ""‚Ò.ÎS¦L>ð Ëƒûμ÷]aŒM¦  .€#Ãíìˆ#ލºÊÕ£–r¸âñÞ†Lè´!<Åv1Æ<PFs êHÙª©{%j©êç ΩôèŸ_|ñ… 
G¿´r¢,èfºie)×nŽ¡í|ð€óðÞÑYý)­”m€ç««ÜÈõJK«\YÕ/Óú€nû´ë_©¬Yú{­m†÷8˜ÂIòÀŒÆV8DºÞ¥j½?*Õµ£}†ô ìËf „B!„4 Ù"A¡ }-†ôJE‹¾)ôJE“f®óÂFϋұØ1L¿PNôè9öqž%tL $mq$âG¥Jöu>z[™Ç%(ýeG`j#EôèEâÇHø¨ŠBGelWBê6ŠpðýX©ãMñ£}c[œ=“¢ÈX cgN/rµâºYP%ÂGÙÏÅÇØ"J-Œ,Pk—Ð1MüX.>MIHB!„B!„B!„ôn(9Õ‚IãX\@°LN;¶£ùL|FÊQ)/¸Á¸a\ >iuÂÄärÀÉ ®.v}ôÑGâÌYÏ< ’‚û&MWC¹vqåSÏôÓÄ1ø»®+.-­,u@ÚåúŠ9©¼3ëš–^Öë¥ÁDýÛn»MDJp3ªGßÉrÿ5»_×kl¨t?×»V3îÝ~ûí²6M é ˜‚ˆi_|ñLçãÇâ⫯¾’uÎxÍ„9®ò” ÓØk€#!ò[c5êVZÊáŠ_l±Åa•>(‘6þ¸Æ Ž”­šºW¢–úáÙ]­(L÷ÏjÞùPû¸´²”k·4¡#Þʉ ›ÑŸÒúA9ÐiíP)¿J¹FôË´>ö>RmY³ô÷Ž´™]î¬ck5÷GAc-}†ô.(„$„B!„4 ÐZ\¢GßrTžáöhˆ}žg ‡;£Wê©b÷IUðnÊ™ŽWš®*#ŽTqx‰@RÅÇ)Uêþ‡ ×GS)mb6f¹š‰U´DN^¸v9AކR»T'\¬­ýHYHîkçG·#d,bô#ñ£±÷,‡ÈR·ÈR÷G‡à1“2«ø±R!„B!„B!„B!ͬã?^tÒIêÎ;ïTÇw\ÝÒ†Ö< .»ì2qö¬7N¿+ÑÓêzóÍ7‹sÑî»ï.Ž@Ý©¾½©ßUbúôéêÕW_g̬â,Bº3øhÄBo½õ–¸?ÂuBaŒkÓ¦MSS§N•càšV/p¯At”åC„BHo‡BHB!„BHÓ(º‘Kc‹)zt8EŠз]“n^x¾ŠpR¢Ê‚!Œ,„ÇᘂC<™Ø7\#-qd”WŠ+dñ<Ïáþ˜<ÇÜ["ÝñÑK¬J;ÑGê‚8>>jgH?t4Ý-!¤=·K…±èÑB¬í‚¹DL—È|,ŽŒ!Ë "âHC™7E’¹â:Ÿ3öóµ kq†$„B!„B!„B!¤s5j”ºà‚ Ô¸qãê*„„› ÜâöÞ{»Ñéw%zZ]¯¾újYtÐAÝ®¾½©ßUc„Y'Ÿ|2RÒ«€³ëþûï¯yäGcÑÀemà 7T[l±EMΈiì±Çr¿eq$„Bz;˜Cœyæ™êŒ3Î`kâ šÞþÏìí@âýp-Ãmó8_…LJ“ÐããÂcŒx?¤çŸ8'(É_‡Åå´¶KÎI¦Ox÷Œ‰îX{éâõz}¬qn1Þ³â '¡ ÔY(™gåi¤Y0óö’å0ÃçzVÒêæ>·¤¾^ˆë몫3Üs¶³ í6N©›nœoKϨÓR¨cZÌ#×^ï39ÐBH9àwߥ‹‹ÄÎŽ‘˜Ðr‹TF|ì©ÅŠ.ÇHåpv ,‡Hå=º’JYn*á"©”ËýQ•q´6½*_\•!ˆÔâGK™tôJ ýt!dôÎVH:Dʾ‹mwȂà Òv†”mCàh‹#£°rBÈ‚v†t‰$qNñùß~„J5–‹«Æ2¾gô⟡ø~E!„B!„B!„Ò%(àjªèIH£y饗d½öÚk³1º1˜G˜ËåDJHo.ß~û­jkkS …ÏRBHOýyáÃ?”qnРAlÒåþ‘Ž„B!„æýàœ·ÅŽ¡kbKèªhˆKÜ" EᣠÝ"½Ð-RŽ Ý½ÐíQL¡¢ç?ºDž!^4Ž+7zwGU"’ŒãÃ2*•pˆ´ÌbH/éóè$ÐÛÂGf !õG/|æÅ޾-†´?Ò`}¢ïÇâG/v†,”ºC wH½­Ý!c7H‡ø±P*vÄ’p‹„Òˆ3> ”/¦‰«A–[ ¼É !„B!„B!„BH—¢ ÒL(€ì`NE¤·³À ÈB!½áç…¡C‡²!H·‚BHB!„BHÓ€(-rs ÝAQìèEGàûÿŸ½ó“¢Èßp­xʉžã!b:Qzˆ1¡böLèYñzЍ§¢Þy ˜1+æ,¢pÐSÌŠ€ Es8$îÎß·vk¶¦§{¦{ÒÎî~ïóÌvowOwuuMׯ«ë«¯ÁɱÎ1ÖÖd8?¦#k}QcM¶èÑ&Å‘¾sd–c¤©ÉKº}“¹_ÜÞ„½©Ã 'ÿæXn+ÈT*ø¶(ÒD6:Az®YN7î€{¶ï.í¬kk#…5iAdÚ².๠¦3¤/€ôÅŽ¾2Ã!²~>•BØèÄA‘c˜è±P!¤B!„B!„B!„B!„B!„åDBH!„B!DÅ@¬Ö(l¬>Ö;<¦B\kêŠ 5üï#Ü kr‰ÓâF!z4õ"Ê㙚ÜN¾3¤[–ž¦½ý˜Tærãí;˲_™ 3† 8?úóNé»@f À±&Ó ²®qyû1cªQè˜ò S¾ð1 ~Ìå?:gÈz—Ç—H't¬m6úÛ…»DÖO3E¾¸1j>‰2¸!„B!„B!„B!„B!„BQn$„B!„BT ë,† çY›¹ÉLzbÆßXh¡Fq¢/ŽÌ^2†$3×Õ„‘Þz·¬Að˜áþèmt{¬©),³] ÝLƒˆÑÍ“‚ ¤ 8?:GÈBHç 
iEµž(Ò‰ë²ÿ¯óEŽuâÇ´È1ËÒw„Ìt¬s¢GOìèÜ$3D’ì4K¬˜T ™K)¤B!„B!„B!„B!„B!DS!!¤B!„¢b Z 56¸=F99†‰­;dÀű.l;w,c²Ä–&‡ƒ¤ÛÞŸ…•Žô÷LãvéIp» ¹‘Q¢Ç†u)yªq¾þÿ·G'‚Ìrƒ B¦œð±Ñù1í YW¿­s€´bI_øè»Cf8C6Šë.YâG·Ì9C.ðÖû.l÷ÛtÕ#.0™‚Ç(d1bHB!„B!„B!„B!„B!„¢ÒH)„B!„¨ˆØ¬øÐ8!cýòš\¢Ç€kdýöâH_@iL¦{¤Ét¡ôÛø]o1Ùî 3¾À±&LèX¢o¬É˜4ü“ÊΤT㩬™FÇG;ñ§¾dP Y"„L5 }1¤96Îg 0²Qéýß Š sƒôE‘Añc¶3d¶û£›otŒ@#† BÖêÇ+„B!„B!„B!D fúô鿇~0믿¾2C!D“óóÏ?›Ù³g›VXA™!„BxH)„B!„¨ æ7ˆA¢-K S¢Å¼¢Ç´ƒcM@ؘ¹mÆ2Spƒ q‡ô—»ý›åCJ)„B!„B!„B!ŠãôÓO7K/½´8p 2£BŒ9Ò<òÈ#æüóÏ% Y°`éÞ½»|ÿý÷¦mÛ¶ÍöÜS©”yñÅͲË.kÖ]wÝXe2i~ !DSÀ½zÊ”)f©¥–jñ÷*¿ñÆÍܹsÍi§f^¸i%Ô-Ÿþ¹Yl±ÅLûöíUõÛÐù !š !…B!„7¿lÑ£‰pqL®‘&Z i„”ÆŸ¦²”îËÞl¶ëcÚé1•µMýv¡jG±8M*cyZèX“)‚ s‚4Nܘéé¶©«ËB:¡#ËçCÜ ÐÑE:±cÊû?ËÒsƒ s„Ìp|lX²^Œ%z,¥Ò_V§«B!„B!„B!D¡ó|`&MšdÚµkgÖZk-ûii<ÿüóæÂ /4Ç|«¼Î8,6ÌtíÚÕì²Ë.;nmm­=n§N¬è/£G6_}õ•9à€"EÍ¥Ìâl¹ÕV[™.]º˜ &Ä*“IóK!š‚o¾ùÆŒ1¬±ÆæCiòôàÖøÆoX!V©ëD^3gδõgS‹ n¹å³üòË›ãŽ;N…±ÊÊUµý6Ê-õ|…ñ‘R!„BQ1¬°­¦&Kœ˜-zô\!³–G $ÝþEŽ©ìå ÿd»?6$²ÆÓ6z.‘Û˜šLqcMÆ$Âý1©ŒIãÿž2=ß `t˲] l›%€lt}Ì<¦²Ýë§AGÈ€ð1̲¶ÑÒ‰Óîu™âÇFwȳ KüB–K 9O?P!„B!„B!„Bˆ2ƒ Žu¸ù Â:ñÄMÿþýÍB -Ô"Î÷òË/·Ó¿üå/Íú<ʵiÓ&ñ÷žxâ 3hÐ Ó±cG3uêÔŠ¥wÇw´.A7ß|s,aßí·ßn§ýúõkñe6¬L&Í/!„Æ|øá‡f̘1öþI]PJÞyç;íÖ­›2Zå* N¡ÕoäKW¡é.ço¬Tç&„h$„B!„BTŒó³!ë…‡!¢ÇšT„«cMãv¦ñ;¾h1c[î ™!–lø?k½§pÌv ?Çš"È0'È ÁcÃ4=Ér¬ C¦ŽÆfˆ CD‘!nAd£ø1Lô¶,— ²1¤B!„B!„B!„(7C‡µ"+Ü]ºwïnzöìiÖ[o=ëøÞ{ï™GyÄ 0À¬¸âŠf¿ýököçûõ×_[!`‡ì¹6WúôécFe&OžlÖ^{íDßÝ|óÍMß¾}í´’,²È"fÏ=÷´îUcÇŽµ‰Qàrõè£Úr×»wï]f£Êd’üBQ÷ÒuÖYÇNK ¢{Dø‹/¾¸Y}õÕ•Ñ*W–»îºË|üñÇvð…e—]¶jÒ›/]Ť»\¿±R›¢éR!„BQ1¼ʼnÈ€±Áµ1Óá1S$™)|¬‰pôÅÞ1¦AwÇ,Ád`>·ûc*F.ÔdlÇ2cꄌvAM'H“vÌh…Ž”Ißõ²%–Ù\e2n~ !„¨g™e–)‹ãó¤I“ÌüùóÍ&›l"':•«4”‰T*eÔ6_5äKW1é.×o¬Tç&„h:$„B!„BTŒzGHÓè i|¡cˆ@ÒdnIš§Ç,wÈŒíj2¶ :=†:Cšúýy«¼åÁÙð˜©Èvtƒ4™"ÇŒÿÓó5áÈ´ûcM@Ù(„ô—;ÑcZ0Y[?M;BÖf #C ƒî!d­ÕúâÆ¨i!d1ä<ý…B!„B!„B!* "³ã?ÞÎ_{íµæ¯ýkèv8{î¹ËmM˜0ÁtìØÑ,µÔRæý÷ß7o¿ý¶éÚµ«uæ ‚‹‚O?ýÔ¬±ÆÖEfÑEÍØ†ÎÌ'N4+­´’Y~ùå³öÓß’K.™á>S[[k—wêÔÉüá°Ç7nœn¶Ùf¡B8'Üwß}#ófúôév¿³fÍ2믿¾Yk­µ*~}¢Ò€S"‚!á³Ï>3¿ÿýïí{]ªgä»>¬gß+¬°‚ýD]Wò÷7Þ°N‹m´QèuñËûœ6mšÍwÜ+¯¼²i×®ßn»íÌÒK/mxàsã7Z×Ã0n¿ýv;=äCJRfƒüòË/6O~úé'Ó­[·,W£bò>ÿüs»Êûºë®›sÛ\e2n~ !Dµ@}NÝÄ}óü£Yl±ÅrnOÝýÕW_Ù)uu} ¾ùæ[²_î÷Ž%–XÂÞ#H± õŽ[Ï2êNîçé¹Çùå—¶^!îpuT.Þyç;¥N.ô<ŠMuÇ 
^(Tì7ýt~÷Ýwö¸Ô}Áúëýí·ßšÿýï6]Ë-·œYxál) çÊq9FØy²ŽØÐÏ7÷öË:ŽEýÊt•UV‰uÝ8_ÒÏ~ƒeÑ­cÿœgØy‘ÄYÁrÅufàöœ?çM<ÆþÂÛà|¾øâ §Æ-wAæÍ›gf̘aãCò’üö÷“/]|?_ºó]ÿ°ßX¡÷‚$å"iž'¹·ð{àXäÛÊiRˆÂR!„BQ1jih=f¹7:¤ ::†;=6ΧÂþ²†iz&0ï 'Óë2þ¯Éún!¤‚†‘©LGÈ0Qd¶ d„ãc„#¤sylFºùF§ÇF·ÈL÷Gçå™!’ Š"ÓN‘u¦Ñù1Ž2(|Œ#„Œ#†B!„B!„B!„MB3„‰tè?ôÐC}÷Í7ß4Ý»w7´‚·¿ÿýï¶S4Œé”ì 4‚´‹.º(ÝièP~ñÅ[Q›ë´ü /˜í·ßÞìµ×^Ö Ðq">>ï¾ûnzù[o½eÓqÞyç™.]ºXw@:1;úõëg†žîOÇoçÃÄitÔ>öØcíwêß§ÕsðÁ›;"×%_Î<óLsõÕW§—ï¾ûîéyò×À|×Ç­ßqÇͨQ£B¯ëN;ídÝ~\~rÎ9çû rÿý÷›N8Ávš㤓N2—]v™çZüéO2¯¼òŠ™:uªY{íµ³¶GäÀ¹pM7ÜpÃ’”YÜO;í43tèP›'Ž=z˜{î¹Ç¬ºêªEåÅ÷ßoöÙgóüóϧ—!´¼üòËCÓ“¯LÆÉ/!„¨Þ?þøãV¤åî—l°Ùm·Ý²\©ïFmïþýa:÷Ñ h‰ÁpF÷qê5„…Çq?¦î·¬gÏžv9âr?[o½µÙf›m"Ï "‚.D`Ø =BÓÁà÷ÝwŸ­Ôïœo’æ!éDèöÌ3ÏØsC7hÐ »-1 uÝK/½dóÀñ»ßýÎôîÝÛÖ¡¾0´SwîÜÙì¿ÿþYùËЈˆØHǶÛnkóþ‰'žHç—«_ûöí›Ó¡“¸áî»ï¶ƒfcÏ;v¬+ˆ?}8¯çž{Ξ ù,WÿùÏÌ«¯¾šÞ~Ĉéyj` Οëíò>N¹óA,KÌÆ ~~ãš½Å[Ø<‚|é¢ äKw¾ëö+ô^¤\ÄÍó$¿IÊñ“O>iã>Û°2!„È„B!„BˆŠ±`ïú˜-`ô]!ÓÓŒm¼ïÏ2(n nã¾ß šlø×GfY;ÆpÌM” d*0“åi§5¡âG·}†8ÒB6¼ŸÌ=:Ác†û£'ŽÌp€¬ÉA6 ³Ý!ÅŽ™BÈlÑcØÿ¥B†‰!ëô£B!„B!„B!„hbp«:çê@ž‹§Ÿ~Ú q­£6Í}p¸Ãí£>ÚvP¦£1¢0„s¸ë}öÙ±—Êá´ÄˆÒ謎ø±Â•W^i;VÓÙ™yøä“O¬÷Hœg‚\qÅfذafï½÷¶½é\þì³ÏÚO¥È—éÈ}à 7Øó9ñÄ­(ƒkt©Êw}¢ò“áéÈç!Ĭƒ6›l²‰éÓ§Oz[Ĩtd<ôÐC¶³:ÂHòw¾Ï±}pr×#LØw×]wÙ´ùn¥*³t€§c>ù‹p–þ?ü°2dˆ‘~ðÁn¥Iò‚4#æ%›o¾¹\ ê¼í¶ÛÌ.»ìšž|e2N~ !D5€Óݽ÷ÞkÅQÔG?þø£"QOp_ET¬»¹ç"ç~ŠáäÉ“Íÿû_sË-·˜þýû§2@xEÃ=›:€)‚.öÏà ˆÀœ=W÷ÑG™—_~ÙnËwp~ñÅ­ ÷â¨{,u)ûCxƒÄ=BÒÁq‰gRÿ#&C F]C}—BòpÊ”)vD„¬ãšúçØ âÆolÝ9ç5räH+ÜCè—„¨¸„t’78_s 8ÒK'â¼wÞ9rŸ¤q&õ1΀~ÏùÁÇlí÷SãAÐÜ¥‘tc¾þúë6_þüç?Ûzœ}ønÛî·Á5$>Æ-w>ä5ùÀqùážÈ ˆ5<‚X´æKe1nºs]ÿ\×+é½ n¹ˆ›çI~“ 4ã7Û¯²/âå`Ì,„ˆ‡„B!„BˆŠ±`¾/j¬ qrô§©å5‘"ɬï“!ˆ¬Ÿ¯ u†Loâ/‹DÆ$•5“í™!‚Ly›782Søh2–§Dþú´à1pƒô…éyÏ!²¶qYýòF¡cª¶Ñ2-”¬­1õƒ]ÃD•BÊýQ!„B!„B!„¢Ú˜4i’â6çCGqßuè,Œ˜ŽŽÄ>t@?ðÀÍwÞ™9Èéo ÜC‰Ã âÈ Ã]wÝÕl¶Ùfæ’K.±É`Çë¤àŽsÆg˜ýë_éeˆ"7ÝtSsóÍ7›óÏ?ßvœþðÃí:', ‚p¾91Ž„8*VŠ|iàœøàè„0œ 2ÎõÉí `E™„z§žzªí@î‹ÿp©Äí󪫮²®L€2D |Óç ûÂ`ŸX–²Ìâ$„’ø¸29èHO‡ùG}Ô–ß*I^ šD˜‚sé˜1c¬°ý’Ò ä+“qòK!ª'|sÎt€pí¦›n²ƒ °Ü‰Ð¸ŸqßE(µß~ûeÜïpÖEôå\Ÿ{<.oÞÖYg» Q "-KË.»l¬Xá÷|_ˆEšp¼ãþKI=ÊýÝ'éy’yˆ 9?{@Ôçâ„öß~ûm¬ëSHr\Îg\3Ê™?hŒp]pÊäüBæKW’tG]ÿRÞ ’77ú IDAT'íI“N ‹ªsŠd€bY!DrR!„B!*Å‚ù5fþoŸìéoëæ™ÌiÃúóS÷=þÿmÛùÞwæÏ«Ÿg]ýúšìù¹©ÿ™XÿÛg^ȇãÌ‹X—ÞfNý'½lŽ·¿9™Ÿ¹ 
ÛÍõÿŸcÒÿ7.kœŸ;»áÓðÿœÙÞr÷ÌÛÎùµá{¿f.ŸûkýÇíõóæ¤~˃¦¶ö·DšY¿}f6LgEü\63bZè‡ï“‰ …B!„B!„B!ª ×ñ—ξ>×]wÙm·Ý2>tšÇE(B=œ Ã:GÓ9N>ùä,§;ÜYèÌ>sæL++D•¾èà¼ÑF™Y³f™±cÇÚe8PB”è àhØT”2 ¹®O.è¬ï ÿñ*L:5c¹sL ŠqF ÛÞÏÿ0·\~píA8Hg÷R–Y–€P'NÀ-¨Ð¼pâJ„”N tÒðÁCó:_™Ì—_BQ-p¯ò…On÷ìyóæeÜ3x1_L0mÚ´ô2L°ÜrËel‹¸ p֛Ơ¡Eí±bC\ñÂû$=BÒá vr"HpÎŽq)$†Q§ãô GY‚6Ù'×¼T~òËA1‚+[8çÂÅ(nðW‡38FºxœCd0¶)&ýIË]âŠ0çhá’Yj¢®)ï¥&éoÒÅk |!„(9B !„B!*"Å´Ë£ïÙðO¶+¤ñS!î‘Ùî&û£7f˜ûcζoe*d6Ã2sGiçÇTãvÁei·Gç ™ÊÜÆ:8šú©s ºA¦3œ ]$ 3ÝS!nõ®Œ¾ëc˜d¾O>7È(WÈ\ŽuúQ !„B!„B!„BT9tHÆíq•ïzrä‘Gš.]º¤ÿ¿öÚk­8lîܹYûÀ&ÊéÇu6º 9p =ztI:É;— 8â<3}útû¿ëlÒ9N:é$+ž;å”SÌÈ‘#­“ÀÛ´iS±ëRÊ4äº>¹ =À9"9!…7"ÀiÑ9LÁk¯½f§a"DÖÞ‰9ä’—Y„ €ƒÖc=–±Î9jņIò 1(ÛAÚµkg§A1A¾2™/¿„¢ZˆrxC þå—_šŸþ9½Ì‰Àq.ƽÍ7>ÿþK-µ”²­/äsŽ~Aq_’4ºû³;nÒݺuËZ—ô< I‡û¾« ||ad> ÉC–ÝÀýú/,M®Ncð\û*]¶Â@tHýëŽÌ“>¦ Ü€K BH'Þs¢H?®)uúÝõÆ=1 sæÌ±ñÆ/¿übË SÀí³ÔD]ÿr_¯bHú›DÌ˸srÝ\…ò‚3¹"9B !„B!*NŽqþ²FqcMÖ÷2æƒâG§2 ,O/Tý²ßR©¨“´¨±q¾¦qYˆÒB6 3ÅÛ9¤'~´BÆT£à1§’eµû¨o«bç¾Ø18-D¹ âÿ8BȰuB!„B!„B!„¢¹Ð¹sg3~üxÛù×çO=õ”•%uƒqâÃ(qÂÒK/m§®Ãr9pIœ'ÁuÌŽrßÁ=æ•W^1´N•Ï>û¬u¹õÖ[MÏž=+r]ª! a,¼px·ÖãŽ;Î\sÍ5Ö‘óóÏ?7[mµ•¸>üðÃö\pT âò?(î\°`1b„ìµ×^%/³®LâÖ±¿k×®ÖI²Ð¼ s?8±Iò•É\ù%„ͶmÛÚ)Np'„úúë¯Cïm¦œ{0¨Â«¯¾jÆgïá3õ΀¹\uó‘KðTWWgÞ{ï=[gPIz…¤ÃÅ0. 
¥”yèÎ{±Å ]ï\ö’ŠüJQ¶Â`PÎ'@êjœœûŠ+®h–YfÓ¡CÃ×Ä ˆ$97–— w½S‘Ý2a;Äz8gÏŸ?߯"œ]ýHú±÷Sm÷‚R“ô7IÙ?âˆ#Òƒ³0 eb=ö°ÎêBˆ„ÏŒÊ!„B!D¥˜??(fLEºC6n“GìæúXã ýïyßõIø.Õ’Õ®ã‹ ÃæSÛ‘¡bÈôòšldƒØ1Ì ²Îw…ô„˃¢G_èXg¢¥Bæs†ŒB !„B!„B!„Bˆæn‰€ð쬳Ίto):—óÍ7ÖiNÆA¾úê+;íÔ©“ºá¥ìí\þ\ç~'P£#|n¸¡í ;Ð\` ûôécÿçœ*A5¤!.O>ù¤®µÖZæê«¯¶ˆ{ï½·:th¨hÐ ƒ‚ÁQ£FÙkvðÁ‡:Y[f)‡¸(Ýyçf“M6)y^Ðq’ˆ{ã”ɨüBˆæ€saóv—Xb +òFôå*èã\¦¹Ï"æãC]³îºëšvÚ©lîm8Κ5ËÖ?aú¤çQ¥–2©£É®m˜s²oºA/À NPJçBÒ,[QàPM]K> r¤®Þb‹-Ò1 ñ×{Ùe—µçµÁT•+àØ±cÍË/¿lãfbB7Ђ¿ë®».ñ€%Õr/(u¹(ä7‰ƒi¿~ýÌwß}g…¸IÞ}÷ݦÿþ9« Ñš‘R!„BQ1ð.1ò¦aj²ÄŠAÁdã¼·­×¸’!Lâò˜¤}&dP«  2Sð˜Ï=Ö4nŸåé "k¢Ý ë²…AçG÷¦Ð1ìSkâ !“ˆ!“¸A§B!„B!„B!„¢¹ƒûÉ!C¬ûÉÅ_l…e¥d5Ö°Î{t(¦3y–ð¡}ûövê’>¹Db€à7A_t‡3Ž;†s t"B',ËÅ:ë¬cn¿ýv3gÎsÿý÷›^xÁì¿ÿþ½F¹ÒàÞé:@S1|øpë¤øÚk¯ÙkG'rò;Ê%ÊÏÿ ¨Á'rÈ!e)³”Iœ"'L˜P!$ÎAˆpô :aF•¹8e2*¿„¢š@ä„;/£.Æi|Qb<êvD\qÄJo½õ–ur;ꨣ¬ÈŽc±¿ßýîwe='QЭ[·ÐõIÏ£p·æÔA×Fç~‡Ræ!çÍ9# «›XîÇv€Ó"8‘d’ó Ö‰[¶¢@‰ÓöG}dÅr~|ˆÒ­sƒƒÅÁÅcåt9À]wÝÕŠ5‹MW9Óä^PH¹È•öb~“ˆK÷ÜsOûL1qâD3uêTÓ¥KÝÜ…HÀBÊ!„B!D¥ÀrÁoŸùó²§éÏüzÁäüŒÏoÛyÛ¤çç†ÿo§sûýí3/kÙoûž›ïÓ¸¿yÁÏoùœ†ÏÜÀÔûÌmX>×ÿ¿a:w¶÷ñ—Ï1 Ó†ý¶ó¿;gvýöóçÕý–¯ Lmí\SWÇHm¿>³*ô™™çÿ°Ïo™.¤B!„B!„B!D‹Nà—]v™?çœsÌ 'œå6D'æŸ~ú© ý;1ÛW\a¾øâ‹Œu8ò½þúëVX°ÝvÛÙeˆÈp[zóÍ7Ó½ÑÚ{ìaçS©Tè±Þ~ûmsàÚŽËŽ›nºÉv„îܹ³ÙtÓMí2è<ëàܹs³öƒH ˆôåëdá…šsÏ=×ÌçÅjÌuaËã¦aùå—·S'Òh*pð¡³8eè7Þ°BHþÿòË/#¿ã„}¾°„röØcÙNë½zõ*K™¥ŒÀ%—\b~üñǬõ”‰bûì³^ýõ÷'OžlöÝwßÐ2œ¯LFå—BTÜ÷|ðA{öëgÜÙùîÐèqz˜Ó!õ¹ïÇ>ôà¹çž³1B+þÿå—_Êv> BðÁغǹWIz…àŒ nòë ê IˆK)óÐ94#ü~ŸA0Ø?I?ßt¶iÓÆ–b‚µ{î¹'çñh!X¶ˆ©ï[†9a;„¤Ø’Á3ˆ=(é›2eŠuŒDÊà qpB>ΣÜñø®ÓäQ’®r¦;ɽ r‘+íI“añª;7V!D|ä)„B!„¨ëÝþd:=Öxó&Òå1ãÿ 7ÉÌ:5‘ÿ„.È&:ûÛ?5ËÒ.ÞjY*{Y†3d˜ÛcƲF×Çú¶·Ó:ošk>Ž d'È$® ò,óç…B!„B!„B!DKwA:Ò4È\}õÕV ˆ{#âA„b/¾øbZ„µÔRK%Ú7Ž*ˆŸ}öY+DDÌØ¡CÛ‘ŸÎÑ‹/¾¸¹òÊ+Ón1øÃÌAdn¹å³í¶ÛšC=Ô: =ú裦{÷î¶Ã~¸¿°OÜ~öÚk/›æxÀîû¼óÎK;Çp :¶üñÇÖ• â‚6ÚÈl³Í6¦oß¾¶Ãüøñã­K!ŽA›o¾yäñé4úé§ÛùwÞ9-¼Ìµ.ly’40ï½÷šZá(b€“N:)-N¨Æ 3½{÷¶ùdµÕV³¢C'tà°œ«1¢ÄŠtˆ/G™Ýi§Ìn»ífüqûÊÛšk®iEˆ8b"  ¦7.”ñõÖ[Ï A{ôèaË"þ)ÓtÌ+ùÊd®üBˆjþ¤I“¬ÐŒ{2÷Vþ§¦^¯ñ:qï¥ÎFhˆxœº‹ºœzaËwß}÷´y’qF½~‡vHo[*H;¢)îß¾³OÒó(œ É[D‹ÔQä-õ%Â2–‡ ÀF)ó4 rüôÓOm@©Ï¸öˆÿØ‚ØÆ¿æ‹.º¨Í|¸õÖ[m=LI1B®ó@æ—-ˆ*[¹àZ!£^¥îõĹ–Äˆ.Ù.®S&±í{ï½gFm…|\bÄ•¥„4QFŽi6Þxc›Ÿ:7ZÈ—®r¦;ɽ r‘+íI~“ü¾o¸á¯R†xdÆŒvá8BˆdH)„B!„¨ 
\›AMM£²aÚ°¸aÆ6z_@™1ã #ý…¡³yIEþ“)v´¢Åô|M¨(2-n4iP䨰¼q*'f =FMã K%„ŒZ–OY§ƒB!„B!„B!D+£ÿþf—]v1gžy¦u•¡ƒüóÏ?o×Ñ9™Žñ‡v˜í4ìpÃéèž‹Q£FYç>:"_sÍ5vYÛ¶m­Ðek¯½vÆöW]u•‹!b¼è¢‹¬XòÈ#4C† ±‘£Ž·É&›˜ÿû¿ÿ3G}´ÝØ¡œŸn@´‡èŒŽä H/ç\Ï8ã ›Nwþn{ÒEZ¢ÀõÎ7Î](ߺ°åIÒ@¾ 2E(:tèPûÝSN9%Öõ‰ZŸë{t²ç\‡“ŽägŸ}¶Ùj«­¬ûØ$>ÜpÀö:;&ÐO>ùÄ þ:vì˜ÞbOpN¢¥.³Ž‡zÈ\|ñÅÖYòßÿþwz9×ÑÆ[lQp^ ¨ -,¹6ÿüç?­¸„´\z饶“}Øþ¢Êd®üBˆjÁ‰»tébPO>ù¤›¹{+÷kîA¶¿ôÒKæå—_6ãÆË¨¹çùu&¢'êË­·ÞÚÞ ©w?! Ǹ!”K/ª[æ@”Åúà:縜Ï7Éy’–QP!<;v¬¥ŒQßëä@ Tyäàƒ¶u/\¼úê«ézQÙ®»îj݃PÏ"lCGœ@ˆÈŸó¸üòË#‡ n³Í63O<ñDºlá$HÙBôÄo\'DƒÁÁBH`Pƒ¨rL#éçº0(™ä¸¢ë±ó¬Y³¬v̘1öûĺGu”p!IºŠIw®õ…Þ ’–‹|ç÷7É÷¶ÜrKKºA'‘,iÊ÷œ#„Ȇ¾À©ÁƒÛF!D6¶Û¹í˜ž2u©à|ª¾›yÃÔ:ôxóþv¶K:ë:¹7n×°·¾.cy*úøßIeß-kLg`>ë;™û¥~ª®Þ¥ÈÍ×OkÒnDõ뙯©_ŸjXæ¶õ¾[¿¾&°ÞÍgîÓí/ó85czû¬õ]“™yÆwkiˆ:·ðïfo*°¼¶ñ|ÃÎ5t¹·,4Bó0˜Ççæ/÷¾Û8ŸD4‘TTQŽO)Ž1ÿ·ó¬BTò¤†ÆˆÆ§_»Ø(ˆL%pÌ~À [^P˜=>6L²\ k²ÄÐ(pLÅüÔå™/FU§"„ŒóY Âßšž¡_ !„B!„B!„"t²ž2eŠuE¡3»ï–ã3sæLÛ?j}\XÈÑñ<_§oÄttpf[·:ëÓÁÚï”Lç{Ü"é(ýôÓO›ÚÚZëDDçû•VZ)tßÔzõêezöìi;[aˆÒ8çOGû8 ( ŸO˜ƒPÔº¨åIÒðÃ?XǶó…’ù®OÔú\ßãºø@Ü{èŒÞ§O+N‚X÷*œ?O8á» qàYge‘Îç8^"–¤ƒ¹Êlœ%§M›f]…¿’äEØzM)Ãn¿¸!\~'W™ Ë/!„¨6æÍ›gë2îqˆípiãþ‰ø)î}—?¾ƒ€ÜwŒ#~Àµû)®ÁApóEȇp ±œK÷nÿ¾¶ÌË¥¯0È¢ÀcŽ9&Q}u…¤#l=ÐÄ9nßÄÌ犭J•‡QàøGÌBºâlÏyÇþöaçA|sã7Za%¸²…CäK,QPYå8ÄYˆâÂÒE\U·çÊê}òëë•òŽØÐC®2•®bÒk}1÷‚¸å"î¹ÅýM’NâiŽÅ6l+„HúG9B !„B!*È<+ö¯'jµ¬aIÉJêü$•5Óðo*à™±‘?Í5Ÿô“Oü˜K™KYk*#„”ë£B!„B!„B!„ˆ¦]»vi÷¾\ärH ab”81Üqíñ‰Ó)™NÒ¸Ðä'$œ…p¨™1c†Ye•U²ö‘Ä]È‘K|µ.jy’4,³Ì2ö“ôúD­Ïõ=®‹âCHe¿´µÐñ|'Ãûî»ÏNqŠtÜyçvqÜ ‹)³AÚ·oo?…”ñ`^„­º…‰.ò•ɰüBˆjÃ!!` .é}œO?þøcκ‘9 ` KO®eŽà€ï¾û®=^Òº%×y’ްõË-·\ìø£Ôy¢Ä$ÂDÎ#XÿÆ9BÊVXìu¬|ùŸ+O¨÷Ãbƒb®wT‚y+ï¢ÒULºs­/æ^´\ä;·¸¿IÒü] ! 
¼Ç* „B!„•ƒ­¤ÈLi£u4Îh/«)QÚR9þ›O*€ [<&qƒL*€Œ#†Ì'„Ì'†B!„B!„B!„B"ÃO<Ñ 8Ð<ðÀfÀ€Ê”Á5 Æ‘#Gš#Ž8Âl¹å–Öµé›o¾±y‹ˆÇÎwÜÑnÿþûï› &˜í·ß>C\r÷ÝwÛëæRÕšËdT~ !Dkq8©>úÈ:®ºêªvœÞ&Mšd&Nœh·¡þ)Ü{GuíÚUy(„BÄDBH!„B!Dñ…Æä>F "£—%¥œÈàÿqÜ“ºA&@â%„\ â,„B!„B!„B!DŽ>úhsÑE™Ç\BÈ"èСƒyâ‰'Ì AƒÌðáÃíǃç 'œ`Î:무"ù°ä´ÓNËØÏm·ÝfÅ€qC[K™ŒÊ/!„hM,¹ä’V(?fÌóæ›oÚ·8Dù[mµU,GÁ¸ì±Çöþ›ÄåPy(„¢µC¯áÔàÁƒÍ9眣Ü"Ûý<Å4eêRÁùT}—ó†)ëüy;Û=u)øNÃ6ÞúºŒå©èãg|'•u|·¬1ù¬ïdî·î·„¤êê]—Ü|ý´Þ…©q=ó5õëS ËܶÞwë××Ö»ùÌ}ºýe§&pLoŸµþ±k2Óá/ÏønM QçþݬóM–×6žoع†.÷–…æAhó8âÜüåÞwç PÄu™*ǧǘÿÛyÖN!*ùR3Ã{ñ§¹æÃþZ–$Ê˵,•gYR7ÈB…ùDaõw*f½Wéæ%|1]Н„B!„B!„Bј?¾yå•W¬‹Ñꫯžè»µµµvŠO.3fÌ0¿üò‹ùãÿhE’¸OùЊk¶È"‹(Ãb”Iå—Bd‚ƒáÏ?ÿlæÍ›gþð‡?Øêñ–›‡Ô‹Ó§O·i\zé¥uñ„¢ŠAÿ(9½B!„¢‚Ìm˜#€Ìµ<qÅþ|œi1È\Iq4¨5É„µ*®B!„B!„B!„BüB»-·Ü² ïJ8QZ–_~yûÉEMMD} ʤòK!2i×®ýˆÖ‘‡Ô‹;vÔEBˆf‚„B!„Bˆ 2ÇTV™Š±¬ȰeaÎqÅQ"ȤŽÎQbH7B!„B!„B!„B!„B!„¢úR!„BQAò9BæZg]*Áºbþ|RGÈ:ß 2®#dR1¤Ü…B!„B!„B!„B!„B!DóABH!„B!D™ø?® d¡Ä?úóIÁÿK%„L"€Ì%Š”èQ!„B!„B!„B!„B!„B4$„B!„BT9 Ó¤.IHåYÖTÈ|BȤ"È0¤DB!„B!„B!„B!„B!„¢å!!¤B!„¢‚ „,—2•àÿBþ|Qd]ŒåùÜ ó‰!…B!„B!„B!„¢u2}útóÃ?˜õ×__™!„¢EñóÏ?›Ù³g›VXA™!„Bx,¤,B!„BT޹¦^ é–ÅýÌö>ÁeÁõq?¿æ˜ÿ5°,¸<¸Í¯1?³"¦n~vC>Íûí³ÀH)„B!„B!„B!Dé™;w®yüñÇÍ„ J¶ÏÓO?Ý\|ñÅÊÜ<Œ9ÒuÔQæë¯¿Žµý‚ L÷îÝM=Ìœ9sšõ¹§R)3nÜ83iÒ¤Øå(i~ !Ds‡ûþ|Ð*î{uuuæÆo47Ýt“=ïj¨§>ûì3óí·ßª ê7!„MŽ„B!„Bˆ 2§ÈOP´%ˆÌ7_ˆ2Žø1®à1(ztóN:ßHð(„B!„B!„B!Z*tÜž8q¢¹ÿþû­ ë£>ªŠt!€Ü}÷ÝÍ)§œR’ý=ÿüóæÂ /4Ÿþy³º>¸,^tÑEöÚTŠÚÚZ3lØ0sóÍ7ÇÚ~ôèÑæ«¯¾2}ûö5mÛ¶mveÍgË­¶ÚÊì¿ÿþ±ËQÒüBˆæÎ7ß|cFŒažy晪Hn/¾øbYê•)S¦˜™3gšuÖYÇ,¼ðÂM~®¸SÞrË-¶.úM!DS³°²@!„BQ9‚#qÖäÙ>•p}*d>ײT‚ùB>uy!„B!„B!„B!Z/¸ÖüñV8€û¢O§N̉'žhú÷ïoZ¨0ÄamÚ´©šó½üòËíô/ùKÅ]L^<ñÄfРA¦cÇŽfêÔ©IïŽ;îh–Zj)+ìÃý0·ß~»öëׯIÊZS—£¤ù%„¢´|øá‡f̘1ö^LRJÞyç;íÖ­›2º…‚ëgsˆA„¢‘R!„BQAæ”`qÄþ|pYR1d*ÆòZ]Z!„B!„B!„B!r0tèP+ØÂA©{÷î¦gÏžf½õÖ³îƒï½÷žyä‘G̀̊+®höÛo¿ÄûïÓ§5j”™bþÅ_ܬ¾úêÊèÈ]wÝe>þøc; òË.« Bˆ„H)„B!„¨ … !Sy–ås}Œšæ@ÊÅQ!„B!„B!„Bˆbyê©§¬[RÛ¶mÍu×]gŽ9昬mfΜi.¹äÓ¾}û‚Žñ믿Zw9sæTÅ9?üðÃ6=íjjj*zìbóbÍ5×´bÁJƒã!Â>\s û|ðA+r¤]/+QÖª¥ÅÍ/!„¥g™e–)‹ãó¤I“ÌüùóÍ&›l"ÇÀ ×7•J™ (3„¢$„B!„BT$/ÚR1–%@JÔ(„B!„B!„B!D¥A°vüñÇÛùk¯½Öüõ¯ Ýç£sÏ=7tÝôéÓ­“߬Y³Ìúë¯oÖZk­ô:ÜqÍCüŸ}ö™ùýïoEc8)ýïÿ³Ëþô§?™Å[,kß¿üò‹™2eJÆ>sÁöo¿ý¶ùé§ŸL·nÝ" œpß}÷M|Nùˆún¾¼@P¨n„ ¦cÇŽf©¥–2ï¿ÿ¾=Ÿ®]»Z×D`ö¿Â 
+Ø[毶¶Ö¼ñÆÖeq£62Ë/¿|Î2Àþ¦M›fŇ81:V^yeÓ®];;¿ÝvÛ™¥—^Ú<ðÀæÆo´®‡aÜ~ûívzÈ!‡”¼¬Å½ÆÅäÇçŸn÷¿Æk˜u×]7çµÎUŽâæ—B´D‘QÏqÿýãÿZÇûà´øÕW_Ù)uÛ’K.¹-Bµo¾ùÆÖ«ì—zñÄKØû-B6¶¡sëYF=L½€ \!± IDATŸºâË/¿´õÓJ+­”®ïrñÎ;ïØ)õ{¡çQl:¨ÿ8uL¡ƒÄÍC?ß}÷=.uh°åzûí·6®#]Ë-·œYxá…CëgŽË1ÂΓu‹.ºhF¾¹ï°_Öq,êj¦«¬²J¬ëæÃ9³¿yóæÙkä\¹f Æ@ÞçÂ9§ql°`çö»ßý.ôÚÿøãV„·ÎORî…¢¹ !¤y` ¥ú”jL›©-±´·iø!„å!•:M™ „B!„B!„B!D+ÑÚ§Ÿ~j;ôz衉¾‹¸áØc5Ç·Ô|°¹ãŽ;ìü™gži®¾úêôºÝwß==ÿ /˜çž{Î <Ø:âáûºóÎ;­X2 :³ŸvÚifèС¶ã¾£Gæž{î1«®ºjzëÇoçƒB·8çTh~äË \ß|óMÓ½{w3pà@+HüûßÿnÓKÇx:߃ÛfÇw4£FÊXÆ÷vÚi'ë‚E' ÿ9çœc?Aî¿ÿ~s 'X¡C't’¹ì²Ëì[§8ˆ8ß$$ÍCÒ‰Pò™gž±ç†(rРAv[âêÍ—^zÉæ¡`ïÞ½m]ì;&“vâÎ;›ý÷ß?+(A ñ”¡ éØvÛmmÞ?ñÄéüruuß¾}ó:t’Ö'Ÿ|ÒÆþ5".Ùk¯½Ìþóóꫯ¦—1"=Ïà Ä0œ+n »îºkÖþGŽi…²ì+J([h¹Bˆæ„„B!„B!„B!„B!„B!„( 8ßÑóu rÅW˜aÆ™½÷ÞÛvˆoÓ¦yöÙgíÇ+ ºo¸áóÉ'Ÿ˜O<ÑŠ8Äé쎒ŽýÜ|œwpÜ£Ã=Î?úè£È´ÐiÑiATI‡ý‡~Ø 2Ä ?øàë"¤QáþðëH”ôœ Í|yáóôÓO›wß}׺ n±Å¶ó}¿ã¼ƒþ-œwÞyfÆŒ梋.²yLÇý>}ú¤·}ë­·ÌAdE=ô½#¯¼òJë|Ä÷9¶®^.Ä}wÝu—MWÐ ²Ø²–ô'ÍÒ¼ýöÛÛ4n¾ùæV¨‰¨ó¶Ûn3»ì²Khzr•£¸ù%„-/¾øÂÜ{ï½VàG݆Cb/êîшÓ}¨ÿ¹#(ç¾L0yòdóßÿþ×ÜrË-¦ÿþigA„yÔWÜÿ©K˜"êcÿ8,#”óEñQu%ñÄË/¿l·å;8 ¿øâ‹Vä†rÔýšz™ý!¼ ’ä< IÇE@ˆ(Xa)ÂAê-êÞ¸’‡8s3`B§Nì:®©Þ_(n¼ñÆÖ=’cp^q¹DØ™„°k¤“¼ÁE›kÀù“nćĊ;ï¼sÎý20îÐ\#âšÄW.ÆbŸ_ýu{ŽþóŸmýÎvÄH8Or|ÒÁ±ˆó¸S’\ã8õ}!åE!š º{ !„B!„B!„B!„B!„¢,Lš4ÉNƒn‹ˆÀèðïC‡l„yN¬ˆXš¹Nó¸âbèØtÓMíç!Ä`8êø¢?:ÌÓÑœÎé¸ú.‰t G°ï¾ûætÆÁÁâ \•ì—Næ>ú¨¹ùæ›ÓîB~ø¡:‘šOœsŠ"ßwóå…ú<ð@ë„é;)åƒÎù °¢LB½SO=Õv¬÷…¸TΟ?ß\uÕUÖI rÝoR&‚éó…}a°ODŽ,KYÖ’^ã¤ù`1I×®]͘1c¬pË$¤=(ÌÈUŽâæ—B´4œð AáÚM7ÝdøXîLÀ=Ñä~ûí—qïÄ¥aJ\º'=Dh묳Ž]†(‘b¶e—]Ö ÖòC0õ‡/Ê$M8RäBR'SWø$=BÒh$çׯ_?ë¸èb Dûß~ûm¬ëSHr\ΧC?&a{έ}ûöæðÃO_WêyÒÎà8E"DpY,ä×–[nizõê•Q¶8ùKÙB‚NÀíÒÅ• HA쫬²Šý×!‰áü¼`ß\ÓéÓ§›?þ8#žá;sçÎ5ë­·^Î4S^„¢¹°²@!„B!„B!„B!„B!„åÀu §#¸Ïu×]gvÛm·ŒæqrÐYp,†Ã;ÌNƒŽF÷Üs†¹ ú ¾Üs‚8q;Ž~øÁNÃlÅœS©òRâ™D *|ÑঠS§NÍXî6ƒÂDœö÷ó,Ì¡%ÜŒ"$(eYKz“æ‡W"¤t"H@ñàƒ†æu®r'¿„¢%Â}ÏAºeÜûçÍ›—qïu"xÄ|A‰Á´iÓÒˉÁrË-—±-b8À¥7nƒÎ”Ntµœ.⊈ Þ'éy’7˜îN ÎÙ1.…ä!¢Aêæ`L‚»"ôèÑ#Ë‘GKöÉ5/Õ`ä—/‚â W¶>û쳜ßwõ;ƒ$ Nœþ5w¼÷Þ{v5ÀE)Ê‹B4ä)„B!„B!„B!„B!„Bˆ²@§ò3fX¡–sÄ#<ÒtéÒ%ýÿµ×^k…f¸Ý8N:é$+V;å”S¬{#îyt”oÓ¦M¢4ÐÿÄO´Ž?ÿü³Ìš5˺ô!. 
Š‚àÊ8"=öØcëœC’/Dsüƒ‚¼bÏ©Tù¸4âžgÀòË/o§NüàèØ±£’ïÎ ^{í5; $a„‰$X1J¸ZLYKz“æ‡OàZ¤]»vv€ä*GqòK!Z"AAœaù—_~iëy‡”れCžÏ¯¿þšq¯\¤m}!"EŠû’¤ÑÝëÝqƒFèÖ­[Öº¤çQH:Ü÷]½âã #óQH²Ì94û¸z4,M®~dÜ+]¶Â@°É€ 8n2Äf›mf…§8YÇ…xs\¡‰SH"Lþ_l±ÅÒBÆ\Z^„¢¹ !¤B!„B!„B!„B!„B!ÊBçÎÍøñãmÇp\|œ;<õÔSVœæ‹Áp²yå•WÌÀ͘1c̳Ï>k;€ßzë­¦gÏž±Ó@§ü¾}ûš{ï½×<üðÃæÐCµâ<:ƒã™¯ƒÿôéÓ퇰Žú]»vµƒב>¬“y1çTªü(5 /Þõ¸ãŽ3×\sù׿þe>ÿüs³ÕV[™Ñ£GÛkÀ¹à¦ÄåYPܹ`Á3bÄ+àØk¯½J^Ö’^ã¤ù€Â•Ÿä*GùòK!ZmÛ¶µSDc'\ûúë¯Cï“Ü|aY÷îÝÍ«¯¾jÆg~úé'+èÇmzgÀ\½ùÈ%†«««³ŽÔ?ÔeA’žG!é˜9sfF>J)óÐ7À0œãìÙ³+^¶ÂàüŽ8âëpÎ  °Ì2˘=öØÃ:qÇ=ƒGPÈ3"AΟ?ßÎÇ©ïKY^„¢‘R!„B!„B!„B!„B!„eaýõ×·SDlguV¤ÛNn¸¡íPþþûï› .¸ÀºöéÓÇþ¿âŠ+ÆÞâG„wÞy§¿çž{ìò(wA:¶ãèÇw7Ùd“¼Û;±Û´iÓJ~N¥ÊJ€ã&¬µÖZæê«¯¶ć{ï½·:th¨`Ð ƒ‚AÜ‘pf<øàƒ#,‹)kI¯qRB€siŠC¾r”+¿„¢µáœî|§Þ%–X ÆÐG¹ ú 8s÷lÄ||¨·Ö]w]ëÄÙ/ ¸ãTM=&ÆOz…P*Qa)óúž|áÚ†90;ñæÒK/^æ9¨­­-YÞ†`ÙŠ—Ê~ýú™ï¾ûΊAqe¼ûî»Mÿþýc;qã Š’Á?2ï–Ç¡åE!š !EYH¥RæÅ_4Ë.»¬ \š§Ÿ~º ˆ5K!š3o¿ý¶yíµ×ÒwŒlؾ}{sÿý÷§Gƒ£tß}÷-Ëñ_ýuÛÈÎÃßöÛoŸ5*O¾õ¥dîܹæ™gž1«­¶š¡¯\Œ9Ò<òÈ#æüóÏ·£æˆ¦*uíS)žÍ›JÖIMyL¡XK±–B(nIÏOª„B(~Pü ª—`»î­±¬(fBˆFpÆ2dˆuƹøâ‹­@­pǹýöÛÍœ9sl{ü /¼`ößÿôz×ñÝuV²Ã;ØÎéÏ=÷œ­«Öá†æJd5Ö°‚&Lˆ%’s‚D'R+ôœ ýn¾¼¨Ç·õ?ïT¾úê++ À™1×{ —gAQ'‚OÈ%\-¦¬%½ÆIÁ êå—_¶ŽžÁ2UN┣¨üBˆ– ¢8}A}¬p_‡19Îxqao½õ–}~;ꨣ¬ÈŽc±¿|ÎÑÅ‚X¢„nIÏ£–\rI{ ê• k£sŒC)óóæœ¨ ¬žc9ÐWÃs48‘d’ó nŠ[¶ò±ÜrË™=÷ÜÓ:ZOœ8ÑL:ÕtéÒ%#N‹r˜$&AÌÈwˆÊ’qÝ4+Q^„¢)iBÈI“&YÕ~®sFÑ6l˜Ýf—]viQ‘ òƒ>°ù@åÍèI|š‚ ­¶ÚÊVØ4 4—ëðüóÏ› /¼Ðüñͪ ´äòMC?:t°]kM–æãÇ·);¿­¦j¼ªtZš¢|O™2Å|ôÑGæ§Ÿ~²¯œ£í¦9qòÉ'ÛÆ{G§NL¯^½ÌI'df̘‘^ÎCe; †xÐæt½õÖ³¿\£òðà·ß~û™x ½lРAæßÿþw¬õ债h÷Ýw·/5ž~ú鲇x„òJ^Óy¥¹ñý÷ßÛëí¶yž•°Ò±C)~ÿI®}s‰©O)ž¥£)ꤦ8¦b-ÅZе„ÍR<«$‰÷ýã² >ß Úæ —ú~^©øBõ‚B(¨¦8À0gn‰_|ñ…í¿ìç‚Nòr‚ˆ2´ù97ð(´!X¶Þ|óM7Ð_:_Ç Æ7®]Ñ¿fN¬IšÂú“plÎý¥—^2=ô½¾Îõ:•(/BÑ”´!$pÜ Âôà‰'ž° ì;v´êø–A/Åp â!ׇ ýÄO´6Êå²á.„¦¾4¬äª´/¿ür;¥!¤ÚÒVÍùZ.‚„‘,vÛm7Û˜ÕÚÒAðÿÏþÓ\yå•iW ¬ðû§¡.ê¾×RÒRêòë·ÆKÊc=Öv<ðáA‰FÒ8 Yý†¨‡=gy Ÿ~ú©]G#ê“O>i¯m×\s¡ÖïôáÈN8á».¬q÷ÑGµ/¸5p_¥ÛÉ/ßúæÌŽ;îhŸn¾ùæfùB›kÂÃ|7ÞØüýï¯øo Rõ[s‹©O•'¦j©ñ”ÈMSÔIÕV*ÖR¬¥XKÑÒŸU ‰÷ÃŽÇz^þãÿˆ<^SÇÂÕB¥â Õ B¡8 šâÇ¿þõ/û<7Ýt“9üðÃsn¯ø¡òíÊÏòæOµ¶Kä;çüÑ <Ø\{íµé6 Sà]wÝùÛ§ ÷#§Üù#øTÌ*„áàTHGzê©«¯¾ÚÜyçfƒ 60;w¶÷bâ6'èâþ ÄqÓ·ÙfÓ·o_ÛÁ»«1Øâæ›ožq þGh†Û6ï%è´Î†~ÇñC=Ô Ýèä#ã°ÓN;Ù~C?þ¸M7JÜ$ùåóÆoØþDÄ€ˆÝ€Îþ 
~H#Mtƒq$='Ÿ$ß“•€Az÷îmÎ;Lu¸Es-\ž9\ ù:`P÷"VÌËRÖ ¹ÆIA\É ˜C§îµ×^¶Ö-·Üb š$ªåË/!„hÉ ögÀ+ÞÕsoç>Íÿ<Ãm»í¶i·=à>þ§?ýɶ!D§d€-êTDk,ç9‘0Û2.ËA¨3àÇm[*H;Ï¢ÔQýÓ’žG!ÐþEÞ2ˆõyK݋ȔåQ}‚”2Iíxôà ¦ Ô\{’ô©Øyç3®9ý˜É¸õÖ[mN}K1 B®ó ¿†_¶0)ªláÞpà 6¾!´‡0ˆ6‚Dú”ø‚GæŽ=ÚŠ5Égbœ4ècB’HÃU¢¼!DS²pk9QìyøÏÕPМ:t¨m(çÅA÷îÝMÏž=íC2£;R1>òÈ#fÀ€¶C)#.ê:Ó§O3jÔ(ø¬½öÚYëé|ÊK!‚ ò³šÒÖÚÊ7ç©§žjiFö…àhW­%Œ,BC éÑGmÓãÆ³#| HdT .¸ E§¥”å;×omæÌ™f×]wµ#¼Ò`Ic#£·Œ3ƺçôë×Ï~‡—™Í Î\Ç|`„>®a8øp6kÖ,Û˜ËËZFÇ¡¿ðµ×^3O=õ”¾’7ŒÚëFÌq8q¿Ÿí·ß>+=ùÖ7gx°ÞsÏ=m#õرc­“_s‚‡j Cåƒl~çŒ ÄË:ñb¤%Å Í1¦RAZqx ×:Ir¿ ÓfÐ5Á碋.²"Hœjq»@p©˜U!rÃ@8Ÿyæ™¶Mœ{­«?è´NÇøÃ;,-,gÙgœaôZDXwÕUW™Å_<ò~ÃA‡|:áo±ÅÛ“Nl#GŽ´ñg!çä“仹ò"×9æÊ‡\ß#^ç\‡aÀÙgŸmëBê^úd!F>|¸p€:ÚuîÿðÃmýŠàˆ='Ïr”µB®qÒü ÿéà™‚kÀôBHÇ¥—^jEaû +GލüBˆ–ˆ 2øÂ<&¦>w÷iîûÜKƒ GPöòË/Ûv%¿^åþ¹êª«¦—ÑD»ÎÖ[omï«Î¨ 1:Ï þCÿYk.=¾@?l™ƒ¶&Ö×!”ƒ|ξIΣt°Œ:‰ºá!ϱô…à™z“Áâ ¬TlaÐ êpÚ^}õÕtŠà~(<“¡ÎFðˆˆ‘˜ƒú•8nŠ:BÉÍ6Û̶ٹ²E¿Ê}§sAš¶ÜrKwøS1 éñëxÒB3Xn—|7ØŽC§ÝpÚ´iÖI!cÔo"ì|’”!„hn´!$ ¼Ðj Ð)’J .®»î:sÌ1ÇdmC'SFKbÔ']‡z1‰ÀŠ`*Œ‡~Ø®çeg®š"m­©|ŒtæÅ5=u¥¯Iµ¤ƒ†>FõÍïŸQŽipãÁ½Œo©i)eùÎõ[ãÞÊõf„F‘sÐKG‡+®¸Â>à5ÇÎùI2dˆmçA‡Î÷ÁÆFƒa<Jiô¥1Ö‡Ql€‡¯0ò­oî0r7/´ù½4×Út*r¯tÞ¡ñ—|Ž;9Ñ–»~k®1•â©òÄT--žñhŠ:©ÚêAÅZеk !Zê³J)â}Ä~\KÛ®½´M W•Š/T/!„â€j‰t,¢ïQè L£Ï>û,²3²â‡¦‰Z{~–;ª±]"ß9ŸvÚi¶†Ø³4XEA§F„tr¤çȰ߲bV!„ÈçÂ#FØyäýŽ9tfw•ûƒ1p7B0îçlG‡ô0°S"hÇ=‡mÄ…´ÍÓ¹=LTG!Þ]øƒ :HBD>8Ò1‡b¾0'‡$&å݈/`KrN…æG®¼Èu޹ò!×÷@u~G|œ˜p!b`‚sÏ=7c{D "Àõˆò¾ûî³S'ðë]âkŽŸÄ )iYKz“æ Ö`¡CI˜7ožµE§Œ!M¤ÑÙv;(·|q]ÜF¾|TÒ¸æò:¬ìqLÿüÊ‘Ž¤y¨ýë_³‚N4\ºí¸yRŠ´Øs~Qe‘`‚¿ýR”ï8¿5FGƒwÜ1ëûîe-û)ØË“÷üÆ8þÏç>zôh{Ÿå~RN­–2tÐ!‰‘m\C1u‘+¯”ò•}åÿ]šó­/䜓æe±û‰sOÞn»íl#8#ñ{oî𛧃 Äün s’Æùâ†BŽë×uÍ5¦Ê—/利JOUsLUêç…RÆSIê†Bb¯R¿˜+és]œß_>òÕIåÈÇ|Ç,EÞ)Ö*,6P¬¥XKÑòžUЉ÷sÁÈ©À(ëÔ»>Q±p©â—JÇ›À‹ägžyÆŽ¨ìê—BÛâËÕ¦«zA!TCàÀ xoBL@û¨(?4MüPÎçZ×näÚ8Øn̘1¶6_ûpÒöä\Ïöaù™/Šm›JÒ.Qªvˆ|y§LЩ"o¸á† d>N>ùdƒâ¾•«¡bV!„È b1:Å#äÏמös:ÇéÃcnQa"Hêêîû.Î ‹s¥Tƒ¾4:uŠìˆÎBˆ.Ý€…žS¡ßÊ‹8ç¶M®ï!ð;ñ#<„¨wß.nòqÂ>œ"ýX›}Äuƒ,¦¬%½ÆIò#¸Ž÷Dþ~à%lû\å(,¿„¢%Ã}Ò=ƒqÅ5/—2¬>Xi¥•ìsZðYîÇÌYo¹çT„d~z‚uDØ2ñ‡/’çùã%úç;BÒ¶ž¿oê»|Ž¥ÊÃ(x~§~Ž»=çÜ>Îy¸²•Dé—¼C¤˜kÐ pœ°úŸ¼¢=´âhžë7‘/?â”!„hN´GHFèÞ½»ítÉË‘àòZÇÔõîÅ 7úsÎ9Ç~|hœfTÀ¡C‡fTÔ=zô°/¡|«`*tFáa9£ ø•ΠAƒ²F:òÓCˆÓÇ 
Á‚—<Œ@ÈH¢<‡zhAyµï¤i^d0b–ÓX°Nr’äiÒkvæ™gÚQZ»ï¾{zžÑ¤u‘cŽ?Þ.áćVÇ{¬>|xÆK2^bÞqÇ9ó<ßwã¤-ßu‹Ê×BÊ60r/£Ä¸#ºŽ»q`ÔŽM7ÝÔY&L°vÚF¡¸øâ‹í(¾çŸ~z9#X•^@ <ؾðÛ?/Ź&4œ¹‘|Ë‘ŽBó$,PöÔ*•'Ŧ…r½ýöÛ›½öÚËŠX|è”ßµkWûáá.×}£÷Î;Ûÿ)çôh×Q•Å@çëßgy˜áº“çþC÷e~~G ÖÓÏ}…ÑwJÍÝwßmš·Øb ³ÁDn‡c*׊2Œ“"YFäÜ®Üð0È>ó­OrÎIó²T×$Î=™e^8¼òÊ+ö÷ÎAÍ×pO½ÆCq¡qNÒØ!*n(ä¸AšsL•/åŒ©Š§šCLUÊç…RÇSÁÃ\÷I:gsüRÅ…þþ'® IDATâ’¯N*E¼–ô˜¥È»Ök(ÖR¬%„hþD=«ïçÂuú慺߹)W,\ªø¥Rñ¦ÁþQG•PÝxãy_W*¾P½ „Šª!ðÛåè€L}¸’çžk®¹Æ>ÇÓ–DñCÓÅålo@ÄGšqhBl@;/ÚìׯŸ}þöËPÒöä8Ïöaù™/Ó6§]¢Tù7Ïâ” Úªø}y䑉:Î"øÅÕ7Œ(W‡bV!„¨>h«çþß·o_ëTNˆƒp"¦Atókél¶Ùf6>Á1ýˆ#ްŽDk¬±†‘Èbh·ƒ€ðn„÷]¾ „kF^xà­²ÌF•£¨üBQ«¬²ŠÎ1ˆÑÏŠˆ&MšdEilC]V*¸Ó7…þÊC,´92pB9ú¯!DsfáÖvÂQ# 0*%/wzöìi_L0r í4öo²É&¦OŸ>émé¤þøã›½÷ÞÛ¾àEÜC† ±å|ðAºS'#yòb‰m¼ Aµ ÇYgeÃÃþ.»ì’•::Ò9•Ž tä…0Ê"ð"#îhq÷4­ä%ѤióÍ7·ßÃ1å¶Ûn =§\×!Iž&½fŒÕ¡C;‚#.$4 0ªùçX΋C^nGo¸âŠ+̰aÃlÚxqBóÏ>k?ùÈ÷Ý8iËwÝò•ï$e›—s¼”â¥*#mb;N™¸òÊ+í(|Ÿc'9§œrŠíìÌ‹ ÎÃh¤‹³Â^—^ŽrÎ4 !ñGÏ`tZÊ?#oìºë®eOK)ò䥗^²S^Ž2âH)ó¤i‰sŸ.¦|Çù­qž…^~ùe{oãe.`¼´ç·±çž{ÚO10ª3ùÊè,G}´ÑŽ—ÑxiÍh»gŸ}vz{ÒÄhntšàË=„sfdBwwÛm·’—Oç°æÖ„û3å–ta¿ýö³yvË-·ØÆVîç”gF·‰³>É9'ÍËR]“¸÷dçlEyk /´é<ÁCˆXü‘+;ÿ…ÆW>-!¦ »/–+¦*6žjN1U±Ï 刧|âÜ'Ë{å;~¥b®8±q.òÕIåˆaó³Ry×Rc­BêtÅZе„ÍŸ¨g•RÄûAƒÎÝ@‡~Ÿ|±p)â—r¶ßÁ)Ѝ¯|„´²œr8ǨAð*_¨^BÅÕøuœ¨q\žW[m53yòdóÆo˜7ÞXñCÅåloÚwiöîÝÛŠy¶'?ê"he¾Ðöî$í~~æËÊE1mSqÚ%J•Ïqó,N™à8ଢ ë…[âŰކˆ/ÿïÿþÏnÃñâ ˜U!ª b ž®ÔoÄ/¼ÇmmBHêbܱyÇK ÏÇAlA]OîÞa“GÔÕ þëûsâêòÖJX9ŠÊ/!„…Á3;ÏÖcÆŒ±Ï‹î™ä†çMÞãÇuŽÃ{ìaïå¥è{ÛZó°¥B›ä€I!Z#ªE`„édíà¥Ì©§žjëÝ ”ÿüç?ö’uF×qàÖ@£8#]ÒX‚c 0rÛúáŒlDÐBã?£„upçwl ÀˆŠþ膌†4ºûÐYÞwõp/Æ‚vÉQûNšV^¨ð2‘Q(X‘h à¸¤5ª“­OÒå¾1W¡×1nTŽ|ÌwÌj‹W›S¬UH®XK±–¢y÷Y¥ñ>0J=Ìž=Û¾¨%®¦ž Ö[ùbáRÄ/ån¿ó!]'rG¯^½¬ ã¦›n²;IÜrÊ_¨^BÅÕ8¨ãူSÒÄ G^x¡]B*~hÚø¡\í Dy ¢å´€(’ôÑF{þùç§Ûx’´'Óž‘/–Zj©¢Ú¦â´·•*ŸãæYœ2áÚ]žþyë>é¤ríYˆ'¹Ž5”Eî Þ‡˜8ŠY…¢º@$¼o¬Ä.QÎÜ­pÀ Hµ ® nD$‰c”•ûct\#[;aå(*¿„Bƒúyä‘ÖÁð矶Ž|܃ù Ê/5qú(Ëm´Ú~UÜ`T­y !„ˆb!eA=4êû/Oü Ä5~/ŠW 4¶/ŒzV¹†^º…Ao]‚/µ¦L™Z©Ñ—ÑýÏÎ;ïlGõŒ»ï¤iuìéLïw&xðÁcç}Ò}ºm,ä¥+¢‡bàe(œ|òÉY£÷0b1fΜiEP€` È×àè74—ëáæ‹/¾°S^xÇÉ/ ‘ºÄ=ç¤yYªk’äžìê“BGènj¨ÛiŒ8p u¤37è0P ±C¡ñ•bªòÆT¹â©ÖS•+žJrŸ,Wì÷ø•ˆ¹ ½ŽÕÃVS¼Úœb­BêtÅZе„Í“¸Ï*¥ˆ÷aŸ#F؎΀ ŸÎüIbáRÆ/åŒ7t6:ÛðLýû…^¨šøBõ‚B(hê82Ø.vÔuÚÜs=K*~¨žø¡\í ~~ø"H 
ó>BA:Ž;6½ÚµkgŸ=iïaœÖ^·ô<$m.Õy'¤B„#GÈÂü<|Ã÷ߟ^öñÇÛém·ÝfGôùöÛoí4¬ñú§Ÿ~²/xhÿî»ï¬ë0ÊAŒÖ¸øâ‹g-§œ‘‘8#=:xñ×¥K—ôÿ×^{­í¤;wîÜØûNšV7bpS¼¸‡í|š§q¯Yþ÷¿ÿ¥ƒ† tzæÅ#(1Ú%îL¼óݤ׭ز q€›”?bèk¯½f§Á¤IØa‡ì ß»ï¾ÛŽÐJ°xùå—Wôw¾ÿþûÛQ;9?^j‘7¼läå4 '©…ä £¨ž~úé¶c>/à‹mÌ*&OJ–rÞ»ã2xð`ûò’²Îˆ¼Ü9G–“GäK.×›\¸Ñ£F©ã^:zôèôH°®c‚o¯.ÿ¸ÿçƒÑú`µÕV+ɱãžsÒ¼,Ç~òÝ“W^ye;-g'ŒrB‡‚ºº:{ÿgÄrFdfh× ¢©c‡Bã+ÅTå©rÅS­)¦*g<•¤n(Gì÷ø•ˆ¹ ½ŽÕÃV[¼Ú\b­BêtÅZе„Í“¸Ï*¥ˆ÷¡>±8ÂâzD8ûüñV°~ÐAÅŠ…K¿”3ÞtD¹ütëÖͼõÖ[v0ƒj‰/T/!„…Ž€Á»Ø~;^×®]í1lòüã?×+~hÚø¡\í ùò7BFÃò#N{r¹Û3š¢mª˜|.æA™8?ø ¸ÆÕ7ÕþóŸV¬A!yrõÕW[ÇHŬB!„B!„BQY$„Ì•9 gg{)ñî»ï†Š~x¡Å衎T*e;l3ÊèìÙ³MÛ¶míÈÎí‚õIèܹ³?~¼™s¯[n¹åB×»‘dÜ(Ðn´Zç‚V)\‡ˆ¯¾ú*ï¶Ó¦M³ÓN:•äØqÏ9i^–{?a¸ú¤¹ŽµÅ[غ±wïÞ¶¾âe?Â3~þ}ª©b‡RÄWŠ©J/ÍOµ¦˜ªœñTÒº¡Ô±WÜãWcÌÕ\bØæwÕkR§+ÖR¬%„hžÄ}V)u¼Ïý¦C‡Öy `† ’@ä‹…KI¹ÛïrQj·âbã Õ B¡8 â¸óÎ;ít„ Ö¹2 ¶ñEdŠš6~(W{CÜüÀáБ¤=¹ÜíMÑ6UH>—²†Œ0ì;ü6´ë믿¶yOh'ìÓ§M—¦‚s–D”Ér¶òóK1«B!„B!„B” !²Ê*«X×^VÅþ02àe—]f_Æñ‚ɲIuFÀLÒ©%FŒaÎ:ë¬ôK²R4­Ë,³ŒÒi±˜<-Nxà:šÙpà í Á÷ßß\pÁæŽ;î°/8øÅW̹ïb¾[iè k­µ–Å’×}ï½÷6C‡-ê¥"#o2B&ûhß¾½}ÁŒ£/¨+ /ìxyGycrÈ!Ïï$yÂK@^î3²-£Ž"Blª<)&-ndÔBFb-7t\îþËK,±„Í:RðùñÇÓ/]“ÀïW\Ù¸ïqá]GwŽÇö|*y¯pŽC”Eê™(ÝÚ Ö[o½’;î9'ÍËrï'êwâ×/Í™­·ÞÚv| Ó¿D,M;”"¾RLUùxªµÄT匧’Ô åˆ½’¿šb®JÅkÕ¯¶ÖX«:]±–b-!Dó'׳J9ãý^½zÙ)qh’X¸9Ä›ùpŽÆˆAª Õ B¡8 )ãÜÝ^|ñÅô³^<òˆ¿¹gbÅÕO9žkÙà\êHÒž\‰öŒJ·ë’Ï¥ìƒáŽé®M'”Ä‘µûM¹ßU\c×Y_­˜U!„B!„B!JÃBÊ‚d¬±Ævʈžqà¥0¡k€/†#Ž8¬¼òÊæ“O>1_|qIÏ-iZW]uU;Å'ˆkÈ/GžŠ{á1kÖ¬¬uîEQ¾t“/·ß~»Ùwß}ÍO?ýd^xá…ØÇÏõÝ\i«$Ç·nQ¯½öšu§âšðRö}ñ”„ÿûßvô]FÌ¥Œ/‚A´’ì°Ãf¥•V2Ï=÷œ}Q=jÔ(ë(UÎÑn‹Í^¼!<äeúùçŸoú÷ïßdyRlZ@˜N%^öGýÖåñÐ?/9Ý‹Î9sæUŒ7.t½[¾îºëÚ©{¡Ëï±’àzŵÜÛßyçÈín»í6+†Elµí¶Û–äØqÏ9i^–{?a¸ú¤Úï…ÂÈê0lذôˆÊM;”"¾RLÕtñTK©ÊO%©Ê{%9~5Å\Í-†-&ïZk¬UH®XK±–¢eõ¬RÎx7ð]x’ÄÂÕoúí?tôÖç/½ô’÷Ý´š*¾P½ „¢Z※îºËÖ“'Ÿ|²™?~ègƒ 6°uÂMÅÕ?”«½Á°Ï/—.?ÜwüüHÒž\ŠöŒ|ùSév½Bò9i|®svâiÎ7íÏ´©-²È"öšm¿ýöæÌ3Ï4gœqFÖ§K—.ö;ûì³ý?Øž£˜U!„B!„B!Jƒ„ ¡Ó"\rÉ%Ö©"® ¾û†›ÿøãÓËx Ä€…@§QF7„sÎ9Ǿä›={vÆ6uuu¶Q>)IÓJ#>\ýõæçŸN/§£0Ó:¥ÎÓBa„LëlÊ ^€ð’Žãù¼õÖ[YÛ/¶ØbvšÏ©$îws¥­’p½'NœhËÖo¼aGiå^ІÁ›Œd9cÆŒœû¥LàÜÄK³ÁƒÛQzqpBLÇÿÅ7ЦMsðÁÛseäR®÷AT’üK’ޏyÂo™޼h4hùÇ?þQÒt$É“BÒ±/ ß|óÍŒ“yé¾Ç{ľo”ú>Àïß½@e$ä ŒzË}ŽÎ\³BòÚ”{ÅW˜/¾ø"kÿ¯¿þº¥–Ñký{쥗^šqoäϲ$¢¡$tìØÑüío³ó;í´“yûí·³¶Á…Íh‹ 
¶TÄ=ç¤yYªk’÷B›ï·¶ÙfÛúàÑGmòØ¡ñ•bªÊÆS­)¦JO%©OâÞ'“Æ^¥>~Ò˜+iìRêX¨1l)Ï¡˜xµµÆZ…Ô銵k !ZQÏ*åŠ÷©Ó©\Ý'.eüR®ö»`ü}ÀdtÞ¿õÖ[mDGóM7ýöîÞªyÿÿø—’Md¬LE” …Š éJ#2ex\³ºÕ½$Üî%®D¦ÑEhBƒ(IE! *C£R*õÿ½?¿ß÷Xg=¬µÏÞÕ鼞ÇyìÚÃÚk}×:ëû={ßësò6ÛÇQÆøô À8`{Žô÷\zé¥VY1ÑÿÜÎ?7ã‡8ý<ã‡Ì¶¯0×êï}í÷`{ôë×ÏBu5jÔÈ×q>OÎÆçéÚ'—ߥf«ã~Ÿj›[¶li­z÷Ýw |NsÏ=÷X˜¹M›6ö»¬cYÕ(xà? <‹ÚJÿ4³€¨4nP¥k4Â_˜ ü©äδ1 èìºkþl§>`ö•;²A[´háFŒafëƒl]qÍš5öÅÑ›o¾i_bø/!ôÜ™3gÚ—lš|ª/ß d²ëõL&¡êƒv})¦ ’&‘ê ­‹¾4Ñ“'OÎû ½\¹r‘—w]õ%ß±Çk_”Õ«WϵnÝÚýüóÏnÀ€vUѨURâ¶i¦êׯï^}õU×¹sg›¬/8;uêd_4î³Ï>öe©¾,Y´h‘«V­š½F_ìœxâ‰öeª&>«ÂÊÔ©Smbª®ú¨e&絩Öm[ÒUsUíï¾ûî+ðØ¡‡jÁŠà~ÐÄp}!Õ¨Q£¤ûGÇÍõ×_o_Hé 8_æñÇ·/”zöìiû<ø¥¾@Ò•zEûCôÅ“Ÿ0|ÔQG¹;î¸#Öz]}õÕ¶-úâLôe^"¹Z8m¢IözOMh×ïbÓ¦M ,邏NÊ7A:n{Dm“LÖ%L¿kÚ6't%T½¯ªi’Bݺus^!4Õïš&0h¢‚Ú_W(ÖU]µoÞy缫ÍöêÕ+ßòâ´õ\`_ÒŽ?Þ¾àÖ9´råÊÖæC‡u{íµ—¾S;=òÈ#v>V»êõzlôèÑnöìÙv¥æ\Ñ>œ7ož]ýVûEçt­ƒ®=eÊÛg:Ô:‡gKÔmŽÛ–ÙÚ'qhr‡¨ØhBÎ_ÿúWû=QXï /Ü®c‡l¯Sm›ñTqSÅOÅéO¢œ'3{eóý3se2vI%ˋێÙÞ†¨mÇX+ó>±c-;‡d«dk¼¯ex ?¨òŽÆ×ªöï¥ g{ü’‹Ïï´}êôw„Îçz½þ¯öÖûúŠ:ÛB”1>ý0Ø^ãý}£þIŸQéï»dÔ÷IúLAáC]¨°ã‡8ý<ã‡üã‡\~ÞàU¨PÁž£ïõy¯Ž7µ‡žn8Ÿ'gãóŒ(ã«L?›ÊD&í÷3øTÛ¬d‡¬]µÚöý÷ßß~_Çgÿþ÷¿ÿ]èídÌ ócI õ‰¿lQqçwÚkÕ¿"9ÓTÍZß i\‘Ž.¤¡¿-|Ñß!¥K—.²Û®1™þöªX±bÂ*ꉎ£¸í øÑyò›o¾±Ïsvöó„.zÔ§O›7¥Ï•tží}^×gX³hîŠ}ަ «í¾ûîö9¨>{€ím§Bê%]gâĉóWBô']M& Jv¿èj‡ú ?6lØ0×£G»šhðCo}0qÞyç¹ÓN;-ï>‡ôE›&¢êË6}@¯Úõ%PÆ c­OP»ví,„t÷ÝwÛoººã„ ì1u4 …^sÍ5vèˎ»®Té}õ…‹¾˜PhLƒK½¯& +„uûâ´i&ûL4‘Yë©IÀšPªvºí¶Ûò×€ú²QPëËß–úUWõíëŸûÄOØ/ÉÄymªu‹rL$zN&í¤/€ô‡‚®n©}®+ä*ˆ¡%ú÷ïo_ÔV¯^=ï‹0]YS²Hæ•W^É›è¬×{ ©¢ ~ôaÆÛo¿ï5 ƒé ©:6¥N:ùˆQÖ#HaÛ~ø¡;õÔS“¾.Wë§MôÇŒhþ[o½•pyá íqÛ#j›d².‰èø×óôeìC=d¿ úÐŒúb3Êy#çMjÐru.ÓtýxúQ¿Í›7/T[3Æ®¸¬I:7ˆ>´T¸G÷é÷+¸ èªÄ:Gú+Xëø|оÎý¡ª/wõ%·&ˆèËd_Mï¯ ùºšm²/k}Û';o%{<Î6ÇiËTÇL¶–¤`ƒBÔš¸¢ªOE‰¶K0MÜPÿ® úÒ$²eËæ|쬽ã¼oº}VÇT©Þ?×cªLÆSEeL•­¿⎧âô'QΓ™Œ½²ùþ™Œ¹âô§QÎÃq–—¬OŠÛŽÙxÏLÚŽ±Væ}:c-ÆZŠ–¸«f¼ü¼yðàÁy÷iŒ®‰ìš,ÿ·¿ýÍí±Çù^“l,œíñK.>¿ þm"ºxÆ#ªP¬¿Iäàƒ¶ I°V®?«Œò7ý0Øžã€Q£FÙmºØ!‡b+«ôýŠ¿àfaÆqúyÆùǹü¼ÁÓwˆ:Vn¼ñFûLFô¹ŒÚ#|ÌÅù<9êßö©Ú.]ûö³©dŸK¤Z§¸í÷3øtÛ¬¶ÓïµÚS•)EŸ©]|ñŶ>Q&x¦Ú>ƬœÆ(sçε@™2eì\™É"Š….¹ä’|0P–àý¶¢qµúOýŠŠU«VÙBtqÊD~Ï=õ¾‡v˜…þÒ;v¬[¾|¹Ñ“… ‹Êï.¢±™.š¢‹WD9Žâ¶€âgÅŠv!]\ªmÛ¶Û}}ô‹þÕßåÙ>+ð©‹)«ïÚÞ!HQH_‡ë‚B·Ür c1 q«>#Õ˜ÃÓÜCÍ¥€íM—ܪŠõÁòÎNý$»ßR’}‰"ú€]Á(]™FZ'«d 
÷ÐUþªT©bÑeýúõ¶ìp2>Õú$³nÝ:ôèJ ºrg²×FYvœu ¶“ªVèKߺ …¾˜ ·]ºuˆÒ¦…Ùgú`CW'P;'ÎëË]ù³AƒöÅ_øC }©vÐë‚_¤Fù@#êk“­[Ôý~NœvÒÄ[M´mÖ¬™9rdçë "U‘Ò7uuM}ñ¥¶Ò€F¤i¬c)ÑÕ^uõ\}Ù”îËÜd¢®G˜®üÜsÏÙ—WÙ”ÇY\¶I¦í‘‹6‰òGàwß}gç Œê÷D¿óQι8÷®^£eë\žÜXض]õY ´ýZ×Tô!¦¾$Õ9X_6G½r²>PÔâº(@¢«Z«²š*ÄéËW}ˆ˜Ì¦M›Üüùóí*mQ®¤¤óžöA²`SºÇãnsÔ¶Lw.ÍÖrðèÚµk‘û²Â÷Ý:G%û@]kÿ…¯F™Ë±CªöŽú¾qÆVEeLåýs9¦Êdã ú`°=Ǻ€¤ú7ÿÙ_º¿åüsýß`™ŽâŒ!?ä?äúóUvÒŸ´Õ´´ßçÌ™có«hºíóyrº¿íÓCé>¿Éô³©TŸKDù\;Îç:qÛ,Ý6«M}åFMªSÉTÛ­‹“©:%Ÿe@z q)ü¤1ÆsAú<½cÇŽva‹ð÷›:ßFùÜG¦jwºxÂa={ö´~ScT ‹ ÿý‡¾# ^ˆw[Èô8p U~Ö÷澿ßt±y}÷³ï¾ûÚwAé(© ¶ê*ºpf6~o¶ÍUÐ8-Y2Ñq·½?ú›NUw” ä_|á†nótÎ&]4AUÖ¸â\ (Wt2]BF£Ï w”>9SúŒOãísK4·EŸ·è"ò°=)ÿX²8mp²I€©&&š¤«ÿE¹ ÞCÔ%ûÂ.Ý„ØDôåR°ªLÜ6Èt]ƒí¤jEAšœÉ:DiÓÂì³ *ØOØgœá:è ûléÒ¥ù¾œÓ(ºÒk&â¼6ÙºEÝo…i'}ù+ú29M•J}a£ãNT¤¢/ˆR}GÔõo‹®~«™*ÓdCœõÈe›dÒ¹j“tt êÊÊ™žsqî]í6míé ït_z{úB8ÜVÛ’Ž‹8ï¯ó\sM› IDAT^ªý“îñ¸Ûµ-Ó½g¶–óÚk¯Ùm°‚XQ‘¬ïŽÒ·çj쪽£¾oœ±UQSEyÿ\Ž©2O…1Uaÿ^ˆ;ž*L’ì<wœ‘í÷;¾ÈäýSí¸ËKÖ'ÅiÇl½g¶ÇfÅy¬gœÅX‹±€ûo•LÇû™H5ÎÖø%WŸß%;«¿ÿMS˜1[.Æô À8`{Ž4™)JÒÿ­þ¬!ÓñCœ~žñC…íöyƒßïQÛ#îçÉéþ¶Ow ¥úü¦0ŸM¥ú\"ÊçÚqÚ9n›¥ûÌJmšéäRmw²ïx³@~ªÎ«Jo êºH¾3P`]“ýTµì8À*'zº°ƒªë"ɪ2¾Jùí·ß^$«éüðÃv U»Ö¾Û– s ¨z´‚wºÝ–tÑ_UAkâĉv‘Íd4SkûªŸß›¢vÅi/Øè<¦y^ºÍ&Þ€×ß½„Ίž—^zÉ.ö¯ èâXE•¿x„újŽC;š’4ð¿ôå„®Èѹsg»’†>(NN9å»ÂØèÑ£Ýu×]çN?ýt»jŠJÉ«=ô>H9÷ÜsÝôéÓíÊS½{÷NYÙ$×2]—_~Ù®ü«¸²1È,êí‘‹6ÙÙí(û<ªk®¹Æ¾¬¾÷Þ{íKñ›o¾ÙýøãnÚ´iìÌ,ÓUpt%;}a‘«Éµã©c<µ#ô'¹~ÿtã‹l¿ÿöhÏ\½gq›1Öc-Åe,·Ï+ŠŸßÑ/ÝñCÜ1㇢ûyÃöÄ÷†ŒY WÔkü£àúÓO?ínºé¦ÏQÅ߇~¸À_UyW•uTý·(Ó0äC)’믪WÚ ÛÅ©žœ …9t±…·‡K/½Ô‚}ª¶*Ø7tèP :ê÷"Xõ²0¿7Eñ8ŠÚ^°#ÐÅvtÞʶٳg[õ½:uêùª‚Å‘ö.J¶yóæ"½þjQ/ÛAH àÆot=ô1bD±›¸¯+’èJK]ºtqýû÷·O¤´oßÞuíÚÕ®¬««/é‹Ñ‹.ºh»®s¦ë¡J䪫®Ú®ë±£´G.Údg·£ìótÎ?ÿ|ûpô—_~±ÿû?¬ôMÎWe5Mpˆ{c$§þC>ÜqÇ4O1žJ9žÚú“\¿ºñE¶ß{´g®Þ³¸Ík±€â2ŽÛçÅÏïèÈîø!î‚ñCÑû¼aGÀ÷†ŒY ðºõÖ[íßO=õ”»öÚk>O•táAO°U=N!8Y¸p¡U†VxJ•itžU JÁsU¹.W®œÑ?ÿüsW«V-«š¤À˜.d¨ Cèóû5j…—·eË÷é§ŸZõ½O<ÑUªT)év.Y²Ä*ôé¢ ÁW«VÍî_³f[¾|¹ûé§Ÿìÿú÷üùóm[¡HURâ»ï¾³‹X¨ÒT¢Jèé¶;ü¸&åôÑGö=Æ©§žêöÝwß¼e­\¹ÒMž<Ù* žp I«,û0áÅ_kû£HöÚlz\ËÞÿýí'ûZÇ´–¹xñb;†Ôvž*¿«¹œ}öÙ®|ùòv!U&׸4‘ÚmÛ¶m ý{“ˆŽAµ‰öíÚµ T/+L[È¢E‹lù:fSU.OwEm/}G¯~Aç,õ§ºT*êcÕëVýAÙ²e“>Wý¦Æ ꇴ\k½½÷ÞÛÎO »é9:çûÇuŸú-KÕŸêüºlÙ2;§kìáû‡T¾øâ »MvA(ÛQØõP¡÷Ð99Ó }Ô6 
®§æZè}Õï„ûíoY~þùg[/eJ–,™pŒ¤÷Õ{$ÚN=¦±U°Ýük´\=¦÷Rߦۃ>8Ò~ÓþÐXSÛ-ZO­ŸÆ,Z®n£nïÆíi;´­‰Ö!Óý¬}¢íÕûè ^Ëo‡–áÇÏÃêááPdÔ}’n»ÃëwZcC­Køw[m¢}£ãIÛ˜èýìüt9•­Ý»wwݺu£5€ÿ¡ÎS‚WV*n4¸ÑJ€h¡>4€ÙY|öÙgv«î@›Ù¤?Èô‡,Ä‚ñã©}<Åø‚¶kÀö 3Þ¤_0~`ü°ãñáMÓ¤ü¢ŠÏ¦³@.<ûì³VÉNúÕ×D­n¤ 7¨¢s"ï¿ÿ¾UŒûä“O섪´­0Ú?þñ;kr»&eËêÕ«í¯¼òJÞätÑ9Z†È‚ËkÒ¤‰UzÒXJ4]ó,Ãs-5¦»ùæ›íš€î]yå•î¿ÿý¯½‡.€¦‰í¾ÚŽ&’k=ô¼à:*ô×£G Ä«ç¥ÛîàãªN¬íP¨Í/SûD¡»GyÄýóŸÿ´‰æ¢Éðo¾ù¦;ùä“ ôkª|¥ekµîQ·?Ýx8Õk³q øÇU }̘1…Ú×2xð`[/MØO¤S§NîÑGÍû½zõlœ8wî\W½zõÏ×dþC=ÔB› #ö÷&H]”¡W¯^Ö&ÁuÒïC•*U Õ Fèb#&LÈ»OAËž={Z¨±fÍšù¶)Õqµ½_ßÿ½…¤ºÖ9KŸQ(ÊŸ«Ž?þx×¢E‹çKõ5cÇŽuS§NÍw.Ôg#:‡…ƒ„³fÍ²Š¼ €%¢ó”ú¿>ª<¬~+¸Ž 4°ûì®ãgœáÎ<ó̤ۨâc=fáÃ[n¹%ãíÈt=tá]HjÁ‚y÷©oÕö¾ð Z ¯W"qÛPë©`Û;ï¼cÛ¦0œÆO¢ñú™>ø ïs4ÑçZ7¶c!8FúöÛoíºàE›6m ´¯ú(w4þ·×YgeÛ¨ ‹ùöò}[«V­RöÅ£GvüqÂÇt1]l Ýö*|¨±Š.¶ÜV}nxÚi§Ùú%úˆºŸÕ–£FrÓ§OÏw i¬Ñºukû÷¸qãìa ˆÞyçí“tÛ|\Ÿ©i;|SËlÞ¼¹íƒ?üÐ?>ïBåZÖe—]fŸÅ(>”$ „ç û^¢+iìLøÒŽ6rE¼ñe6Àxª8Œ§_Ðvc-ؾcaÆ›ô ÆŒv<š˜uúé§ùíà³)Ƭ ª'šŒ'Ì¥ ž&÷+¦Ií;v´Ê/ZF¸JÒÛo¿íf̘aá+MWEEOè^»úê«-ä¥q‘&˜«:ö}÷ÝçN9å×´iÓ|ËÓ$q…Ù4![ÏÑE%RÔ¤Ë:uê¸fÍšå=W…¾}ûº /¼Ð&skŒ§IÚú‘K.¹ÄÂ_ °j… ·vØaùª2ª:žÖSa8U×dyMR×d}…îb¼çž{ ´QªíMä×vèq­‡&¬k=n¸á[?…t¿ÂÇ·P‰öÓ×_o¬ªöWxM•€ÂáµtÛŸJº×fëà„ÿL÷µ‰W\q……B† fA [?þ¸UBÒëõÞA¾ê§Ö?Q°ï¥—^²u Vƒ,ÌïM‚1Ú§j_w h?+«©Â†ÁŠ£qÚB묭ֳ~ýúöû¥P§Ž©ðïS”ã(j{€BS¯¾úª¿Ôè‚ ê­sšúú ût¾SÅZËôwÚœ9s,T¥>±]»vyUåTMOçw/uþÕ­B}Z¾.$  š‘§ê_TùyÊ”)ö\½F¦R¸Lá1UNv~S?¦å)ôg;2Y½¯„ªÔ§¾WÁRužW_U&møÍ7ߨ4>ÒcÚ§ÁíÖøIã·“N:ɪê=´] *0§À_ÉÆZOµÂxÚÚ~­·ªt*´wÞyç%]¦ž¯0ª.. õWnõuúl X‘:Õöj;µZ–ŽoUtÔ… Þ{ï=»„úÈD·£îg]h@Ÿu i¼¢uÓx)8fÒXTûI¿OªÜ¨cMcœ`UÆL÷Iªý,óæÍ³eèq­‡*ok=4–Ñ:j?è~…¡õþú}Ð8ìoû[Æc%EAH³g϶ۣŽ:*ßý MiÂ&ð+ȦÉýªJ¨UR(JÕtÂá7O£/¿ür÷â‹/æ«@#ªà£P„tž.`  Ó ‡©b8¸¥Ièª"© ž§ðÖí·ßnaƒ` L Ö²eK«€£ æÃ?lÉðDúTÛíÛ^묪’ò׿þÕ$ZgÖž~úi[®(ȦuÐ$t­k°‚îR J·ý©¤{m¶ŽTâìkU©TÅÎ'žxÂŽ+QPDZö£Žñðúƒ}‰h™:°ÌÆï÷î»ïZp@Ǿ*+y f(8ðÆo¸çž{._E¬8m¡Ð¤Â1µjÕ²êQ Mˆ‚»Z­8h’ê8ŠÚ^àCVÁêx ®õë×Ï." 
û}È[çó*Sð?x®QU[õË Z©‚èüª w ¼}ôÑvŸB o)ˆU±bÅ}q" ¯é| ejT OçÎTAHõa:·ÅÝŽLÖCa:… µ}W]u•]ðÉ÷Å3¬\¹2Òþɤ õ¾ÚfU% öáz¾¶M25†ñûU}£Ö]SÐEÆSರÔ^£5jÔ(ß±¥÷QûêØR 2U%ÔÚQ?g’+ɶWí¥}¼X€.Ž¡çèâ:!£îgEÀðD5>ÖXÆÓÿõ³páB Bj=µÙØ'ɶÛÓ1¦‹1¨ª¤C+Ô© thœ§qˆ‚™¢ ®ª†ëø÷ÕµDŸ@NøI×Á ¢(„×¢E‹|?š4¯*Bq)D§ª‰&U«cø½ÅWBÖäé0M&†ÁD¡DY°`A¾û5¡_T‘&š0.ÿûß TÉÓdoMÆ_»v­…Íâl·ß‚ô|5#ï|RÆS…? WSTEJI`+Ìö¶íâ´E2qöµÂ 'úv ??Øfá6UR5/žÈæï–¢ aa>(¡*b™¶…W*HéC¢ÐÇСC¶uªã(J{€?OCþ>/7nܘï|åƒã ó…)p&ª:ç)T%ÁªÍ¢0œ¨ªmÔu W¦ôa´dËÐ… S5<…ЃânG&ëáø{ø¤øÊŽQeÒ† å©? ÷áª^(õêÕËWÁXTéPËÔ>ÏVx^í AŠúgl)˜ ɶWýi¢ŠÉ~,  •…9Þ|­ dª0û$Ùv·Ã‡ ÃÇ·”>)Z†?¦Â•%ìü¨ rB“š—.]jÁ¦`ř믿ÞÕ¬Y3ïÿO=õ”³6lØû=TA']% Mÿý÷-h  7Ë–-³û5Y;,@J•*Ù­ŸàïuêÔÉBh·Ýv›=z´UÄÓ$ï%JDZw_%/\ÅÉSE±cÇ&œPžn»m‡&$zL¡QßVA~}¢@ia¶¿°m÷ˆÚFÉöµ¯ˆ¤J‹¾Ê•L›6ÍnÃIQ(@…$}X±mÛ¶Yÿ½Qõ$Q/U= òU½ÂaÃ8máà :>ÃÊ”)c·á Cªã(J{€„ÃWž‚ØêÛýõ×¼û|[ÕäTM/è·ß~Ëwn’råÊÙ­ž òiììC3YGnôï¦u”Úµkx,îvd²þõþ< F¦“Iê¾`UcÏ÷=‰ÖÉ÷'ºp@¶‚pqŽ­ÂH¶½Þúõë­ŸU5Dí'ÝŠ*mæxSxQ`P¥H]ÜAUǘÔÅ0¢*Ì>I·Ý‰¶cÏ=÷Lú˜vfò·€¢ $ȉ5j¸©S§ÚÄë Uqñ•\ä­·Þ²@WÜŠ~élݺՂvª¤÷ûï¿»Ò¥KÛ$ýÍ›7ç=EÉ’‰§[ª:ÓG}ä:wîlUÇoÕkžþy× Aƒ´Ë]²d‰Ý& W”/_Þn}¢°üd÷Díœì1?ùù¤{଒hÆ -¤:|øpÛUT ómwêø4hZ·nõß\©ŠX¢Ð*+ùê ™´…ûÀK©Ž£tí騗à|pí‡~Hx^QÀÍWœ]”àã?v“&M²‹(¯ è\¬Ê€©*ÚFíùã?ÜÌ™3í|­óXÜíÈd=T}:ØŽ™Êfúíöa¸0„Óøn[[¹ 1©BŠª½iÓ&ëƒõÞ:>âŒY“ígµÿu×]—w ]AÁ8ÿüó­²öޏOüø&Ѹ5Û3(:B€œ8î¸ãìV¡¯®]»&­¶“+÷ß¿{ôÑGÝÙgŸm!6_ÉO1U]ÊÆ$êN8Á&•õÕWî_ÿú—UúkÖ¬™ýÿ€HùZ=¾bÅ «Ò§ êaË—/·ÛÃ;l»íCv[¼xqÖ·¿0¯ÝÖFe·ÕªUs½{÷¶?^x¡ëÕ«WÂР$†ƒcÆŒ±}~å•W&¬dYØßKª&õâ‹/º:uêd½-|õÐ8ÝtÇQªö€t|å»`uÛ½÷ÞÛÖ œ'«`ä«4ë§0Ÿ~tž?æ˜c\“&MbU΋CU|×­[gçþDáõ¸Û‘‰lزنêÕ.Ú·‰ªûð¦¿h„øq]²ê‰™Ð:„­\˜8q¢›2eŠù4òÉPVôÈÆ˜U¯ºê*«Ž®°ªªŒ¾üòË®]»v‘*kg²O ÛB€œPå™GyÄ*ÏôèÑÃB]qøIß~z\¯¿þºÝjyõêÕsº­ Y8Э_¿Þ <ؽÿþû®M›6)_sÄGX(S“Ñ?þøë~Q€`{ñDRËæöGymaléß¿¿URœ6mšT"PuÆdU‘‚mu*ð)mÛ¶ÍÉïŽ+UŠüòË/s„Tå(…5TÑ3\ 3Ùqå8JÖ^à)€¥ yÁ@*å©R¯X ã)|­ Y”ágŸ}fÕo¸á té½´¼ÝvÛ-§Û¤0šè ‰ÄÝŽL”-[ÖÞCçápÕF_0Šl¶¡¶[Û¬`¢~A÷Ë~ûí—wŸ*-‹äÅÙ3¢[鯭™TÔE ¤yóæ®bÅŠ9=æ²¼à‚ ¬Bõ¬Y³Ü‚ \Íš5s²O Ûv¥ @.¨Š‘*2J·nÝ\ûöí TÒ¤ó_~ù%áë+Uªd·>$—Ÿˆ®jKžª=õÔSYÙ>MøóÁ¸(æ}î±Çsßÿ}¾ÇTÍï“O>±`„*Zn/ªP¥‰ýª`¸aƬmÔ×öÈ7 è8þôÓO-©ÿ/[¶,ék|°/nѱþæ›oZ˜¥Q£F9ù½¹üòËíöá‡v«W¯.ð¸öc&! 
.²Ûgžy&_°dΜ9îâ‹/¶+<õ8JÕ^¤sîСCíè}þùçV1Qá®`ue…×EÁðD• VÔ2U¥ù½÷Þ³>Y!>ýÍš59Û]`îܹvÞOVý9îvdÂ_pAãŽà9Zçl]  ªl¶¡¯Ž¬Ð}øõºˆ„–¯0^°Ýè,Q¢„'ê§=…÷^y啔溜„­éÓ§[_«`_¢ÊÝa>ˆ©÷Ëdœ!ÁjËZ]€![¿;a> ê+‚æbŸ@¶QäŒ*ûi"}—.]\ïÞ½-à§ê‡5jÔ°ÖäÉ“óPåÊ•Ë÷Úúõë»W_}ÕuîÜÙ*åh2z§Nò&b§Ó¢E 7sæL ’©:ž‚ƒ ²P&‡ÃZq((pâ‰'º3Ï<ÓµjÕÊ&ÉO:Õªý©R Ö=UãQÈqüøñîä“Ov矾«\¹²4¯½ör?þx¾êDÛÚ>ûìc&UU¤jÕªzûã¼¶°Ç@¶ôíÛ×5nÜØÝwß};ôÐC-t肞*,‰¶ÕS DÁ…ÖÈÅïM“&MìØ1b„½æŠ+®pGy¤…âT³_¿~Ö7*§Ç{¬AëÕ«çZ·nm!¤X`GÁ—¨ÇQºö€ …ãgÏžm+u^Óÿ´>묳ò*ò‰Î{Gu” ÜV¿¡Švêƒ Óý-[¶Ì êߪN¬ªÄa:Ïþå/Éz…f­»‚Œ:w&ëëãnG&T™Ym«Ð¢úµ­ú*…Luÿ¦M›"-'›m¨uP î»ï¾³>X먾Dû^ÁûR¥J¹óÎ;/ß>ß}÷Ý­}t±…çŸÞú@õOj#]€ Õv( <¶ôOvl%£1œÆcÇŽµ ¦ÚPý¤ªd¦£}¬ö=z´;餓l[4vU¨Rã…ÂŒYuŒ=ûì³6^Q;êÂK—.µñ°.>¡õÎÕ>€l# rª]»v®iÓ¦îî»ï¶Ê6š ?aÂ{L½51þšk®± ôA×_½…ßxã ׫W/{îm·Ýfù*6štL÷îÝ-L¦€–e (X§0XÆ ó½6Õò4]?ÁÇ´.wÝu—{òÉ'ó¶E–{â‰',Äèù×%Zö˜1c¬êŸÂ Z–”.]ÚB‚º¯zõêùžŸn»S=žê±Të¨mR€M“ó;tè{ûÃâ¼¶0Ç@²ÇãîkQSA‚{î¹ÇŽUåR¸BÄþýû»Ë.»Ìö•hΛ7Ï}ûí·ø«ZµjÞrö_ 4¿72lØ0×£G«,ùïÿ;ï~R8í´Ó2n‹’%KÚº(`©}sÿý÷[ÀEëòŸÿüÇ‚Q#/Y{€ø`Íš5-˜7jÔ(«èÏk:WêܦPù|à¦L™â&Mš”¯Òù¦J•*y÷)À¦ |gœq†‡tÎWMámU2dˆ»ñÆ-Ôæ×'hOtŸ§`˜?æ«§«„g;2Yݧs¸ú…Ü&Nœhc Õ×(Ÿ*¼Ÿ­6 »òÊ+­ßÓ">þøã¼>H¾æÍ›»Š+xú8bT­þH{mGÏž=“¾Ÿ‚’§œrŠ9rdÞ±¥ “:¶RŒBï£öÓ…T5Qëê/în{5î[·n…OÇgûJÅn¸ÁƱ‰öYÔý¬õ8ýôÓm,á/: º@Ú+Ügû×%ZvÜ}’n»S=žj=R=`禨õVý¡¯?¢rM½¿ùæ«H£‰Óš@ʪU«¬ÚŒž É­]»Ö&ê§{½ž§‰ß (0!š˜¯ÉÓ>–ny ¾% …mÙ²ÅÂ]ZžÖO“æÃô½>U8PTÙIá:êRMìN·Ý©W…#­G¸ò”* ©²–oŸ …Ý5jä4h`¡‚¸ÛŸLœ×fz ${<ξVe)…š5kf‰0…5UAKÕ;U}TìÚµ«p}QÇ *^*,© C®o<…/^lUŸL ïûLŽûàã .´cÖ/WUÊ¿&Õq”¨½ hãÆÖgëü¢°*îéÜ•¨ßJvU¨×(¼¬Z§þWót.SÅÞ0UÒUO¡1…åüúè¼<§&ºÏS8/NS¯€¿B7ÝtS¬¾ Ùvd²‰WõeÚü²hÔ¿SM²Õ†ÉhŒ¢ñ‚Ö+Êóµjãàóm‡Æ}úô±P .Rà-UˆT%ñL¨oÔújÿûÂ(Û«çh½5&RÐ5UûÇÝÏÚ6§´<­›Ž¡Dô<='ÕÅFâì“tÛêq)´áã\ãV½Î·€âAùG*B€mªL™2y•ó¢¨P¡‚ý„¥ Ÿ§*RA‰&§Zž&Ä'¢ æé*é9QÖõÀ´Ÿ(Û“éãÉ#š`žì1UvR¥$UHZºt©;øàƒcmªv‰úÚLdÇÙ× Š&Ý'¢@Š+¾öÚkv«J‘Þ‹/¾hˈR 2¿7Þ~ûíg?™/ÉŽûàãá*lÉB ©Ž£DíAÁP–Sª ÷ªŸDV¯^ò<¯0–(<–h}RÝç/¼ 3f̰÷‹{^Oµ™¬G¢Ç÷Ýwß|÷E Þg« “Q(1N0QÛîû¢lG&ÇV¢¾1Qÿe{õœðû'[ï¸ûYÛÞ·ÉÚ ÊºFÝ'é–•êñdc [ AÅÓ®4’Q`±cÇŽn2dH±Û~U®RÆÑ£G»ë®»Î½ð VÑpذaVõJ!¾ºuëºsÏ=מÿÕW_¹/¿üÒsÎ9ù./¿ü²µe¢JYÅù8JÖ^°­(˜­àØüùó­r¡ªö.Z´ÈÍ™3Ç :ÔÍš5Ëž£ªÙ¢óžgµjÕ¢ ˆˆŠHéÆot=ô1b„ëСC±ÚöÊ•+»‘#Gº.]º¸þýûÛ§Ê¢íÛ·w]»vÍ«L¤6R¸åŽ;îÈ·(ŒRõ³8GÉÚ ¶•²eËZH}ܸqnúôéö㩟ñ 
6ŒTQ0ªóÏ?ßÎ}qªÒ†€ân—ÿùÙÚ½{w×­[7Z mÙ²Ånæ+®V¬Xá–.]êÖ¬Yã9ä IªV*nÚ´É•*UŠƒ&ÂqD{Ø‘¬[·Îýúë¯nãÆnŸ}ö±ŸâÜïíìm¨>iÉ’%¶ŽåË—gçÀLùGâôH‹ ˆs•*U²ŸTvÙeB}1Ž#Ú Àޤ`2}F IDATL™2öƒâцꓪV­ÊN€"bWšì¨B€AH°Ã" vX!°M,Y²Ä͘1ƒ† ‰_ýÕýðÃ4!!ÀNaëÖ­nÒ¤InöìÙ ¿óÎ;]=rúÅÅèÑ£Ý 7Ü+¨±yófW·n]W¯^=·~ýúbu¬eÒ^€?û¹sç‹sèüáúôéãúõëgÛ½#ôw .t+W®ä@¤]`»# ¶MêŸ5k–¶·¸ÇZ&íø_+V¬pƒ rï¼óα>¿ÿþ»›O•î½÷Þ|÷)L¦j€ûï¿¿ý$²hÑ"÷ù矻#Ž8ÂsÌ1I×Ç/¾øâ¥{Ÿ¨ï‘Ê’%Kì=Ö­[çŽ;î8W­Z5»_UUAPAHY¸p¡Ûc=,@§JP>¡uüòË/]ÕªU]¹råÜW_}eëT«V-«œ˜h¯ٲe‹ûôÓO­Êâ‰'žè*Uª”r¿iy‹/¶ð¡*1zt+S¦Œýûì³ÏvåË—wC† q}úô±ª‡É 8ÐnÛ¶m›•ã#hÍš5Ö¿üò‹«]»vÂjL…ilkqÚ ÜæÍ›­ÏÔ¹üCq{î¹gÊç«ÒâòåËíVýdÙ²e“>WAÄ+VX­åªòöÞ{o;woݺ՞£þÐ?®ûÔ§«Q¸_ýβeˬ¯;ðÀóúÎT¾øâ »ÕX!Óí(ìz¨/Õ{¨¿ÊôQÛ0¸ž?þø£½¯úãpŸ¬ý½råJ÷óÏ?Ûzí»ï¾®dÉ’ ûz½¯Þ#Ñvê±Ýwß=_»ù×h¹zLï¥>_·|p¤ýæÛ=|Ld»]!@Ž(üöÝwßÙ„þ«¯¾:Ök§OŸîêÖ­ëÎ=÷\7f̘|ýôÓO.r&LÈ»O¸ž={XŽ&¦O:Õþ(À–ì}â¼G2 hÜ|óÍ®ÿþ6ÑÞ»òÊ+Ýÿû_w÷Ýw»Þ½{çÝß²e˼¿ÿþûyUý:vîÜÙB‰ÿøÇ?l»4‘_¡€DÛ|M“&M¬J‘&è‹‚–ݺu³Ÿ°Áƒ»öíÛ[8!‘N:¹G}Ôþ­ÂQGå>úè#·`ÁW½zõ„¯Q @ÛS³fMw 'dåø7º;î¸ÃõêÕËÚëW¯ž{å•W\•*U ìç¸í‘Íc-j{’ÓÅFŒaðçðã?ÞµhÑ¢@EõÃcÇŽµss°ŸP`^çöppÖ¬YV©X•ˆQÿ¢¾VÁBÚU‘Y}ºøû4h`÷+ô\Ç3Î8Ãyæ™I·KA9…;T`1ÓíÈt=tQ†×^{Íú'OcmoqÛPë©àà;ï¼cÛ¦Pd—.]ì¹;©ÿýàƒ¬ ¼ÝvÛÍ5nÜØúõ`õe­»Æ5jÔpmÚ´)оºè‚¤›y 'j=Î:ë,kû‘#Gæµ—ïó[µj•¶Bg¢c"›í ø¿¿©h ª 'šŒžny2Á ÿþÿçœsŽ-»~ýú SÐî…^pM›6-ðúo¿ýÖ‚ûì³UŠò>qß#™Ç{ÌõíÛ×]xá…6©¿D‰nüøñö#ªŽ¨óÏ>këÙ±cG ?¨­Uƒzûí·ÝŒ3¬²ài§f!ÂTm%£F²à¢‚÷ÝwŸ[ºt©{衇\÷îÝ]:u\³fÍòžûÙgŸ¹+®¸Â&è6Ì F>þøãVÁH¯×û©—oçdÁ¾—^zÉÖ-X 2LJ è}oºé& R >Ü=òÈ#";w®UwÊ´=rq¬Ei/@bßÿ½{õÕW-l¦~rõêÕTÿ¥ó½‚îA  ©/P8]çxU"œ3gŽûðÃÝ€\»víò* *Ȧ¾O}‰ú%Ý*Ô§å«Z³‚rÁ€}²~wþüùnÊ”)ö\½FU‹'Ožl>UUNvîWÿ®å)xg;2Y½¯„ j\¢`©‚ƒêÿÔ‡G•I~óÍ7vñ…Ã;ÌÓ> n·‚¯ (žtÒIV=Rï¡í=z´U¹T°³0ãJOë©¶QEním¿Ö[U:Î<ï¼ób/?[í øAH³g϶[UÁ R˜Lþƒ4_?MîOEA6M ¯U«–7nœM¬U1ÔëõžÁIèóæÍ³[@‹"î{$£à¢(xç'þ«"¢*:ÊÉ'Ÿl?ªœ¤`ܵ×^›0éi2þå—_î^|ñÅ|RÑÄþ:X(ÓSPïöÛo·ðD0ø§*•›6mrO<ñ„U?µ¯ÞÔ~ ¯_0Ø—Œ–« £B–Ù:>Þ}÷] A*ô¢jWÞ©§žja‘7ÞxÃ=÷Üsùª>Åm\kQÚ ˜¾)Pç)¸Ö¯_?«ü«û}^çYõ M^rÉ%ùÎêö«€”ª*(êwTuP·£>ÚîSxM!=]x bÅŠv€tTmX}Q0”©uRÅCõ)©‚êÛÕçÅÝŽLÖC¡I…õ´}W]u•U\ôã…ÿW®\iÿdÒ†z_msëÖ­ómô|mÛ~ûíçþú׿æíW´îºÐ„*E* ©Àea©½N?ýtרQ£|Ç–ÞGí«cKÈ8²Õ®€?íJ€\P•Q•à§Ÿ~ÚµhÑ"ß&Í«ŠP:>ô¦€›¦‰&™:´ÀóW­Ze·q‚qß#MøUUÌ…)U=2jR‚†þ¤yóæv»`Á‚|÷k¾„ƒ‰ªŠ˜èùÁv 
W§ô>ùä«Z¥ààÁœµãCáJQ¬0úP¥¬Â´G.޵típ)Ï¡Á¤¿OýÈÆóÇ} ^¡³0… eñâÅy÷)T(ûî»o¾ç* 'ªøuÕ)}è0Ù2TéR¡8UET8?(îvd²þª6éÃzâ+;F•I–-[ÖúùðØæÓO?µÛzõê¨î¬Š–Z¦öy¶., ö † Eãl-\¸0ö2³Õ®€?Qä„&•/]ºÔB_ª,è]ýõ®fÍšyÿê©§,´¶aÆ´ËôÑU(¬L™2vœLï'݇ÃvÙ|d:uêdá½Ûn»Í=Ú* j²‰%2jOUWŠ[õ(¨J•*Ù­,xU«Vµ[UZô•œdÚ´ivHŠÂ’($)>°Ø¶mÛ¬_ýµÝª¢Ò›o¾™o¹¾ÂR¢°aœöÈű–®½É…qžêË–-s¿þúkÞ}>œ®jʪ¨ôÛo¿å;oK¹råìVÏ ùR”p¸/Î:ú>ÿo˜ÖQj×®]౸ۑÉzø×û>*(àK'“6Ô}‰ªû>9Ñ:ù¾VTX½zõ6?¶¢ÊV»þDäD5ÜÔ©S­"`ªøøê@òÖ[oYÐ-J¸P“ÑÅO¶OÇOlZÉ)“÷HFÕ›>úè#×¹sg7nÜ87~üx«ÞôüóÏ» l·ýR²dâ飷Ür‹{òÉ'Ý<à-Zä6lèÆŽë†nÛ¢jŠa¾]…;7oÞì dÁ‹Ö­[gõøX²d‰ÝªRV¢E­Zµ¬ŠdaÚ#ÇZªöd¦téÒv«Ê}ž®ýðà Ϲ ¸ùŠŠR·n]÷ñÇ»I“&¹_~ùÅ. jƒê£T0Neé°]wÝ5écüñ‡›9s¦õeêÃânG&ë±víÚ|혩l¶¡ßî=÷Ü3áã¾Róï¿ÿ¾Í­¨²Õ®€Àßn4È…ãŽ;În†ëÚµkÒj;qT¨PÁn}…¤t|ˆmñâÅ9{TN8á ~õÕWî_ÿú—UHlÖ¬™ýÿ€Ø¡öרQ£ì¶Zµj®wïÞö£ðá…^èzõê•00艉‚cÆŒ±êŒW^yeÂJ–…9>¨PÅÆ_|ÑÕ©S''í‘‹c-U{2ã+«þî½÷Þ>W?YUÁ yóæåûæÓúÀcŽ9Æ5iÒ$e˜±0TáxݺuÖ'& öÇÝŽLd+T˜Í6Ô¸Aí¢}›¨š³–/_>ï>Á„-[¶d­m´áck[·+àO!@N\wÝuî‘G±j@=zô°°[aU©RÅM™2Å*-†+úY&z,[ïÅÑGíèÖ¯_ïìÞÿ}צM{ÌOÜ÷“í·—þýû[%ÅiÓ¦¹åË—»üÑ*3&«ÆlD¡N…>¥mÛ¶Y?>Ž8â«ùå—_æ,™‹c-U{RS(Nƒº­[·Zc †âÆS0]•£?ûì3«®xà 7XÈNï¥åí¶Ûn9ݦ/¾øÂnk×®ðñ¸Û‘‰²eËÚ{¨ Wmô•£Èfj»µÍºèA¢>S÷Ë~ûí—wŸ*P‹IÆÙÁ¢[Ûº]Ú•&¹ *F>ú¨ý»[·n®}ûöªâhÒù/¿üy™]t‘Ý>óÌ3ù&‘Ï™3Ç]|ñÅöoM\÷TaIACU&ܰaCNÞ#Â|¨08¡¾R¥JvëÃÛ‹*(Íš5ËöÕ§Ÿ~jAHýÙ²eI_ãƒ}á‡öé›o¾i¡Feýø¸üòËíöá‡v«W¯.ð¸öõÆ Õ¹8Ö’µ =õGC‡µ¾ÁûüóÏ­bâ¾ûîkÕ‚=ûE¡ùDù6oÞœ¯r –¹bÅ ÷Þ{ï¹ï¿ÿÞB|úÿš5kr¶=º8Âܹs­?<ì°Ã>'îvdBå“O>Éש?ÓÅ¢ÊfúªÑºAøõ3f̰å+ l7K”(aljÆ0ž•¯¼òJÊ÷Ó ÂÇÖôéÓm ¢°eðØÚÖí ø!@Ψê¡&|wéÒÅõîÝÛ½øâ‹îøãw5jÔ°ÛäÉ“óÂaåÊ•K»¼óÏ?ß{ì±ЫW¯žkݺµ  `aMºÚgŸ}¬zà×_mU}ªU«–õ÷HDÞO<ñDwæ™gºV­ZÙ$ú©S§Z•DUY¬_¿~ÞsõïW_}ÕuîÜÙ}õÕW6é¾S§Ny!€m¥oß¾®qãÆî¾ûî+ðØ¡‡j¡Cô,X`·ÚÖ MðWÀCE…²}|4iÒĵhÑÂ1žÅW¸#<Ò q*„Ù¯_¿ëG.޵díHO˜={¶…àÔO蜯ÿ+„~ÖYgåUXõ Gu” hWŸª*ƒêŸ”Óý-[¶Ì «éߪܬŠÍaêþò—¿ä=7[´î 2ªO V" Š»™PÕjµ­B‹ê;Õ¶êÃ2Õý›6mŠ´œl¶¡ÖA!Çï¾ûÎÆ'ZGõ³Ú÷º A©R¥Üyç—oŸï¾ûîÖ>ºÅóÏ?oãõÛj#]˜!Õvì±ÇùŽ-]!Ù±µ­Ûð'‚ §Úµkçš6mêî¾ûn7mÚ4› ?aÂ{L“Ö51þšk®± ôÞn»íf·šèT²dI{½‚oãÇw÷ß¿M®×ëÿóŸÿؤóðkîS8môèÑ®C‡ùKô>™¼G˜¶ë®»îrO>ùdÞ¶úuyâ‰'Ü^{í•wßõ×_oïóÆo¸^½zÙko»í¶´m‘êñT¯Q0Q?áÇÆT ãž{îq 6´ÊS (ŒØ¿wÙe—¹êÕ«ç4çÍ›ç¾ýö[ üU­Z5ß²ø”¶mÛæäøaƹ=zXUÉÿûßy÷+(¡pÄi§V¨öÈö±–ª½Éù`Íš5-@6jÔ(«èÏùêCtNSØþƒ>pS¦Lq“&MÊ×Gë\\¥J•¼ûfΜiÕÏ8ã 
;G«?ThMÁvU2dˆ»ñÆÝþû>Á ¢û<…èôxø1_ :]•à8Û‘Ézè>õmêW<œ8q¢+]º´ ÕëB©.j­6 »òÊ+m< ŠŠüq^߬‹34oÞÜU¬X±ÀkÔÿ+`¨£Æ/ê§uñmGÏž=“¾Ÿ‚’§œrŠ9rdÞ±¥ “:¶DzŒ†‰l´+ ЗýÏÏÖîÝ»»nݺÑ çÖ­[ç¾ùæ·çž{ÚdvMjOdíÚµ6a<Ùã ê-\¸ÐÂy~º*$i¢0 ¦[£F\ƒ lR|œ÷‰úÉlٲłq h[5©>™U«VY%"=/”ŒÒ‰O·]ÁðŸª')(ЬY3 „)¬©jF?þ¸kß¾½Ý§``×®]­šc0ˆ¨ª‡ªz©À¤ªåêøRUÉÅ‹»òåË[ø"Qe­8í‘‹c-Y{ÒÛ¸q£…ÚuÞUØîÇ´sº‚QûUùÓkjVøSeÅgŸ}ÖÎñªd¦*à ò)d§°œ_õ'Áþ&Ñ}žÂyÁ¢.4 ‹(xÓM7Åê#“mG&ë‘èqU¦VÈÐ/[Fý;Uh/[m˜Œªj,¥õŠò|m‡Ú8øüDÛ¡qWŸ>}¬¢³.Þà-UˆÜ{ï½c£©Ú=“vüIùG*B€mªL™2yUS ‡Ã4I=\JÕ‘ÂT™H•~TágéÒ¥îàƒŽü>Qß#MnRIH*T¨`?™´E¢ÇÓmW‚‡²uëÖ„ÏWèB‚• _{í5»U¥È _|Ñ–¥daŽ ýöÛÏ~2=žÂí‘‹c-Y{Ò †Î4Seȸ}‹~Y½zuÊ>PÁwQð0Ñú¤ºÏ󕉽3fØûÅíïRmG&ë‘èñ}÷Ý7ß}Q.H­6LF¡Ä8ÁDmGx\e;29¶¢¶{&í §i°3S±cÇŽ69È!4HªÎ¤ Œ£Gv×]w{á…¬¢á°a첓B|uëÖuçž{®=ÿ«¯¾r_~ù¥;çœs „8^~ùekóD¡Šë±–ª½Û—Bë ªÍŸ?ß*ªšñ¢E‹Üœ9sÜСCݬY³ì9ª˜-꺫U«m@DDÈÀNïÆot=ô1b„ëС R¹re7räH×¥K׿ûñJ—.íÚ·oïºvíšWQí¨ÇwÜQ`Y Q*xàr¬ýß±–ª½ÛWÙ²e-¼?nÜ87}útûñTµO hذaV+øþùÖ/Ä©rHŠ»]þçgk÷îÝ]·nÝh °ÓÚ²e‹Ý*¤‡äV¬Xá–.]êÖ¬Yã9ä IªÊS*nÚ´É•*UŠ‹p¬Ñ^P4¬[·Îýúë¯nãÆnŸ}ö±Æ ;oª¿^²d‰­cùòåÙy°Sþ‘8=(2DS©R%ûIe—]v!ÔãX£½ h(S¦Œý x´¡úëªU«²Ó ˆØ•&;*‚`‡Eì°B€AH K–,Yâf̘AC2ò믿º~ø† „ $ØalذÁ1Â}ùå—l[ÈwÞézôèÁA²Œ=ÚÝpà ±Â(›7ovuëÖuõêÕsëׯçw dëÖ­nÒ¤InöìÙ‘ñLö€‹Î­sçÎ-ç?þøÃõéÓÇõë×϶{G8o/\¸Ð­\¹’ýÉ6ÀvGl3šþÖ[o¹1cƸŋx\ᯖ-[ºÛn»-ï¾U«V¹‡zÈQ…µzõj aùŸùóç»uëÖm“mO´mQM˜0Á=øàƒnÑ¢EÅò¸Éæ1Õ–-[\ß¾}ÝsÏ=ù5cÇŽuË—/w­Zµr¥K—Þ!~úé§|¿ÁŸ9sæ¸M›6å¬MU-³aÆ®M›6‘ñLö€ËŠ+Ü AƒÜ;ï¼³C¬Ïï¿ÿî&Ožlcˆlûæ›oÜÚµkÝÑGíJ–,¹Ý·UÕ) àœÓý™Ë6å€GIšäÚ /¼àî¾ûn·téÒ|÷Ÿ|òÉð:óÌ3“¾väÈ‘®K—.®jÕªnÁ‚…ZZµjX‡]vÙÅ*øÝsÏ=îÜsÏ-ô¶*¸U¢D‰¬¶_Ïž=íöÒK/-ÒÇA¦m“Íc * åÊ•³ž*F1pà@»½êª«vØßš5kZX3™!C†¸ /¼p›ÉŽñLöäÒ¼yóܸqãìÜÔ±cǬ.û‹/¾°ÛÚµkÓ¦E*zîº+õÊ WB€œQu¹ë¯¿ÞB`»ï¾»Uº;å”S܆ Ü'Ÿ|bUñtR¥Æ=÷Ü3á2êׯoöt[XªÆ'ÿüç?-§ªKª@ôᇺ¦M›ºçŸÞµmÛ6ãå7kÖ̶I•õªW¯ž•6Ô:+W¹re× Aƒ"{,¦m²y DUªT)wÁX%¬‰'Z%ÃTT5ë7Þpp€kܸñû;ðã?Ú­B…‰ªtÒIÛüØHuŒÇÝk:W©b£n³IýÂܹsÝ^{íå?üpÚ´ˆy饗Ü×_íÚµkç*V¬È/ äAH3}úô±X¥J•Üðáà ¹fÍšån¿ýö”ÕsŽ<òH÷úë¯gu½„T(ÍëÞ½»»÷Þ{Ýßÿþw«†—,–Îo¿ýfÕ€Ö¯_ŸµuU»i™—\r‰U¯,ª Ó6¹8¢PuBðT0]oèСî÷ßw7ÝtS¾ª—;êï@·nÝòýlOéŽñ8ûr­B… 9©Ð<{öl ÏשS§ØUÌU›nKÚw[·nu›7oæ—r„š» 'Ö¬Yãî»ï>û·Â`‰ªÙ{ì±nôèÑ®téÒI—£€ÔŒ3òª9z[¶lq_|ñ…UâUR7nœU–[¹re¬uUu<…ÂT-oæÌ™WåÈ÷Þ{Ͻúê«V•.¼|­Ãüùó-ì' 
.´ÿ«2Ö?M”Ÿ:uªU\±bEÒuó¸‹/¾8áãK–,±e(ˆ§÷ÜR­C”¶Ñ­öåÏ?ÿlÿÿꫯÜ+¯¼b!ÁTÇ@øu:&>þøã´mªÀâ´iÓܰaÃܤI“l}üϺuëòžwöÙg»òåË»!C†¸7¦lƒÚm°¢hQúHö¾™´¯·hÑ"÷æ›oÚ~T8$Óc<Î~°óSÿùÍ7߸yóæåõ-©èܨ¾GÏÿå—_R>Wa¶¥K—Zõb½æ§Ÿ~ÊûñçÏt>ÖØÀÓ}Ë—/Ï ûëü©å¨¯ ö+©è|+Çw\ÆÛQØõÐrU•RçøTçítÔ6ÚGZ–ï£RIÔ¦~ýµ=Úv¿ï¿ýö[kƒ¨íZØåh¿÷Ýw6>Tû‡Ÿ¯eêøÐ±#ê3õÿU«V%mC­ÆNqŽ!@Ž 4È&Ò+èÕ²eËŒ—3}útW·nÐÈ‹E IDAT]wî¹çZËûì³Ïì~ÍjÖ¬i•ø‚ᬫ®ºÊõïßß•,™~º¤B‡~¸V¯^w¿þÝ¡C åù îRªT)×¥K«")wß}·ëÝ»wÞãÁí}ÿý÷ T±›0a‚»ì²ËlR¾¨ ž*ôé'Hè–”cŽ9&ßc ¥Ý|óͶÁ°å•W^éþûßÿn“}e¢´ßÇ;wvp€ûÇ?þaÛ^¶lY $;‚¯kÒ¤‰U“òÇ@²6r ,pÕ«WOøþ´-:O8á„"ù;ê}ã´¯(rÑEÙqîÕ®]ÛõìÙ³ÀsSã^Ôý`ç§ üˆ#òÂc:üñ®E‹ª(ª;v¬c‚´Ê•+Û9J}LBÛo½õV0žW¯^=;/[¶ÌÂíªÖ«þNü} 4°ûÜ®ãgœáÎ<ó̤ۥ¢Bvª|àf¼™®‡¥¯½öšc=õÇÚÞ8t¡µ¡ÂûžÞWý`«V­Ün»í–ðu‰ÚT4NÒýgu–µ‚þÁà ú-7]ÍL–£mQŸ«¤ö§ªÏ§vš-KÞ}÷]»H@°ï÷®½öZWµjÕ|ë¢6Ö~ñÇY”ãðÐ VsÎ9Ç&yV²ª:š¸¯0VãÆ-ø¥ Áã?núпÓñU–D2O•ꜻúê«-¦ÉóšßµkW ŸrÊ)®iÓ¦VP„gŸ}Ö* uìØÑ‚ šP®ìôÉ'ŸX›h}÷îÝ­RÓC=dÿ®S§ŽkÖ¬YÞsµ,÷Ùg·÷Þ{ç[Îc=æúöíë.¼ðB ejbþøñãíg[‰²qÚæí·ß¶ð„*ª}T)Ê10jÔ( /*ø¡ý’¬M¼âŠ+,Ü¡jûï¿¿í_#ª8¨×ê}ƒ9ä¼}‘,€÷ÒK/Ùº«AµßT¢¶¯_GmïçŸn0õ»£ è /¼`¿+a©Žñ¸ûÀÎíûï¿·ÊÌ Ë©ÿÐÅ Ô¹]4P?>7ª"¡BÖ:Wé"ºàÁ‡~è àÚµk—WOýÂ^{íåÚ´ic· Fjù{챇…ÞªT©’öœ¬ÊÂS¦L±çê5ª ï¼ó"­_œå¨×óõ<seÊ”±‹¨B¸.~ ~©Zµjö¸úZïtLžzê©ÖŸ©ß×8#| «­µ_|Œz|þAHš„/‡zhNßGÁÄ»îºË=ðÀy÷)vòÉ'»çž{Îý¿ÿ÷ÿ T} »çž{¬’&ÿ|ðÁy÷«:‚ÁêL§Ÿ~ºMìWõ½7ß|ÓÂ]z/ý¨ ’‚Zªùy«V­²*“ z Ý~ûíh†Ê RhPPóá U#T5Åm%Ê:Äi.¿ür÷â‹/Æ *¨¥MU¥R•=Ÿxâ Û·¢°ž( o* ^·`/-WÇ„B–Eåw@Çm¸‚Ö­·ÞêÎ?ÿüŒÛWšTxF—qãÆY€HT…S×Ù³gç º¤:Æãî;7UèSå<_‰OhëׯŸU°Õý Dús…ƒ °]rÉ%ùÎ%ªZ«›¯ª+êTõOA¸£>ÚîS(PÕƒʯX±bP[" Ê) ejÞyç;7¦ BªßÓ¹3(îvd²ê«‚Ôö)Lï«6ªQˆ}åÊ•‘û"… Õnº0€ïcÔ¯žtÒInß}÷Íxßk{4þjÔ¨Q¾}¯‹1hûµïdÌært¨Ýƒ!}UwÔ~REIí!5nÔÚQAHƒ’+:†39>ÿkWšä‚ ìVÕ÷rIUƒ0Q àÄOtëÖ­s'N,ðšgžyÆ=ùä“îŸÿü§UìÑ£‡M|W•ž  *ä Azšä.ªt—&ÀeÒ¼ys»õU)=…&%QHL¡ QEÃí%Ûë 0¥*GÆ­ž˜ªMýq(>˜®H¥ †áçz¾íÃÕ)=U€Re.…ü‚!Úýw@ë¬PbðGA˜Â´¯ 2Äno¾ù漤(2tèÐËNuŒÇÙv~:Cþ>õÓ7nÌw>R°Pæ S¸MÚó*”pXO!9QåÚ¨ë®LéÃmÉ–¡* ªŠ¢BæAq·#“õÐù_TmÒ‡ ÅWÇŒJÁ@ß'…ƒöÚ¶TU£´k0¼(êsý¾Ÿ²±õa‰ÖÙ UM2“íˆ{|þDEH•*U²ÛdáªlIVíQˆT!jÉ’%ëØ±cÞ¿U‰GUðyäwÄG$\–&»¿ÿþûVøñÇݲeËì~Mš+r¶•bx~R|¢0f§NܰaÃÜm·ÝæFm÷Z´háJ”(±Íöq¶×AÕ—¼ÈU›ª’“Œ1"¯â—L›6ÍnÃI9è ƒì6QHRT RTýª(ý(Pè+§eó˜õ!Uÿ +S¦ŒÝƒ®©Žñ8ûÀÎ/Ù9KAkõË¿þúkÞ}>d­JÃ> 
ç©tðü#åÊ•³[=7†T¿/Q«&ZGîóï¦u”Úµkx,îvd²þõþ< F¦ãûß?l¯}Ÿíå¬_¿Þú¶5kÖXÛéVTA4Ûá÷‹ªER# rB¡ÂñãÇG®Ò“m>¶víÚ½ûî»VR“þU…0\¹ÈÛºu«…üž~úi› ®×(,±yóæ¼Ç³¡dÉ’ —ç'Ì' 9¨:ÔG}ä:wîìÆgm­êPÏ?ÿ¼kРÁ6iãaÒµiÐ-·Üb•@U=QÕ<6lèÆŽë†nۢʇa¾í…;u 4ÈB ­[·.R¿¹h_ñ!a*J'Õ1u?(ÞÔ7Kðâ>ÐöÃ?$Ût¾ŠàŒ3¬rS®·É‡É/^œðñN8Á‚|_}õ•û׿þeÕ ›5kfÿ?à€¶I;ïëÕ¨Q£òŽÍÞ½{Ûöá…^èzõê•0Üç«)& ö3ÆŽ³+¯¼2a%Ëùw W*T¨`·¾ŠYañ(û@ñæ+«×î½÷Þ VH=Q¥Ã°yóæåÈÔÎÙÇsŒkÒ¤IÊ0ca|ýõ×nݺuVÁWaȰ¸Û‘‰=öØÃn [‘Pжè'“êΙÐ{…÷}¶–3qâD7eÊwØa‡Ù¸ÆWU(UÈØÖ}:€ $È‘Ë/¿ÜuëÖͪ) :Ô]tÑE9yŸ+VXe¾`ˆM¡3Uu’Ã?<ãe¿þúëv« ïÕ«WOû|?)ÞO¨/,$ô!°dÐ8p [¿~½Q¨S¡OiÛ¶íNû;—*¬*8¢J¡á ›‰Žã¨Çxªý xPàQÕø‚¡DëTáWÊ—/Ÿw¿ X+´%@øÙgŸYuÅn¸Á*$ê½´¼ÝvÛ-§ÛôÅ_Ø­.®HÜíÈ„ªë=tž ç}EÊ(Ô^ê–.]jm™M;DÝ÷ÙZŽ.è Í›7w+VŒ<,luJ@r»Ò >ø`×±cGû÷µ×^kA° M<9r¤ 5i>SŸþ¹Îóúõëg“úkÔ¨áN>ù䌗í'³«b“·eË÷ÔSO%|~¥J•ìÖ K¢4±^•ý6lØï1…6Â| /J àÁt÷Þ{¯Û´iSÆ÷ÇY‡l·M&´ïfÍšeáÄO?ýÔ‚úÿ²eË’¾ÆðÂ!•_~ùŽùæ›LiÔ¨ÑNû;—{>óÌ3ù4 ƒ^|ñÅyÛå²:Wë<ª [ðü§Š‰ªÖ§s®§Ð»|øá‡ +ê|©>ÁÓ2â{ï½÷Ü÷ßoAHýÍš59Û]8`îܹVõQU‰»™PÕKùä“Oò‡u^Ö… â.Gaøàºêœ¯õ_°`AÆë¨‹„÷ýôéÓ­/Þo¿ýòíûl-Ç·k°Â±^§‹)$¢Š˜¢Ð* 7¨ ræÎ;ï´”*+* ¥Êt'žx¢MÜW î»ï¾såÊ•s{íµWÆï¡jIšÔ~ÔQG¹Ö­[[`jÈ!Víç¾ûî˫Г‰-Z¸™3gºöíÛ»/¿üÒ ƒ ²uW•¨` Kêׯï^}õU×¹sg«$¤‰õ:u²°W&öÙgwÄGXSÕŠªU«f÷+¨ v<óÌ3]«V­lòþÔ©S­B¡ÚXë‘ʼyólßÈyç—”‹sÜuÈvÛd¢oß¾®qãÆv\„zè¡îá‡.PµÑ7´­A ‡(Ä¢b‰%ŠÜïÀ¥—^š¯"–סC×°aÃŒ×åüóÏwÇ{¬LëÕ«gë£Ò€,УpQ”c<,Ù~P|(P?{öl **ä­¢þ¯sÜYg•ï\wä‘GÚ9QAC³Õ×è\©¾KJÝß²e˼ðžþ­ªÆªf¦sô_þò—¼çf‹Ö]}ΉÎÇ™lG&TÑYm«à§Bôj[õo ™êþð…’Ñv(ð¨ `Ÿ>}l¹Ú'óçÏ·På%—\’ñ:î±Çùö½‚öÉö}¶–£vW›Œ=ÚtÒIn÷Ýw·± ¶Oý~x X¹re3Ž;Ö.´ 6T?˜íê˜Pœ„9£ààðáÃݳÏ>k?šlþí·ßÚcš\óÍ7[P¬lÙ²vŸÂ…RªT©¼e$º/¨N:îoû›»ñÆÝ# ös °©;ãŒ3ì<£¾BA6´U1Párcjóë Á'ºÏóãŽðc¾BrºJ·q¶#“õÐ}:G6ÌÂùÿ?{w%Eyïÿÿaq†APÁ BÅQ74Þ .r£‚¹×{4s4äBP6‰Â¨l 1Šì« û¾„eføçóýý«OMOuOwOÏÂð~Ó§°ªº–§jú)Ïy>õ?ß•)SÆ5iÒÄ êK¢þýÛQõc]½@Ç+zh×®#c´ãU_nÑ¢…UQö®½úM]{cÏvô¢‡ôôt „¦¥¥Yû©oíÝ»·=†«‚új?½ìañâÅÖz/…HäºrR\ýŒþ'}РA´ÈW h¸Âa Uð9vì˜ À×òhó¾ýö[×¼ys¤ÿÙgŸ¹¬¬,x_­Z5W«V­Àý«‚’*øh[ñÐþU‘Na….D Z÷Âb~´jCª2è‡G¡¼ `™ÂdmÛ¶u­[·¶ÀžŸÎY2‹öå…éb½j‹ðãw~¼ÇHÛD[k›~ÿý÷Rèܹ³ Â)¨©JXþóŸ­¨(Ä÷â‹/Zöå—_­«{A•˜T@âlúˆW"÷¬ù–-[,\ê…@ô7¨sö'Ú=í:8wdddXÿ£ßUqO¿M^ŸœÚTýOßQ8ÛÿÛ«ÊŠ 
ªë·JU~ÃMŸ>ݪˆ¬w<ú]ô¿X!hžGUýa7…ðõRõ}ûö¹¢G"Ç´\›ÕxÛV¿¥ÇÔÓ5:pà€íK,Ã3ÖöÓó‚ªKªr°^<à]{UvT?«¼lGÇ¥ë¥ç…Oskõ}ªVªëãïëòr]ÿï%ET„FÀU½(š j†¹U8 Wµ¨h¼ìñÒþ÷-L©AÿúÄsŒDÕ©TÁH•‹vìØá.¾øâlçO%¤ðk‘ŒùñC"mmy¬mºqãF›*ÌDÁñWüðÃmªJ‘~&L°íäV ²(þ $rïÇ{Ïú—‡Wi úŒvG»Îþ@™cª ÔÔTû9tèPÔþAnQ°-èx¢Íó„¿D@•yµ?¯q2Î#‘ãZ~ÁÄÔÿG£kT½zõ¸Ž1ÚqçåÚçu;:®ðu£µ‰ú¾ þ1/×ðÿÿ~ÓE—Âmýû÷·ÀÄäÉ“i©ŠW¹råܬY³Üã?îÞyç«>øÑGY0…íT]±C‡¶þÚµkÝŠ+\»vírUÞ{ï=».A•ÃÜ{<Úu€dPøZA´ 6XõGUúݺu«UØ2eŠ[µj•­£j‚É¢ß5…ñ5jÄ FT„(âúôéã†êf̘áúõëGƒ$ víÚnæÌ™nàÀîÿþïÿìãQuϧŸ~Ú½øâ‹¡Š…jk…T c[ Q*¼W«V-6Ÿïñh×’¡R¥JlOKKsK–,±G•ÿ¤oÓ¦MB•#éÚµ«ý¶U¨P @ŒJüûsfðàÁnРA´8kdffºÅ‹[•¦ºuëûóÍÊʲ©xÈ›½{÷º;v¸£GºK.¹ÄB’ªæ§ê„ºÇRRRø(Ä{ül¸ŠôôtwäÈ—‘‘á*V¬húÝÂí¶oßnסJ•*…¾@áQþ‘Šà¬¤àÚM7ÝtΜ/AŒä©Q£†}¢)Q¢D‘ß·¿ {ül¸ŠÔÔTû èô uêÔ)2Û®’4(ªB€"‹ $(²B€"‹ $@œ¶oßî–/_NC€¤:räˆÛ³g @‚ XùöÛoÝ„ ÜôéÓÝO?ý”mÙóÏ?ï† úïY³f¹Þ½{Ç88uê”kÞ¼¹kÙ²¥;qâD‘<Ï‚væÌ·`Á·zõêˆë$£íçõ»ëÖ­;'ú‹Ó§O»Q£F¹Ñ£GÛy6õï[¶lqûöí+vDzsçN{©…î­ÌÌLþÆâü+è¿Ëséw@d¥i_8mÀr… Ü%—\âJ”(‘ô}i€|=ÜäÉ“Cóè^~ùeû÷ܹsÝ+¯¼âž|òÉÐò¬¬,÷öÛo»Ë/¿Ü‚z±˜3gŽÛ½{·{à\™2eŠÔy†‡ŸŽåŠ+®pçw^ÒI2Û´iã6lèV¬X‘cy²ÚpîÙ»w¯›8q¢«W¯žëÕ«W¡ÏñãÇÝwß}ç.¼ðBW¿~ý¤n{ãÆîرc®Q£F®téÂ{¨:娱c]5ÜOšç}©ê <ûì³VÕÇOUgΜéj×®íZ·nšŸ’’âî¹ç«04þ|«jª)ˆ¨Pcûöí‹Üy†G¸¦M›ø=¬¶ (PvÕUWÙ4™NžøÀ_ªšxèÐ! 
¿}ÿý÷ÖO*Ôï§0úPñÕŸ¥¤¤Xuº¯¿þÚúÒ§žzÊù¢@ÙG}d}pÏž=mª`¤¶_¶lYë·½x‚ªªR³BYZWßQ5g…ï »è¢‹"ös €i{z¹@¸xÎ#‘ãÐ~UñZUûTèMÁLõ÷zö‰•žMÔf:]£ÔÔT{Á—_~i/’P_¿~ý¨ÛHäXbm…êt-u¿¨z ÖÕóY¹rå#Ñk«óWøòÛo¿µ{ôç?ÿ¹=)Øwá…FÝÎEí¨@lÓ¦M]åÊ•íÞÔ~gÍšeA…“ußåõo,Òõˆ÷Þ{ï=«†®çZµŸ—ªF©û©Q£FöÒŒ :Gm·Zµj =Ÿ8û„…ªnݺ6Õ iªé(¨תnãÑ`r vŸ6mš3fŒûõ¯ín¸áû¨ÒüÓ¥| IDAT¶ñàƒº›nº)ô ¤/pÄÆ‹düøñ60Û«ÄXÔÎÓOÁÄðª=ª’Óµk×ëjy¿~ýÜŸþô§Ð<Ÿ}öY xAH&DЀô´´4 ÈÓO?ín»í6·zõêú“Õöx*ÿËÔ·Ž=Úªk¾ÂZ^Ÿ¢~T.½HÀß稺±úQ…»TNDTõºN:¹«®ºÊæ)ˆ·gÏ÷ã?ZÐ*<ÄDÁ?õáþÀ˜ŽiöìÙÖ‡F B*(§>Ö/ÞóHä8¢SðPç÷È#„ªïýìg?³—ìÛ·/¦ë£vÓqú_| ê‡:¯™3gÚy䄌÷XâiUÚÔG•7„T[ëøbk›^|ñÅöѹ(H¨*â±Ü;ºÏt¼ªløË_þ2t/+¨m¿ýöÛVeQáBu“qßååo,Yç  ¢ AêïMaFïùUAL­é…:/=êoS/çг+€â¯$M “ªñˆ*þx:Uñ ç òV¥ŸXªr|Í5׸Ž;&5Tæ§ÊÐéééîºë® |!B¼ç‘ˆxv‘ÌŸ?ß-Z´È]~ùå®sçΡ*š )þõ¯ÍQ5:ÇRíSPôŒ©{A÷sP•C/$ª `QøKÖ9èE#úŽ>±–‚hŸDyÁÏܪ(†Ÿ‹^Ä›_½zõ"ñ7–¬sð¶©—†ÄJ“Ý»w·6ž9s¦Û»w/?À9‚ $(p»wï¶ê=cÆŒ±j>üã³-ðÁmúꫯºC‡åøþÉ“'cX®*K(­ªŽú^oÀ}xáðáÃnúôéVe¨mÛ¶Eú<óÛ½÷ÞkÓ·Þz+[aÍš5î¾ûî³k°|²Ú¿]»v¹)S¦d j-]ºÔ**x§*˯úò×_X]ðÔ©S.+++ôßÚ¦ÂT_~ù¥Û¹s§ÂôßGÍ·ó9qâ„[·n=¨ŠbxÏ#ªz)ª®çï«ÕwOš4)æíxÇᯭvýç?ÿ™oÇRí“(U:”XCšzf½p"ü¾[¾|¹Ý—ziD¤{¥ ÿÆ’uÞ5W5Qÿ5Ô3¥®ëæÍ›÷U·n]wË-·¸ÌÌL÷ÁD|ÎP¼”¦ @~SP®dÉ’,ܰaC¨êK³fÍ,XW«V­lëwìØÑuéÒÅ͘1Ã5iÒÄ=ôÐCîŠ+®°AÕß}÷…G çE£ª1ª¼øÃ?XE›úõëçXÇd}à 7d›¯A÷ *(°XªT©"}ž~÷ß¿G¸~ýú¹6mÚ$t »víꮽöZ·jÕ*ײeK xj`üرc-ˆ ÐH2Û¿5j¸Õ«W[˜JÕ•ÕW꿸¿õÖ[CøD}é•W^iACõ¿ h©ZÂR {iþ]wÝ aéßãÆsóæÍ˱ßÊ•+»Ûo¿=´n²èØÔSÔg'r‰¸êª«¬müÔ3‡ÚVÏ> Ài¾‚f±Ðqj³fÍrM›6u矾U¼VPÏPá/KHƱDû$ªvíÚnåÊ•nΜ9nÿþývz~RåÑ :WüñG÷öÛo[[è9J÷»^:‘’’â:uê”í>/Ì¿±dƒîuŸŒ5ʾ£åzŽVVUY#Ñ3­ž/7nÜè¦M›u]ÅAHo4ð\`ô“jÕªÙ u ¦V—Ç{,âàÿ>úÈ 6Ì >ܽüòË¡ùP­AÔ7Þxc¶õ5¸Ú?õkß¾½…ñ48_a@¿õë×»M›6YH¯N:Ù–?Þ¦½zõ:+ÎÓ;Ž©S§îëŽ;î!Ï;(ÛQ`Aÿ²Ò¥K[@DaÍ/¾øÂ½ôÒKùÅ/~a•.5p=™m€xýgÆ -¨õÉ'ŸXpÊë+Õ·© ׳gO÷ÕW_Y¥¹ „æ+ §~çÒK/ ÍS`MUðn¾ùfëPTpM!«%K–¸É“'»>}úXˆÍ;ÿ ‚æyêÒòðeË–-³in‘ã9DŽCóÔ—ëyD¶ùóç»2eÊØ }ã7bz„žwÒÓÓ-´˜––fûS°wïÞöÒ„X¶‘ȱÄÓ>þ¶ :žd][Ñ tk×®µ ‰zŽjÕªUÔm>üðÃnîܹVó›o¾ =]vÙeîÎ;ï´çËÜŽ7–cKÆßX2ÏAÏÉÚ§Â’ºŽR¾|y×®]; WFڟα{÷î T`Smô[ øPŒúÌàÁƒÝ Aƒh P$©"̶mÛ\•*U, *ÌÊÊrÇ·ÓáÜkÛ¶­kݺµ[¸pa¶e ô½øâ‹nàÀÙ‚ˆªTX·n]«.¤AýgÃyÆëرc0ÐõpÚGxÒ¿lË–-®Aƒ¡cTõ% H_?‘¶À/##ÃüêgXT…=õ_ iÅB½#GŽØwâ÷W¤SåÀ‘#GZŸ¦ ÐáTYaH½œ E‹¡ãQßé遼æyTÉÐJSEå×_Ý‚•}ûö¹¢G"Ç´\U­Vó¶­P¨þK ÎۿίR¥JFLd‰K,í£ûGÛ z¾IƵ 
zfReEŸÑ¶)úŽÂ¸:ÿHëäõØòú7–¬sð_›غªì~ý"íOßÓ2/€âKùG*B€"¯zõêö‰Fƒ¼#…Uá颋.²ê6;vìp_|qhÙ‡~hÓx Ûw&L˜àΜ9“k5È¢tžñж²eËF]^qÇ ;$£íðóÈ‚RÕºx¤¦¦Ú'È¡C‡lª>?ˆ‚þ¢ [ÐñD›çñª0{T½YûÓË’u‰GÐò .¸ Û¼ —%ä¶ÿðëï6=–XÚG÷O´6Êëµ zf z¦Š¶M©P¡‚}âùÛˆ÷Øòú7–¬sðï7Úsp¤ýé{„ sCIšw öïßßB“'OÍ_»v­[±b…k×®]Ž0Â{ï½gß ª…üm{ Šú ŒmذÁª?ª ôÖ­[Ýš5kÜ”)SܪU«lzõê%mŸêÿÞjÔ¨€Qœúôéã†êf̘áúõëgóôo cýwÞyÇB|µjÕ¢ñ ¸í((•*U²—¤¥¥¹%K–ØÇ£ „-Z´pmÚ´I¨ªa$]»vµ>0žjyœëJüûsfðàÁnРA´(Ö²²²lª€£¨Jaff¦KII¡qh{À9.==Ý9rÄedd¸Š+ÚÇë·@áQþ‘Šàœf(Q¢A<Ú“ššjPô”¤ @QEY!@‘EY!ŠíÛ·»å˗ӜŎ9âöìÙCC† $8'}Ú5Ê=Úλ(ôY[¶lqûöíã:‚ ß>|Ø_;wîŒù;6X]ò‹xwÝu—{æ™g µæÎë^yå·uëÖ쬬,÷öÛo»1cÆÄ´9sæ¸Ý»w»»ï¾Û•)S¦È·}QhÿXû}Ö¬Yã233óe¿ªÜÙ¦M׳gÏ|» 8Ù»w¯›8q¢›={v‘8žãÇ»… º 6$}Û7ntÇŽsW]u•+]ºt¡Ÿ«ªSŽ;ÖMš4‰{PèJÓ ¿4nÜØ* ÉÿüÏÿ¸^x!êú |µhÑÂ-]ºÔþûÏþ³{úé§‹u1¦÷ßh^‡\åÊ•-ø¦ê€¹7nœMyäÚ>F 6´ðh$“'OvÝ»w?+ï@þY¿~½KKK³ßåþýû'uÛË–- õá ;‚ ßìØ±Ã•*Uʪ¾úê«î7¿ùKMM¸þÇlA<… þõ¯YÕ¾âlÏž=næÌ™®víÚ®uëÖ¡ù)))îž{î±*LóçÏ· ‚‘¨ZÓ´iÓ\Íš5]ûöíiûí߿ߦ UÝjÚ´éYy?ò—~£U±QÓd:yò¤Ud._¾¼«[·. @˜’4ÈO ™õèÑÃ{|ðAÔuß|óMW²dI÷ðßm£ðáéÓ§­}J”(‘m™WPU£™2eŠ;~ü¸{à,øHÛÇgРAnÈ!9>—]vÙYy?òWÕªUí7ÙXO†Õ«W»ÌÌLרQ#ë@vôŽ ßýò—¿´éÈ‘##®³jÕ*7wî\שS§\ChÇŽs_~ù¥…ûT!oß¾}9ÖQ lÙ²eVÝPÖ®]ëÞÿ}ÛOn´®>AŽ=ê,X`•û¶mÛ¸N¬ûž:uªMï»ï¾Û¸í¶Û\•*UÜäÉ“]FFFÄc7nœM{õêU,Ú¾ Û?VáÛËÊÊrß|óûôÓOÝÞ½{£~wëÖ­núôé¶ï3gÎD]7÷w§Nr7ntëׯw?ýôS®ë«Òâ–-[lýÇG]WADUT^³f}GÕ‘½÷Û«ßrUðUèѼݻw»'N„ú mç‡~pééé1—ú¹îºë>¼‡¶«ª”êÛrë³rÛ·úJïxEm¬~\×3µù?þèV®\iÛ :‡ðýªâ³ÖÏ­öh}¯Jt2Ú<ÒþõR ]^‹{ÕmÈ/¥ißT5骫®²ð˜ú7nÜ8Ç:ùË_lúë_ÿ:bîСC®_¿~jSXÁ“’’âèþð‡?„æ-Y²Ä5oÞÜ=÷Üs®fÍšî·¿ý­ V¯T©R(ÐDë >Ü]}õÕ6XÝ£Áø p¯¿þz¶p@Ë–-íx.½ôÒ¸ö­ÿþÇ?þaÿ¾æškr‡ª9^yå•nñâÅnóæÍ®Aƒ9ÖQÈnÞ¼y®aÆîúë¯?ëÛ¾ Û?þíuìØÑ*yUnTUI}ür¸÷Þ{-`êQÛ1"pɸ ¸S5cÆŒP N¿ÁMš4q]ºtÉQEQA¼9sæØo«¿ß¨]»¶ý>«OðS`ýïÿ{¶€£Ÿú›:¸]»v¹Q£F¹+®¸"TEÙ›§>WóZ÷ãÍ7ßìn¹å–ˆç¥w5jÔpµjÕJø<=?üðCë_<ê?u¾±òï»nݺ¶oU¬–óÎ;ÏÝyçÖ~ýõ×î‹/¾ Ë—/oU­/¾øâжô=½l@B¿G•¯o¼ñFwë­·îWÛš={¶µS™2eìù$šÏ>ûÌ-Z´ÈU¯^Ý=ùä“ynó ý+ŒúÉ'ŸØ³„[ ¼vëÖ?jˆAHP üq÷Ì3ÏXeB/xçQ•¿ñãÇ»:uêXUÂHaváÂ…Š¿è¢‹"þv«¯Òö‚^VÏy$rÚ¯ªK«ª¡‚~ –*˜©>LÏñRõDíûòË/w×^{­U}Ô5R®~X/eÐüzõêÙ3‡ÎMÏÿùŸÿ ³j¾Ú_í¡ëššj/ PEj½Bý¡žüT%T•:µ_·îhÔ:ÎjÕªåxŽJ¤Í#í_/0øî»ïl[zQ Uí×g8„â‘GqÏ?ÿ¼ ªõÕWmP»Gð5H_ÿÂ+:ùÝ}÷ÝtðWKºé¦›ì; 
ÇMŸ>=GÐLî|ðA7aÂ|Éßþö7«Ü£õhï¯LôùçŸÛ~í[ŽU%uÜšŠªw†Ó5JII‰zlê«'Mšdÿ~øá‡ûõDÛ<Òþ[¶li/]P5NUßlÑ¢……R£Uá# Ìí·ßîj×®í¾ùæ·téRפI÷æ›oÚ²¾}ûæúý3gθgžyÆýõ¯uÇweÊ”±ð©S§BË㥠L ß}õÕWîÕW_µm‡Û¾}»MU%(h»*uêÔ)®ýza€hU¦¼e¥J•Ê6_ç;qâD 5tëÖí¬mûÂlÿd)]:çp\/êkòû~€s™ú#ñò½Päž={3žôªûIóæÍ­\°`þëÔ©ã6mÚd6õQѪõæ&ZàíôéÓnåʕַ]}õÕ9–Ç{‰DZcDzµc~ðÂÿAÕƒæé™BÁAU¿ÎÌÌ´¾VǧöÊË3G•*U,ü¨*Özîðª:çW›‹î½ŒbΜ9vOé ª²ÝµkW«\ ˆãÿ½hPP4ÿ±ÇsC† ±Ê„ÿýßÿí¦Nj•qn½õÖ\¿ÿÒK/¹áÇ»Ûn»ÍB|ªø' È©’RÐ`úܨڑª+^ýõî­·Þr­ZµÊQeQƒØU‘h„ ®Y³fIi /$·mÛ¶ˆëxÀð@*'ª¢ªåVñ°(·}a¶~RÀA¼ªRù}?À¹Ì«Òç¯îW¡B ëeA•é:±÷û­@¤>ê×®¹æ×±cÇ|«Þ§ŠÇéééîºë® úÇ{‰ð*2ê%EÅüùóÝ¢E‹ìe ;wUäT8Q/LHô™C×ñž{îq#GŽ´¥^^‰3?Ú\Õžy䫤­°­ªM¾÷Þ{î©§žŠù9ðïßqš¤ÿøÿ°ìï¾û®1b„UìÓ§OLƒÚÜ ‚÷‚xÉ  ¤k€¼ª#ª:“_½zõlºbÅŠ¤í³fÍš6õÂmA¼eÞº%<0x6¶}aµ~ò*<-^¼8â5Mæý罊€UTu?Qµ?HWp.ßÿ½UúëÝ»·U?~â‰'Üï~÷;×£GlËdS NÂÃx‰žG"*Uª±ÿñª#´µk×ÚT½d²èzvïÞÝž}fΜéöîÝ[`m®sQóÚk¯u'Nœp›7oæâ@¨:uê¸víÚÙàú?þñ®L™2î¿øELßÍÈȰ©*(y²²²Ü_þò—<—ŽiРA´Ðyÿàÿ|Ц¯¾úª;tèPŽïžì¦OŸnUŠÚ¶m[,Ú¾0Ú??Ý{ï½6U…Kÿy¬Y³ÆÝwß}öo…w’q?À¹b×®]nÊ”)ÙÂK—.µê} ˜)TïiÔ¨‘M¿þúëÀJ‡z€ú0¶©@Ü—_~évîÜi}‘þûèÑ£ùv> ­[·Î*ªòaxÏ#ªz)ªèïÔMš4©P®µwNþÊʺFÿüç?“²ýºuëº[n¹Åeffº>ø Ûy'»Íu߆S5lñªqbSš&dõ IDATíñÇwsæÌ±«Ú’W}'7]ºt±jO?ý´UTx`âĉVQIƒÊÃÃeñzá…ÜW_}åfÏžmÕ'Ožló;vìhûž1c†kÒ¤‰{衇ÜW\a‰ï¾û΂‰£Gàb¡ŠDªt¨`¡ªYÕ¯_?Ç:^¥ n¸!4O¡…',UªT±iû‚nÏý÷ßo•(Ãõë×ϵiÓ&¡óèÚµ«U{Zµj•kÙ²¥ëÖ­›uÆŽk l’u?À¹¢FnõêÕT¼úê«­Ð+D~ë­·f«n¬>âÊ+¯´ ¡Bé ›«¿SàMÁ4Ϳ뮻B!@ý{ܸqnÞ¼y9ö[¹rewûí·‡ÖM»Buê‚ú¡DÎ#ªò¬¶UðS}©ÚVÏ ™j¾Â‚Mç¬ã™5k–kÚ´©;ÿüóíÙCUõ쓌gõñêo7nÜè¦M›fÏDÉns]ß‘#GºË.»ÌÚ¹\¹rnÇŽöì¤ ¦µk׿â@ä›”””À°ž‚b5kÖ´AîO<ñDà÷üSÏàÁƒ­B‘e´à@«V­, §íþõ½j;áÛˆ¶\Û{÷Ýw]³fͬêÔÔ©SíXå£>rÆ sÇw/¿ürè; °uêÔÉÝxã1ïÛÓ¾}{ ¾i ¿‚w~ëׯw›6m²@œ*9zÆoÓ^½zµm_Ú_†åË—Û>‚ÜqÇ¡ d´í©õñ/+]º´…iØüâ‹/ÜK/½dAUßT%N…!‚¶•ÈýÅlذ¡ó>ùä«Öçõú½Öïj¸ž={Z¸~Ñ¢EnÁ‚¡ù Õé·ôÒK/ ÍSÐ_o¾ùfûU€Ma@å–,YbÁü>}ú¸ /¼0t<þ>6hžGM-_¦0œäVå7žóHä84Oý“úÙüÑÍŸ?ß*FëÅ €¾ñÆ1½x!Ú¾½y±.SµÆôôt c¦¥¥Ù±+4Ø»wo{‰µí£­£mªõ¨Q£,”ºvíÚÐ}”¬6×óÀM7Ýd•,½—ˆ^| g—Üža}Ù¿?gô?¦ƒ ¢5@R©zŽhàx8UÒGÕq‚¨Ò“ª9vì˜ (×@t… D B÷BkÞz̯葶´<++Ë–UªT)ð{ n۶ͪù(0TÉ)·}‹BrmÛ¶u­[·v .̶Lá¹_|ÑB‡^ðOç\·n]«N¤pÀÙÜöE¡ýãm{Çφô/Û²e‹kРAè8umÀ_?ÞûÎÖÇè·SÅýû÷Ûo²×åF¡º#GŽØwL÷WT•?UíÓï´ª-‡SÕa…!\kÑ¢EèxÔøûŸ yþ~×BT•à×_Ý‚•}ûö¹¢G"Ç´üðáîZµj¡m+ªÇ†Œ¶oõ}ê÷ÂYÕõ½ 
çÍW[éyÀ[t<Ñö›Û:ºŸ´LmšmîíãàÁƒvìÚF¤}"Sþ‘Š ß j÷(ÐàÎ…‹Ä“òåË[e(¿ AåZ/šHË5¸>ROªW¯nŸD¶í§ÊS]t‘U¶Ú±c‡»øâ‹CË>üðC›>ðÀ¡y&L°À@nÕ Ï†¶/ íhÛ+[¶lÔeá•Ê"]›xï8W„WVeÈx¤¦¦Ú'È¡C‡lªþ5ˆ÷bØ‚Ž'Ú<¿ë§ŠÄÚŸ^l¬óHä8‚–_pÁÙæÅóBhûŽÔ÷)Xi™¶~­ƒŽ'–ÊŠ‘ÖÑý-˜˜×6÷öÞ®€ø•¤ ‡ýû÷·0ÄäÉ“Có×®]ëV¬XáÚµk—-$ñÞ{ïÙw‚*VáÜ»y§Ð¹€6l°êª¸¼uëV·fÍ7eÊ·jÕ*[§^½zIÛ§~ÓŽkÔ¨€Q õéÓÇ :Ô͘1Ãõë×Ïæéß H 0 Ûºï¼óŽ…åjÕªEÃq?’@ˆõ‚´´4·dÉûxT}°E‹®M›6qUFÌM×®]íw=Zf]‰Î <Ø 4ˆÖ(YYY6UÈQT033Ó¥¤¤Ð8ÜÜP@ÒÓÓÝ‘#G\FF†«X±¢}¼ßbPx”¤"$@! Y”(Q‚Ð÷÷°ÔÔTû€¢§$MŠ*‚ È" Š,‚ È" PLlß¾Ý-_¾œ†à,uäÈ·gÏ€0!Š€çŸÞ 6,ôß³fÍr½{÷Ž9 qêÔ)×¼ysײeKwâÄ ô,pæÌ·`Á·zõê|»/àl§þmݺuçÄïÞéӧݨQ£ÜèÑ£í¼‹B?µeË·oß>nD@¡# òÍ¡C‡,äå}6lØàÒÓÓi˜0sçÎu¯¼òŠÛºukh^VV–{ûí·Ý˜1cbÚÆœ9sÜîÝ»ÝÝwßíÊ”)Ã5ˆÃ²µ‘ÿ³fÍ—™™™/ûUÏ6mÚ¸ž={æÛ}g»½{÷º‰'ºÙ³g‰ã9~ü¸[¸p¡õ§É¶qãFwìØ1wÕUW¹Ò¥Kú¹ª:娱cݤI“¸…®4MòK£FÜŽ;²Í+Q¢„U-üýïï:tèç}(VªT©„—#FŒ°éý÷ßš§¶©\¹²ÞT07ãÆ³é#<Â5ˆSÆ -DÉäÉ“]÷îÝÏÊû\ëׯwiiiö[Ü¿ÿ¤n{Ù²e6mܸ1 ªf–,Y2á倳AHoöìÙcÓ^xÁ‚pªr¤*J_ýµ»ãŽ;Üßþö7׫W¯„·ß¹sg÷é§ŸZÕ¾ Ľ¼¨´ÑÌ™3]íÚµ]ëÖ­CóSRRÜ=÷Üc•˜æÏŸo•#QŦiÓ¦¹š5kºöíÛs â´ÿ~›*XT«iÓ¦gå}H>ý.«b£¦ÉtòäI·nÝ:W¾|yW·n]:À»ï¾ë~øá÷ÔSO¹jժŽpv# òBxçŸ~è¿ìþð‡?¸ÿú¯ÿ²J{åÊ•Kh»?ýô“Uþ9qâDBË‹‚?þØŽ±GV©ÑO•xSõ¿h·)S¦¸ãÇ»¾}ûF¬¼È5ÈÝ Aƒ²µÑÙ~_’¯jÕªÙ*õ&ËêÕ«]ff¦kÖ¬ #Pûœ9sÆ:u*¡å€³½#(pª¼§À™*ñ­\¹2Û2U,üòË/Ý|`ïöíÛ—ãûª€¸aà ÙÉ–-[ì¿UHá±Ü–‡;zô¨[°`UàÛ¶m[à1ë{Ë–-sÿú׿ì¿×®]ëÞÿ}·jÕªÀåYYYî›o¾±jˆ{÷îØS§Nµé}÷Ý—cÙm·ÝæªT©â&Ožì222"ncܸq6§²#× ~yÝþÖ­[ÝôéÓíxÔˆ&÷G ¹mܸѭ_¿>ÔÇD£J‹êƒ´þáÇ£®« ÝŽ;¬Š±¾sàÀÐÇû½Õï·ªöª¯ôhÞîÝ»C¡õÚŽú¼ôôô˜ÎKý‹\wÝu ŸG^CÛUUJõi¹õSA}¤ö­ãô®Ó¦M›ìx£í[íúã?Ú³ˆÎ/h]mS×@×GÔë¿DCÒ?üÐúúYo¬ Ô¾o½õV;½ØÀT¿w÷Ýw‡®“*YëE ê=ªl}ã7Úv<Ÿþ¹½xÀ3qâÄп{ì1»vіשS'OmY¾|y7{ölûN™2eì™(¯×?‚ Ày•xD>Q<…ç}ôQ ™i½Ç¿øâ‹nÈ!®E‹îŽ;î°uUùPƒÖGŽiÕ†ú÷ïoÁ ®W%% R¶Ü£Aï TtïÞÝõíÛ׺üñÇîµ×^³ žª")0è÷ÙgŸ¹åË—[U> ÔW#¿O>ùÄ >ÜÆë¸UhèСnðàÁ®Y³f®sçΡuul ðU¬XÑU¨P!°­.¹ä’кA·wß}׿ÇS ’kÐ9O÷o<Û×µi×®[ºt©kÕª•µ­Â«ï¼óN¨-Ã%ã¾€âfçÎV©X¡3õ# ï+Ðöý÷ß[?¡~ËOÁ>õ! 
”ë·Y¡~…ÿ¿þúk7vìX÷ÔSOY°\jû裬êÙ³§M®ÓöË–-k¡<0ßû}§ÊÇ‹-²uõU;^¸p¡á/ºè¢ˆ¿×êÓ´=…ÃÅs‰‡ö«ÊÒ 2ªßV°TÁLõ[zƈ—ÚMûÑËt>Ú–ÚQ/õlЩS'[O¡V­«utMSSSíåªH­&¨Ÿ«_¿¾­«uRüöÛoíºÿüç?·þQà /¼ÐÎ?Úò¼´¥ªªèå—_níé½<"¯×?‚ Àýþ÷¿·êClñÅÛéºvíšðöšT˜¤Q£F.--Í5òôÓO[sõêÕ9Â4ɸ/ ¸QAUÙóW TØnôèÑV±Wó½à¼~Õ(`×£Gl¿ªÒ«ž”ªò+ "ªZ BzW]u•ÍS(P8…í«U«–-P‰‚|êÃü¡L“ª ª/ˆ„T¦¾Â/ÞóHä8äSRç÷È#¸óÎ;/Ô+´¿oß¾¸®“ö­g‡¶mÛf»No¿ý¶«®“‘jg—?ð¯ÊjU“Ôy{AH=§è£cUQ°ý×#·åyiKµ®K·nÝŸ9½æ€ø•¤ @~{ë­·Ü›o¾é^xá«à7lØ0¯ DžªU«f ày4˜]TE/™ Uÿ ç fW… pªø£*‡AƒáEƒáý9¹óÎ;mºyóæló+X®~êÈ‘#¡y^ \„UÐOÕý¿µR¹re›j]Rý „$ã9Fï·ÞÛo8£4nÜ8DzxÏ#‘ãð¾ïõ+~þ`d~\'9qâ„õ•GµcÓTT¡3™mK]û”””¤^s@bB€|÷ùçŸ[õA ²W5¿’%KæXçÌ™3î™gžqýë_ÝñãÇm}N:ZžLÛ·o·©* poÔ¨‘ëÔ©SRöUºtðMoð|´ŠAÞ²R¥Je›¯v™8q¢ ¶ïÖ­× Ák,‘¶ï…H½M,òr_À¹F}•øÃú^ØnÏž=¿“ åyU¥yóæî›o¾q ,°—Ô©SÇmÚ´É*«ša´ ½¹ êo=§OŸv+W®´>ðꫯα<ÞóHä8Ž;–­ ê:é™böìÙVÝ:33ÓúQ­£6ÉgŽd¶e^®9 ÿÏE€üÖºuë\+í½ôÒKnøðáî¶Ûnso¾ù¦Uý…äT%)¼j^^)Ø  D&LpÍš5+”vñÂqÛ¶m‹¸Ž Ò}úé§V5ñᇎZ‘kP¸ªV­jS¯U~ßp®ñªîù«õV¨PÁãzQ@P¥Ãpëׯýf+©ú¼k®¹ÆuìØ1ß‚mªŒœžžî®»îºÀÄ{‰([¶¬Mõ„ü¤óô_§ùóç»E‹¹Ë/¿ÜuîÜ9TuSAE½!ÙÏÑ–€üEÌ S§Nµ©¿{¼Üxƒä½Áõñ,¯W¯žMW¬XQhç\³fM›z¡¶ Þ2o]ÏøñãmÚ«W/®A¦ê›²xñâˆ×6™÷W ì¦OŸnÚ¶mË5(Âî½÷^›¾õÖ[Ù%kÖ¬q÷ÝwŸý[‘dÜPœíÚµËM™2%[ÈnéÒ¥VåOA:Uö4jÔȦ_ýu`¥ÃS§NYÿæÑ6÷îÝë¾üòK·sçN ]ê¿=šoçsâÄ ·nÝ:«T¨ªˆAâ=D¨ê¥|ûí·ÙúõA“&MŠ{{»wïÎq–,YbÏ.Õ«W]'ï¸ý“õþóŸ·ššjÓHaÆhË ¢-ù«4MŠ‚.]º¸•+Wº§Ÿ~Ú**0qâD ÐwÞy9ÂbÒªU+÷Á¸çž{Î* iýo~ó ’å¶¼cÇŽ¶Ï3f¸&Mš¸‡zÈ]qÅzøî»ï,h8zôèP-?¨Ê”ª"*x¨JIõë×Ï±ÎæÍ›mzà 7„æ)˜ …‚„¥J•â$Éý÷ßïJ–ÌYg¤_¿~®M›6 m³k×®îÚk¯u«V­r-[¶tݺu³ÐÎØ±c-”¡°M²î (ά_½zµ¯¾újë+ôß Žßzë­¡j€¢¾äÊ+¯´ ¡‚èêsTPA?*5ÿ®»î …õïqãÆ¹yóæåØoåÊ•Ýí·ßZ7Ytì ß©/ê{9D¨´ÚVÁOõ¹j[=c(dªù™™™qm¯lٲٮ“^tt^Úç¬Y³\Ó¦MÝùçŸoÏ 1êÙ&虣víÚöœ2gηÿ~;Nõ­ªæ™Ûò‚hK@þ" òMJJŠ x4ÀßoðàÁV}H±Úw¢S N!4m+ܯ~õ+÷Å_¸iÓ¦¹×_ÝÑ?óÌ31/ÿè£Ü°aÃÜðáÃÝË/¿š¯ Z§NÜ7Þš§  wNA¢-×€~}‚–µoßÞo (pç·~ýz·iÓ& ÂÕ©S'4üøñ6íÕ«× ×@¡‡åË—»©S§n÷Ž;î°óOdû¥K—¶`Bžj‡—^zÉB5¿øÅ/ÜÿøG  $ë¾€âÈë¿6lhÁ¼O>ùĪúy}…~£õ[®gϞ¾r‹-r ,ÍW?¤ßÏK/½44Oá9U#¼ùæ›íwUE…èFW5ÃÉ“'»>}úX Î;ÿ‹‚æy¼>8|™W)9·Ê¾ñœG"Ç¡yê“Ôÿøãnþüù®L™2ö‚@ßxã¸^º jÕ-Z´p3gÎ ]§J•*ÙuRÑsË-·¸ôôt \¦¥¥Ùñ)ÈØ»wo{ Ú§Âÿ:F½XañâÅÖÇê9%ÖåÉjËDÛ7ŠÒŸÑÿ´4ˆÖI¥*;ªè£õ±:vì˜U»Ó`tDa &÷‚háÕŸª"¨Ú ¨ÏÍmy2Ú2¯m 
ˆòT„ùÆÐ…äTõÉ/·_ÕªUí“èr©^½º}r;¶D—+DÕ§T=IU“vìØá.¾øâв?üЦ<ð@hÞ„ ,ØK5H®Al× Þ¶ItûZ^µ,Òõ‰÷¾€âÌfS¸L•!㑚šjŸ ‡²©úÖ ¬‹ÂrAÇmž'ü%ªB¬ýé¥É:DŽ#hù\m^¤ä&Öë¤ã _/·}ª?Öçæ¶<m™×¶$зÐ…G•‚ú÷ïoˆÉ“'‡æ¯]»Ö­X±Âµk×.[Pâ½÷Þ³ïU­Â¹{_£ ¹Bk6l°êª¶¼uëV·fÍ7eÊ·jÕ*[G•“E¿ã 6jÔˆ @Œ¨ PÈúôéã†êf̘áúõëgóôo…$ mÝwÞyÇBrµjÕ¢á¸/yT©R%{¹@ZZš[²d‰}<ªLØ¢E צM›„+#éÚµ«ý–W¨P @ŒJüûsfðàÁnРA´@!ÉÊʲ©BŽ¢J€™™™.%%…Æá¾à¾€žžîŽ9â222\ÅŠíãýþ"÷þjûöíÖfUªT¡AI¥ü#!Š€ð E‰%»û Pjjª}XU§NoJÒ ¨" Š,‚ È" Š,‚ È" ÁóÏ?ï† FCÄ`Ö¬Y®wïÞnÏž=4àœtêÔ)·nݺ"Õž9sÆmÙ²ÅíÛ· Ä=g5‚ ß8pÀ­^½:ôÙ¶m› È?Ì;×½òÊ+nëÖ­gU›Ü«D #·mÛfðñ-V8=laË–->Õ|åÊ•;âó]»vyJ~ùå¯Z­±VcX¬ $³œÆÞM›6ùƒ N8á„tcf¢íŽ—®—´µIãvfÙn¼Ç@ÔWº¦QPMû^¶lÙÐg °é3]/^Ü+ˆ« ºÑKŸk;ê¯Ò¥K§ëO½¯}Óµ¤Ö½nÝ:ŸV¯^=]ÿÆs¼bm'œÖ½}ûv_^ËUªTÉ×)rŸÔÖÍ›7ûuTfÇ>;ωxÖÙv]#êzUËè¹dÉ’¡yuýª>Wi¿¢õEV¶Y¿%ûýpt  rÍ<`/¼ð‚ùæÌ™c—\rIè3Ýd¯Ï˜ÔMÒ¦M›Ú[o½e'žxbè=ݰ}Ï=÷ØÈ‘#ýêÀM7Ýd£GýýóÏ?[÷îÝíõ×_½§¦;wîìËêFomëóÏ?÷ÏN?ýô#Úï¶’Yî¯ý« 6,ôYûöíCÿž={¶WY\°`wÞy^QS¡Äûï¿ßÛ¬›Ëu3xðyëÖ­mÚ´i¡å×kÓ¦W@ÒÍÿA¨j§^áÆçý¥Ô£éÕ«— <8ô·nD?õÔSmÞ¼y¶fÍ«W¯'9 K4fN™2ÅDZðë=L cÇŽéÆÙéÓ§û>Ÿ pÝu×…BX}ô‘}ñÅ¡Ïß|óÍпU¥Z ˆ&Ñå4êÁ mcíÅ_l-[¶<âú žvgFQ;v¬o7 ë]ĺ.‰w»ñQ€íý÷ß÷N„_kéa :t°"EŠxÐMLhÞ¼¹ê>üðC_¯‚p}ûö }®*׺N (T§÷u½¨ÀääÉ“=˜Ð3´ =8"žãk;Á>Ïš5Ë>ùäï«€Úùå—û5Uxè.|Ÿ´>û m±Ž}F׌Y='YGxÛõà µ]Ç1Øß+¯¼ÒûöÓO?µ™3g†ª0êØÝpà BMŶãé·d¿¿Ž!@®QåÝÌ>qâD¯l„ÔÒ“&M²k¯½Öî¾ûn¿ÙZó=ýôÓ\¶l™W’gŸ}ÖFŒáóêzU–ÔÚz…Óܺ‰[Û¹õÖ[}>ÝØ?fÌëÒ¥‹]uÕU¶jÕ*ªrQ™2eŽhs¼ÛJf¹›o¾ÙoW8TíèÙ³§WÃÑ ý „ûàƒÞk½á•”#%²œª@¾úê«þÐ…¸TÝzîܹ¬S5åðñ1ÞvgDc¾¶§  ®#Î>ûl¯ ©M„?ü!™þJäÈo¼áU¤O>ùdï3]Ã,_¾Ü¾ûî;¯^­&~øáØ‚æU_©_㹖Ѻԗ íiÚW…î¾ùæS¶mÛ6¡ãm;ê]›*pyî¹çzÅB…ö>ûì3›:uªÒ‹´bÅ ŸGû£ëÍŒŽ},©8'’Y‡Ž“Ú®ã¡àªŽ£®u=®c­þÕûuêÔñ¾Ñúumxß}÷ùqÎʶãí·d¿¿Ž!@®ÒM좪ªö¢›®U¹P•aºÙY7V¿ûî»6jÔ(¯°( Š~A¥ÈsÎ9Ç«%vÔ ØÍš5óÊCÁ تtçwZýúõýoÝ.A /R<ÛJv¹óÏ?ß_ª¤¤ ¡*ÛD º]áÍ×^{-]E¢Ì(<УGfúüË_þâ7§AHU©UUšXžU­‰¬B£æuC¿ìر籂ñl+•ËÅ¢0¥*G&Ýd‚UÊ”5kÖ„ÞÓ æ^¹IZµjuļá‚~‹V% €D•(Q§ òÇ¢¾(øI,Q@/§h, tIäRÕéT·ûûï¿÷©BjARÒëܹs–û+žc ÀI“&MÒUUŒ¬´­P¥ªq'z-£þ AŠÂqº–;pà€­]»6KÇOÕ/¥iÓ¦¡d@Õ ºÔv¢=ô!ÞcŸçr²ëPÛƒdäü ¬!HÑ1 f^É3+ÛÎJ¿86Pä*U‘‘àæhQåQ£÷Þ{/ÝüAU£ð ]¯^½l„ Ö»wo›:uªuïÞÝo¬/T¨Phž Ø× Aƒ ÛÜl##ų­T.‹*XFV!Їª?FªR¥ŠOU©'P«V-Ÿª2gP-SæÏŸïÓÈ€d@‰”  £-Y²Ä+Ãi,WÈN©ð ]ðU­ Âx={ö¤ßsBdxNJ•*åSU‹Lu»ƒy‚18\x02ÙíÆs 
¯#‚ëŠÌ¨BdÑ¢ESÒ¿¢Ê€›7o¶ß~û-KÇ/Øhý\#ªJfx0žcômFRqN$»Žhm/Y²dÌÏ‚€ìþýû³eÛ‰ô€cAH«‚À\PõE6lØàSU•‰vƒ¼ªÒ´mÛ6ô·ªÎÌ›7Ïúôéc3f̰™3gz°òå—_¶æÍ›û n†¿îºë|ªm…¯S7Òë½ ŒØ°aC¿¹zûöí¾Hñl+šD–SõJQµ¥Ü’––fß}÷õïßß¾úê+¯¥¿ŠÌHÂ8묳8ÑYmÜ)R¤ˆOƒJˆAUéO?ý4jUÄC‡ù¸NÕ %ÑÀV²ËE“L»£9ýôÓ}úå—_¦»vѵ̸qã²¼ÝxŽAx;>ûì³tëUHQÛ Mfõ:òí·ßN†\°`?\BáÄð6$s¼t-(óæÍ³;w¦ûìÛo¿µM›6ù‚‡l¤R*ΉTW¹yNçä÷@þQ˜.ÙM• ,è7¨¯X±"T)¨qãÆöŸÿüÇN8á„tó·iÓÆ®ºê*›4i’}öÙvã7Ú)§œâ7£+”÷Þ{ïÙ‹/¾èáFÝðߨQ#kÙ²¥uèÐÁo€ÿüóÏmôèÑV»vmkÖ¬™¯Sëxúé§½²á¹çžëՌԦ©S§Ú÷ßï•¥lÙ²^‘råÊ•^Y¨nݺ¡vÅ»­H‰.§¿ÇŒc}úô±¥K—z¿õêÕ+tc~N1b„]~ùå6pàÀ#>;餓<¤„KÃ!í/Y¡ÐÔ /¼à㎪)뺆Ѓôš5kú|ºF8õÔSmÙ²e~]¡ñR•é4þ*ħ÷Û·o ꉖ]¼x±MŸ>ÝÃþªnØ´iS;þøã3lS²ËE“L»£Qßè! Û¶móë#UÙV»¾þúkÿàÁƒIo7Þc Á)§pÚðáÃ}~=\B×~ evêÔ)%ç…—ºvS Qûªª—ú[Ûºä’KBÕ“=^Z§BŽ«W¯öë!퇮µ½%K–XÑ¢E­mÛ¶é¶“*©8'Ru^åVûã‘Êï!€üƒ $È6ºÉY•sh”Š+ú ÒºI]¡ÀÛn»ÍÈÑL˜0Áž|òI¿ð ýïbÅŠÙC=dÏ?ÿ¼Íš5+4ŸB|Ï=÷œ•.]Úÿ.T¨}üñÇvÏ=÷øºµ^Ñ ÓO<ñ„#×UR!É=z„Þw[‘]îŽ;î°™3gÚ»ï¾kC† ñå{÷îퟘt~4±>Ïh9õ^áŸ)ˆ©ðÃÃ?l-Z´ðª>¿üò‹Í;×FŽi7ÜpƒÕ«W/]8sùòå¶jÕ*ÖªU‹/ K .l]t‘ÍŸ??]5A=°@×áãVçÎí“O>ñj„ïÃÇ`K'žxbºu+°¯ ›8 ÊÚV¬Ä»\pM£15’Bsú<ò³DÛÖû§?ýɯoÔ¶9sæXñâÅýaøÃlذaIo7‘c vèÚnÊ”)ÔºE×9­Zµò€afýÏçÕªU³&MšØäÉ“=x)åÊ•³+®¸ÂCx©8^7Ýt“_³©Êæ_|ê B¯¼òJ¿¦·Í±Ž},‰œ±¶›Šu„¿—Èg©ÚvFý–ì÷@þ¦øùáXÿþýé '©ŠÐúõë½òvÑ“iii^TEݤ®âcQÀO¡=…ÌŒ¬è£âe—]fÍ›7÷à_V¶••åvìØáÕ‡4oxXr×®]pÐMßÑÄú<£åt  .ôÌÛµkç!ƒH jªâÔСC­{÷î¡÷yäëׯŸõíÛ7]x€¬øý÷ß}LÔø]¾|yË2²{÷n¯¨ù4F•û4þ©â´æ‹õD–;pà€³Ñ®UT™1£@\"íŽEÛP%iõ‚åÕoúwV¶›è1Ðü?ýô“ï¯*F®3£~Šõ¹®‰TiR!Ì›o¾Ù·¡j€ªY¦L™”¯€–Óµ›ú4£ù²rì³r.g¶Y]‡ª9ªÏ"—;|ø°/§€cvµ?³~Köû ÿQþ‘Š Ï«\¹²¿2¢¤#«Å¢›­U­2–‹/¾Ø« ©ÊÐÆ­zõêIo++Ë)8 W¤X•'3û<£å"üðÃ>Õ îÑè†v‰¬ú8vìXŸªZ$©¢ T¥J•âž¿T©RþŠ‡Æ¿ð10^±–Ë(ŒTgNE»3ÚFd_ÅzpB"ÛMôhþŒ®Ý2 ­ÅjÓ6ªT©’mÇ+ efAˬû¬›Ìö!«ëˆtT¨1£d*¶Y¿%ûýO¯è€ôXìÙ³§ÇÌí“&M¬dÉ’6uêTëÚµ«½òÊ+^s„ Ö¥K<žwÞyÖºuëÐ2K—.µE‹Y«V­¬aÆœD€”¡"$@wÝu— 4È&Mšd=zô8¦ö½fÍš6yòdëÛ·¯9Ò_âÅ‹[÷îÝ­_¿~骩ŸT™éàä¤Tÿ½0Àú÷ïOo„IKKó©*D«¶mÛf7n´;wZ5<$Y¤H‘#æSõ̃ZÑ¢E9q@¶\—mذÁÊ–-k*T Cà¢ü#!b8–*Uªø+3   ²õº¬V­Zt£ Ò ¯" ò,‚ Ï" ò,‚ Ï" ò­ýû÷Û¤I“lÑ¢E)[çƒ>hO>ù$›‰©S§ÚwÞi[·n¥3øŸC‡Ù²eËŽŠ±1/íËÑÔ¯€ä„)·yófûþûïãzíڵ˗ٱc‡ 4ÈvñR²}ûöÖ»wï”´{Ö¬YöÄOغuëòU'ÓwY•––f#FŒ°Q£FqÂð?Û¶m³7ß|Ó>üðÃ<Õ®½{÷ÚܹsmÅŠùr_òj¿rVaº¤Z‡lþüùqÍ;pà@ëׯŸMž<ÙúöíkµjÕ²5kÖ¤›G¡»B… 
e{»ŸyæŸþñÌñ>ËÊ>fÔwÙ¥uëÖV¾|yBªŠ&ùÍï¿ÿn ̱årk–/_n3fÌðq»gÏžùb_Àñ€H!@ÊÝyçÖ¢E‹ÐߪXøÒK/ù ø·ß~{ºy/»ì2Ÿ6kÖÌ”š†k×®M›6Í–,YbõêÕ˶6oݺÕ…5kÖ´æÍ›çheucõ]v*Z´¨]sÍ5~\çÌ™“îx×½þúë¶råJëÖ­›U¬X1Û—ËÍýѵMýúõ}šöˆ† $H¹®]»¦û{éÒ¥˜«\¹²=ýôÓQ—9å”SìwÞ9âý={öx5œ}ûöek›'NœèÛéÔ©“(P Gû+«û«ï²›*g긪*$AH@~rðàA;|ø°:t(G–ËÍý9î¸ã¢V»Î«ûŽDCä .^¼ØŽ?þxýöÛo^¥Q!AY»v­•(QÂCеk×¶‚ ƵÞ;wÚ×_m¿þú«uÖYGTC AÂ믿>êç6lðöíÞ½Û6lhuëÖ{ßb-Ï>ª_-ZdµjÕòŠš •jÎ<óL;ãŒ3¢ö]ð^øriiiöÕW_yuÎFY•*Ub¶wïÞ½¾¾õë×{xµjÕª¡ÏªU«f¥J•ò_zé¥V¡B?~¼ >Ü«D4fnÛ¶Í8àc]¬ yû÷ï·-[¶øTó•+WîˆÏwíÚå!3ùå—_¬páÂ>öjL‹õ0„d–ÓX¼iÓ&ÐÁ 'œ?“iwFËeÖ.…éÔwÚ~éÒ¥“îƒTµ9Õëˆw=ê]w麨xñâ~m¤k4-S£F +Y²dh^]³­[·ÎûKÇNý“ÊmëÜØ¼y³_sEž©:>p´! ò„ Øyçg­[·¶iÓ¦Ù_ÿúW6lXèóöíÛ‡þ={öìL+*(ñÀØ!Cüæó@Ó¦Mí­·Þ²O<1ôž>ÿüóÏýß§Ÿ~zºõè&ù{î¹ÇFŽé7­nºé&=zt†mÈlÙxö1è—>}úx ñþûï÷öê&{Ý­ïÂßÓrmÚ´ñJP Aˆn ïß¿¿¿"7κwïî7ëGÓ«W/²/¾ø"ôù›o¾ú÷m·Ýæˆ&Ñå4.êA ´cïÅ_l-[¶<âZ!žvÇO»¸Ó TAZ× ÉöAªÚœªu$ºž š7oîœÐñQQŠ)bW^y¥?4ãÓO?µ™3g†*1* yà 7XõêÕS²mm[aËhçFV­B€<%¸‘üæ›oöÉ_xá[µj•õìÙÓ+æ¨J¢Â™Ñ è“&M²k¯½Öî¾ûn¿‰}âĉöôÓO{ÀpÙ²eV¬X1ŸWëW¨°lÙ²V¦L™tëyöÙgmĈ¾ž¾}ûZ¡B…üæx½2“Ù²‰ìã|`ß~û­Wa¼ð mõêÕ1û.œÂ# .êÆûÚÆmРA6`Àkܸ±µk×.4ïÂ… íÆoôÀå„ ¼ª‘‚‘C‡õ DZ^Û§ JA„¤šÂöªh¬h RhLc`ä88vìXÛ5ŸÆ7U)^²d‰‡Ú^zé%ëÖ­›øtS@íË/¿´ŸþÙ.¸àûµÞ ªr4‰,§*¯¾úª?tAá6U§ž;w®Íš5Ë++‡—ñ¶;í ®’íƒTµ9UëHv=Ë—/·Ï>ûÌN>ùd¯®­ غÒu£Î«o¾ùÆß¯S§ŽWâÖúu=tß}÷¥«HžÌ¶W¬XáÛÖ¹qÉ%—D=7²z|àhEäI矾¿Þÿ}Ù©N<HQ%ÝÌ®Jˆª¸ÐäºaýÝwßµQ£FyµFÑ ñ„úÂ)€( U$Ï9ç¯Ì˜™Ì–MduS~—.]ìµ×^óáãõÝwßY=<”Pàó/ù‹ß¤„T•ʃÚsÏ=g:tð÷š5kæL…7Uý1²}áAHRí‡~ð©ªöôÐOAßUi¯S§N鯍Ÿ~úÉÃl º©R²ªúé¥@šBf—ã —%²œª êšC×!=|áÃ?´¯¿þ:„L¤Ý©hWV–I¦¯³sYYÏöíÛý—_~¹ÿݨQ#$®\¹Ò¯·tm¤`£œ}öÙ^½[ë[·ntÒIYÚ¶ªsgvndåøÀѬ ]Ž6 ô‰*ñD n]³fMº÷uý(ð®U«VQçï³h*Ȫ%JøTÁþXØ=\ ’‚j¢ª9EccxÐM‚ð£*PçÕvÇ#mNÕ~'»Ÿ 9ÿ™gž AŠ®»j×®íÿV(1ÛŽç܉Šà¨£Š>òÊ+¯Ø{ï½—î3U’ðà^pã¹*LEêÕ«—M˜0Áz÷îmS§NµîÝ»ÛUW]e… Ê´YY6Rýúõ­téÒ /§ê‘ªT©âSU, W«V-Ÿªš¦¶˜?¾O#’R­Z5ŸF IUM›6µ%K–xÅ<ö›4iâÁ±‚ÿÿºPÁ TÍOUúÂíÙ³'ÝXŸTá/R©R¥|ªj‘yµÝñHE›SµßÉ®'Úñ)Y²dÌÏ‚0îþýû³eÛÁ¹,ˆŽ $8êlذÁ§ªÖS´hÑ#>WµŸ¶m=Í{ IDATÛ†þnJvú*ýÌ›7Ïúôéc3f̰™3gz¥Ÿ—_~Ùš7oža;²²lv*\8ú-¤÷Þ{¯=ÿüóöè£Úºuë¬E‹6}út›8q¢ï‹ªKF ú,™p'™©^½ºuíÚÕÇ£U«Vùƒ 
Ž;î8»ú꫽Z²üöÛo>ݺukÔñèøãUéË-Apóðáá÷òC»#¥¢Í©ÚïTö_Pq;Zåíhï¥rÛá¡^@ÿK€£Bk×®µ×^{Í7nœéüåË—÷éúõë£~~Î9çxcéÒ¥öØcÙèÑ£­]»vþwÕªU3\wV–ÍiS¦LñiݺumذaþÒÍÿ×^{­ 2$j€2}@ª©bó-·Üb?þø£}üñÇ^…ï7Þ°nݺyµä2eÊx0¿cÇŽ¡JÅùA~lw*ÚœªýÎÍþ˯çäg!@žTáÙ½{wÜËÔ©SÇ>ýôS[´hQ\AÈ „úb©_¿¾½úê«¶oß>7nœÍž=Û:wîW›2Z6™}Ì#GŽôj™óçÏ·-[¶xà¤víÚV²dɘË}–×B€£O¥J•ìšk®±C‡Ùwß}gkÖ¬± x…H=Ì@Õùâ ¥cï’º.It¹h’iw*Û•Ì2©hsªö;•ý—·Ês ŽÔÏyZ•*U|ªêOñêÒ¥‹OŸzê)ûùçŸø|ÿþýén*oذ¡ßl¾}ûvÿ,ÜÂ… X>V¨P!ÃvÄ»l2û˜ÒÒÒ¶téRûõ×_­W¯^^Œ¥M›6vÕUWÙ¤I“ìì³Ï¶o¼ÑN9åÛ¹s§üÞ{ï={ñÅíºë®óùË–-ëU$W®\iëÖ­³ºuëúû E6jÔÈZ¶li:t°Ê•+ÛçŸn£GöJ‰j[,‰,›Ì>f‡#FØå—_n<ⳓN:Ƀ¥AŸTK´¯¤’Âd/¼ð‚Aª¬¬‡ lܸÑ  Ô¬YÓçÓê©§Ú²eËì?ÿùŸªØ§±XAJ½ß¾}{;ýôÓCëÖ²‹/¶éÓ§{ð_››6mjÇ|†mJv¹h’iw*Û•Ì2©hsªö;•ý—›Ç.'Î58„ÙN•›Tq±hÑ¢Î#‘óÜqÇ6sæL{÷ÝwmÈ!V¬X1ëÝ»w†ËÈ„ ìÉ'Ÿ´ÁƒÛã?z_¡Ç¶mÛÚ…^˜n~„œ:uªõèÑÃßÓ¶zè!{þùçmÖ¬Yéæ}î¹ç¬téÒ1÷'‘e“ÝÇŒú.£å *ä¯ÈÏÆTèäᇶ-Zx…£_~ùÅæÎk#GŽ´n¸ÁêÕ« h._¾ÜV­ZåÁÑZµjq¢RªpáÂvÑEÙüùóCÁ{Ñà 4–‡c;w¶O>ùÄ>ûì3ûøãÓǧN<ñÄtëV€õêÕþ‚yóæù¶2zÀA<Ë,X04ÎFÒu>ü,Ñv§²]ÉîK*ÚœÈ:RÕ–ŒÖ¼—Èg©Úv¬s#ÙsŽVþ÷:<`Àëß¿?½²Íž={<œô¢Ùµk—/^ÜoôŽ´cÇÛ´i“W… f´L`ûöí¶~ýz¯¥À^p3z8/»ì2kÞ¼¹ÿÂ¥¥¥yHR•x´ýråÊŽ߉,›•}Œ6OFË)ä†\¸p¡ßpß®];›F*¬_¾|yÛ2²{÷nûí·ß|>ͯ Y,U1ZóeôÐx—;pà€»Ñ®5<5ð–L»SÙ®d–Ie›ãYGªÚ’ÑzTÍQû¹ÜáÇ}9³kÛÉž£p4Qþ‘Š G”,Y2Óy2ª°xÜqÇù+‘e•+WöWF.¾øb«V­š}úé§¶qãF«^½zè3Ý”~ê©§&µß‰,›•}Œ6OFË•(Q"Ýß?üðƒOu³4º¹_Â+?Ž;Ö§ª @vQ@¬R¥JqÏ_ªT)ÅCãa䘘•åâ©àœŠv§²]É,“Ê6dzŽTµ%£õÄ :*Ô˜Q2ÛÎèÜHö€£îz€.ø‹={öô àøñã¹ýoÒ¤‰‡U§Nj]»vµW^yÅ+cN˜0Áºtéâ¡ÇóÎ;ÏZ·níó/]ºÔ-Zd­Zµ²† r² !þ?wÝu— 4È&Mšd=zô8¦ö½fÍš6yòdëÛ·¯9Ò_âÅ‹[÷îÝ­_¿~¡ŠHê#Uèzà8qÙªÀÿ^‡ `ýû÷§7À1/--ͧªy¬Ú¶m›mܸÑvîÜi5jÔðd‘"EÒͣʙ´¢E‹rÒ²òT„s, UªTñWF (@# Ò ¯" ò,‚ Ï" ò,‚ Ï" E>ø =ùä“Ç|?>|Ø>þøcûþûï9)ò¹©S§ÚwÞi[·n¥3äéqgíÚµ¶}ûö”­sÓ¦Möí·ßÚ²eËìàÁƒt2yAH­:d ,°±cÇÚÛo¿m«W¯ÎÖííØ±Ã äA®œ0kÖ,{â‰'lݺuÇü±Þ°aƒµhÑÂ:wî̉ŸÏiIKK³#FبQ£8ò¬ß~ûÍ^zé%7n\–×¥P¥®U†n&L°7ß|ÓfÏžm{÷îµ¹sçÚŠ+èð$ЀT)L€ì2mÚ4»ï¾ûޏù]a9…ªT©’òmNž<ÙúöíkµjÕ²5kÖdy} „*T(æçÏ<óŒOÿøÇ?&¼,’ï÷ciR}NÇ£uëÖV¾|yBªâ)íTRumÒ¦M¯Y¤H[¾|¹Í˜1Ã{öìIG%(Õý÷ûï¿[Á‚Ô€cAH--Zd;vôŠ C6oÞÜ+7}ðÁ6qâDûòË/íŠ+®Hùv›5kf:tðiVµk×ÎÜK–,±zõêñùÖ­[=¤V³fMß¿D–Eòý~¬íO*Ïéx-ZÔ®¹æ¯´6gÎ/ÀÑ,šk<¯]»vè}Uå­_¿¾õHœú-Uý÷úë¯ÛÊ•+­[·nV±bE:Ž1!@¶1b„íÝ»×~øaûûßÿz¿k×®¶téÒl©)§œrнóÎ;)Yמ={¼òо}û¢~®@§>ïÔ©“(P ¡e‘|¿kû“Ês:ªrª 
¤ªB„p´ÓälÙ²éÞ?î¸ã¢V}F|RÙªÒyøðaÈàØCd‹ï¾ûΧguÖŸ©2P,ª²øí·ßzˆ²Q£FV£FÐg “©Òd­Zµ¬|ùò¨üúë¯íÌ3Ï´3Î8#4ÏâÅ‹íøã÷—¤¥¥ù{'Ÿ|²öïßoü±Óš4ib•+WN×…!ÔØdíÚµV¢D ;ªJTÁ‚ýý œvýõ×ǽì/¿üâïzê©V²dÉ#öçÎöÃ?Xݺu­T©R ·=r]êŸ_ýÕCª«Y­[·Î×_§N;ýôÓ3œw×®]6þ|Û¶m›p vÚi§%Ýïñ®/§lذÁÑîÝ»­aÆ~ìÙŸÌÎëhçtä2:O¾úê+¯\¦ïMFAc}·´¾õë×{ŸU­Z5ôYµjÕü¼“K/½Ô*T¨`ãÇ·áÇ{•HÈM϶lÙâ¿MñþækÜÔ2šê7´\¹rG|®1E¿ÁøòÓO?Y‘"E|ÜUðNc~K—.íóè=ý¾ë÷·xñâþ›¼yóf_‡Æ¤àw4Ù6eæÀ¶qãFwÊ”)c•*UÊt›‘4Fi¿´.µ!£ ‹êí¯ˆÚ¿ð°hd_üøã¾o‡ôŠÖÁ8¦÷u,‹+æëÖu…¦Õ«WO·?Á1RRt-U¸paOµ|äÃ(G'‚ [œxâ‰>0a‚uìØ1ÓùþùgëÞ½»½þúë¡÷tc{çÎmäÈ‘\°`wÞyÖ§OoÝÿý~ƒ½º)^‚yZ·nmÓ¦Mó÷.\èï 8Ð4h`wß}·ß|¸å–[|º©^þú׿ڰaÃBŸ·oß>ôïÙ³g{uóÌ3Qk=|»A€AªëÛ·oºj¡ñô{"ëËn ÞsÏ=Þÿ tnºé&?†ñìOø9뼎vN‡/Ó¦M¯¶œúÞôïßß_‘Æçß3…V¢éÕ«— <Øÿ­sJÝyóæÙš5k¬^½zü°È ”;Ö‹ú½ÔïbF¿ÑÓ§O÷±:|,ÔC4†áC=\`îܹ¡Ïß~ûmŸ*œ÷àƒzÀQapUçÕï»ï5oÞÜßW`\¡Äà7øâ‹/¶–-[&ݦX´Ô8 0»Ö(T¨]xá…vÉ%—dÚ—¯¦L™âãHxä¼^ÓöÞÿ}@Eøµ™Bú:tð°hx_(èøá‡úzŠÔ¸­ÿDaI½¯6+09yòäP×Ú†ðÑGÙ_|úìÍ7ß ýû¶Ûnó‡Ž~!@¶Pˆïå—_ö`£UO=õT†Õ›®¼òJûôÓOý†ø[o½Õoê×úcÆŒ±.]ºØUW]š÷ƒ>ð›òU±N7þ¯^½úˆõ…ßÜPˆBá°Ë/¿Ü„ªÐ7tèP{õÕW=| ËÍ7ßì¡„^xÁV­Ze={öô Hº_AÑû ©©*’ª12[VU—„T[T ¯²§*ª2©ð€ú#™¶‹Â“&M²k¯½Öƒ“ &Lœ8Ñž~úiß-[¶ÌÉP¿¶jÕÊ+6kÖÌ•*8½òÊ+vÅW1¿öGá;ͧÀžÂ pôë×Ïêj,O¿'²¾ìöì³ÏÚˆ#¼ŸöÐ9;sæLÅ»?á2;¯£ÓúŽ(¸¨Šö_ äçXãÆ­]»v¡yªÕ÷Rá!”u.ª/u––×vÃYÕ~‚rƒ~û4Ö)8§ßÔ³Ï>Û+Cj xB´1_ãV ßC·K–,ñk—^zɺuëæ×' õ©Z¯~#UÍPóê71²js´ßà+VØgŸ}æÐõ‹*1+T©¨Ânäïf¼mŠEƒUq[!A U5Q!x=dA{ýfU‰cQ¸]„Õýæ+بñ&ÚµÔo¼áÕƒU•ZÛÔøµ|ùroƒª+,P5k…ì5¯úC.ȬÿDëR©R²¶¡c« è7ß|ã×CmÛ¶õ÷u­óå—_úz/¸à¿öRÛƒJÉ€£AH-–{üñÇí¡‡òœª%ÝqÇ^©PA¬p ;* `* !UºóÎ;­~ýúéæ×Íñ Gªj¢n‚—sjÏ£>zOÁÂóÏ?ßFeÿøÇ?üF{ý­—ª )¦jC‘Á5$Š2[¶|ùò~¿‚ªê^%pêÔ©¢¸þúë¨ oÛU5I!H…ÕçmSá‹wß}×çW%Ãd(x§ð‰3fÌððˆ¨Ê |ßÿ}º°ƒª9©- .ºè"S(¤ùÞ{ï…‚‹ñô{"ëËn .Š‚ˆA•ÍsÎ9Ç+:Æ»?Y=¯ Q…L…2 çþå/ñPMxRU*UEó¹çžó~}ç¾TxS–Èö…! 
7(l¨¤oz€ª¿±º¾Ø¾}ûËè7KcžÂ‚:uJ÷›¦ªÆ —5žè¥ (©ñ-Þ ƒ !j|Õ¸Ѓt-£±2<™H›bÑõ–ƒÚª1CµþÌ‚ ,Šªi×Ú‘× A*|ª`¿ÆYÑ8qî¹çZ¥J•Òͯc¤¾SUÉD®ÍÔ‡Ç/»ì²Ð{ EêAêL«W¯î/ Bj¬% Çž‚tÈ.ª’§ Eº¹~÷îÝÖªS§ŽWÈ §pžÜwß}GTBÒ ðá¡7QèLëHäF{QµÀð ¡(pШQ#oßœ9sâ^׎;|„ŒÇŸþô'ŸFV²zë­·|ªÀA²mWØMTU*RÔPè4YA¸RAÊ ) $¼ýöÛGÌÜqÇqüD!Q5ÉD¤z}YQ±bEŸªªb*$s^+€‚” šèš5kÒ½¯‰„Wð…–£Í~~G«9A{QÅ )ªvܹsç¨Ë(à- KFRPòË*ýF†‡ %?ªjtªÛ¤q7<ÆEUSÌL0v+HŸ…*ƒë PÕÇÈv(T©ê݉^›©ÃC¢Ð£öéÀP@¨ ²•ªü|òÉ'6kÖ,{ðÁ½¢*÷é¦zUˆ”  Õ Aƒ¸Ö©ŠH @$*²Êb@Õ,X`6lˆ{]AÀ!Z(/3 nôìÙÓ+7þöÛo^ÁOaFU[T¸.2T‘HÛW®\éSUÉRuÄpAÕ¬¬„Ú‚@‚ªAE*Uª”O£… ÎP(vÓ¦M^qkóæÍþ¾BÉHõú’Ñ«W/›0a‚õîÝÛ«yª*¦B … Jj}Éœ×:w"U©Rŧª0.¨p¦ó.¼Êêüùó}”jÕªù4ZHrB0Þ¿Gბႇ¨Ònè ìÙ³'Ýz³BÕc…Áv²£MûöíóñXU¤µ¬¦’–––é²M›6µ%K–xÕJ])è¨ðfdØ1C‚1%3ªY´hÑ”ô¡è Ûu€„9¢eË–^‰P!ÈáÇÛßþö·P2¨âTØËiAÈp×®]q/ܸŸL¢|ùòÖ¡C3fŒMœ8Ñn½õV§)Ì j‘±‚ñ´=DªòT´@™gžimÛ¶Mº¯‚À¡ö!‡ö à¿ÿýoÛ»w¯/^Ü—=tèPèóD¤z}Y¡ª^ ööéÓÇf̘a3gÎôª^/¿ü²5oÞ<×¾k‘UU÷Þ{¯=ÿüó^YTß¹-ZØôéÓýÔ¾¨ºd¤àüN6Ü YŒoú½WžÛºukÔß/…ì‚*Œ©(Le›4Æ)Àøå—_ÚÁƒý÷^ýòûï¿Ç=ªÚb×®]ý÷ÕªUþpU[¾ú꫽2q ¨.Y²dÉ\9îÁñÎÉò6‚ ǨR BX#FŒ°mÛ¶y¨NUmHëUµjÕoWP)±Fq/ׯ_ŸÔ6~Tòµ×^ó¿õÖ[þþÍ7ßœ¥¶+à *QZoãÆSÞW KHPÙ*3<òˆ <Ø.½ôRá•Ô<묳¢VÌÉõeÕ9çœãa’¥K—Úc=f£G¶víÚùß¹q.gDG¥nݺ6lØ0©¿®½öZ2dHÔe¬7ø ©V¢D Ÿ*ü¯2eÊx»cÇŽQ+Iæ†T´iΜ9öÙgŸÙÉ'Ÿìcª0ŠÂ•z@@¼c ®½n¹å¯¨üñÇ{•Ê7ÞxúuëªL¬Ê–ªV­W2U¸³JÛ•h•Ç&‚ GÕU­0Õé†þÅ‹Ûüùó­aÆٶm-U90<ð¥êI HíÚµÓÍ ‚›ñÃ!· ()£eåøƒþûßÿzhnÚ´i^ *ZU¾DÚ^§N¯¼¹hÑ¢l BªZ”Bª„ÙÖh}ñÎ;ïøTzõêŵŒú.™õå2_}õUÛ·oŸ7ÎfÏžm;wŽë\È)#GŽôŠ úžmÙ²Å0:o2ªöÓ¼êpìPåc=t@¿G‘,ª,FÒõ…–Q@0¯!SÑ&]/È•W^™’*Ú R^sÍ5~}ñÝwßÙš5k¬AƒþY… üÚcãÆ^­2»hlTEËðJšº¾ ª…«‘×T‰€cSAº¤šVúÓŸlåÊ•G|öÄOø îMš4±bÅŠù{×]wOÿùÏÚÏ?ÿšW7Æë=…ÊRá믿¶.]ºø ÿ_|у §všþùéæW¥JQ¥¤H lê†|UdÜ¿ÿŸg´¬*TÈnºé&KKKó*ZÇ7Þ˜å¶kyê©§Òõe@Û‰ ¨²â=÷Üãa‡ÌÇê?ÿùOºÊ’%Kìúë¯÷ëø‚m…Ÿ Úçýë_1·‘Qß%³¾htþýï·ƒ&ýþÂ… Xo* ndv.äõ“‚.ýû÷·¯¾úÊ¿§ú[•Yc ‚ª¶ ¹áôÓO÷é—_~™n¼Õø«ày4 }‹ ­’¤ÆRý&æ¤T´)ø<¼*³®•pW´ßüà!AõÍð~×ÃÂÛ«1^û Ðd*(˜ÿöÛoû~,X`¿þú«U®\Ù+]T¥R&{¨ RNA…W^yÅ+ä]tÑE~ó¿n²WõB°J—.mÇͯàÓO?íU Ï=÷\¯N¤Ê@S§Nµï¿ÿÞÆŸ’v©“n¶?õÔS­cÇŽòÒºµ­†* š5kfcÆŒ±>}úx&ݔ߫W/A–-[Ö«/*§ªEuëÖ{ÙÀ­·ÞêE…;DÁȬ¶½M›6vÕUWÙ¤I“ìì³Ïö¾U¥É;wzß¿÷Þ{  òØcÙ¦M›ì²Ë.K÷~4W_}µqÆ kÚ´©·å—_~±—^zÉóž={Òͯ¶¨Úg÷îÝýø–)SÆÞ|óMêœMÆÓwɬ/ÒòåËíÁô·mÛ6"Mä}ã5²–-[Z‡<¬ñùçŸÛèÑ£½Ê¢ö!‘s!'Œ1Â.¿ür?_"tÒI~.Fÿ è¢}€Ü 
Š» ”«:¡Æ/…ÿU}WÐû‘ÁuѸ§ñrÙ²eÜ×ï­ÆQýv+¨÷Û·o ûå„T´IË«t}¤ë%=PBc¡‚zÀBfc Â–/¼ð‚ÿæ«_Þ×C4†*À_³fÍмÓxÔºuͦùu­±bÅ ¡vêÔ)%ý¢ð¥®õt¢c«‡,èomë’K.Iwm¦öé`úôéæ×y k‘ì¬X È;B€”Sõž‰'Ú Aƒlîܹ6gÎ_7é+äøè£ú õ½¯¤ªN˜0ÁìïëÆvUâÓ2T,*Z´hÌmg4OãÆí¾ûî³»îºËƒ—¢›ê‡ æáƒHwÜq‡Íœ9ÓÞ}÷]2dˆz÷îú\¡2!HèÑ£GBËŠ… É)hpÁxH"–DÚ®>|òÉ'½üñÐû o*Èwá…¦›?¨0•Ñö… ö Xjÿyä+_¾¼WUõN×ð¾0`€&”ìÛ·¯7µÏ e¶hÑ"êqʨï’Y_¤ªU«z_¨úÔ‰'ž˜ÔûjÓC=äÕ4gÍš•îœxî¹ç<ìï¹ÙyíóŒ–Ñ÷I¯ÈÏÆTæá‡ö¾R…/…Xõ9r¤Ýpà V¯^½P@SÐU«VyÈ·V­Zü°Èú×£±mõêÕ~MQ¼xqûÿáðqP¿y‘:wîlŸ|ò‰W4Ô5F@¿Áú] ÿ~;çáÛ|?Ú{÷ôy*ÚIáûÝ»w{tÆŒ¾-] Üyç>.FÛfä®T¨‚dxEG=ØA×áã†öá¶Ûn³)S¦xÕgµY4¾µjÕÊC‹™õE<ŸW«VÍ«„Ož<Ù¯‡¤\¹rvÅWxð3œBù:ôPyóæùþ„?xptS4þ°þƒP%îRMÕïT1Q7ùŸ|òÉ¡W, j)€¥ùÌ‹¬Ò¸k×.@èæ÷X"çQÅÅóÎ;Ï|ðÿtSÅŠí„NÈtvìØá•ŠTA)<à¦p›ª(6oÞÜÃd‰,¸ýöÛmÔ¨Q¨»÷Þ{ø<«mWhpýúõ^éIa¶ Pè@íW¨A–¡ ÝÚI¶Ô IDATµk=<¬W•­tÌ"Cx:& ](ä¡P¡¨’“‚±Î‰Œú.™õEžgªœ9o¢ïëx( «m« o$zÅs^Gû<£etlÂà .ôI»ví©Šed[3ê‹XŸkU¥HÈ 4^VªT)®ßçpçôʈBz±BuÑÞ§:uVÛ‹¶]¥J•„û!|_#û1³ù3º–È,ŒOXQۈܧŒÆ¸Èqpô+H$G!¸ž={z¸lüøñ /ÿÆoxE¨+®¸Â«[å´ Øûï¿oƒ òj8z5iÒÄJ–,iS§Nµ®]»Ú+¯¼âUL'L˜`]ºtñУ*¶nÝÚç_ºt©-Z´È+…6lØ«¨ wÝu— 'Mšd=zôHhÙQ£Fùô–[nÉ•¶«B“p×]wò(W³fM›|ø°}üñÇöý÷ßs ëç4èϼjêÔ©vçwÆ ;tèwÞyÖ´iSÛ·oçZŠe6vE=†ÑŽé²eËŽ‰pàï¿ÿnÇ·_|Ñ÷;/ïµk×ÚöíÛó}ßæôyt,·SB€ló믿zXbÓ¦Mq/sàÀ¿q\/²“'íÛ··Þ½{ǽ̬Y³ì‰'ž°uëÖ¥{ÇŽ6hÐ |ÄCËZ´ha;wÎ3Ç+Ñ}@Þ“Ì9͹ýýy4È­s"--ÍFŒa£FŠkþéÓ§Û–-[¬C‡V¼xñ|1%z®ýôÓO¾?Ñ^K–,±ƒf[;3»b‰ÃHÛ¶m³7ß|Ó>üðÃ<ñ]Ø»w¯Í;×V¬X‘òuÿðök×.«_¿¾.\8×÷UÕ)_zé%7n\¾ÿ Ëéó(¯·p4(L€ìrÖYgy%!ùÇ?þaûÛß2œ_a‰&MšØ×_í:Ôºwïž§öé™gžñéÿøÇtïOž<ÙúöíkµjÕ²5kÖäjÕ… Jx¹¼´È]Ùq.${^æ—íåùí÷¡uëÖV¾|yÑ©Ú`f^}õUŸÞrË-GíXÔ A{Æ2~üx»öÚkóÌø˜è1Ìë–/_n3fÌð}êÙ³gJ×ýÍ7ß„ÎY ¿Rx¼`Aê¶H=‚ ÛlܸÑ7ªÊõÔSOY¯^½¬T©R1çŸ8q¢O.øå—_¼êU^²uëVÕ¬YÓš7ožî³fÍšy2MsS»vílÚ´i^¬^½z -›Wö¹/ÕçBVÎËü°½ü"?þ>-ZÔ®¹æ¯J7gίF‹ª×½ûî»VµjU»üòËÚ±èÇô©B…Ѫž{î¹yj|LäæÚGUlÔ4•öïßïHK—.mµk׿ ùÒ믿n+W®´nݺYÅŠé)Ed+…4:uêäˆ1cÆØÿýßÿÅœ÷ùçŸ÷ 27Ýt“ 6,Ïí‹Â1ªr£ý)P @ºÏN9å{çwr½{öìñ6îÛ·/áeóÊ> ÷¥ú\ÈÊy™¶—_ä×ßUÔ¢Š‚…èÞ~ûmÛ»w¯Ý}÷ÝGT½<šÆ¢@ÿþý­X±by~|LäæÇwÜU/Sáûï¿·ƒZãÆ©¦‡|KçðáÇíСCt€Ôÿ¿ºd·Ûo¿Ý/¼ðBÌðÉwß}g³fÍòŠe'tR†ëÛµk—ÍŸ?ß¶mÛf'œp‚vÚiV¹rå¨ónذÁ/^l»wﶆ ZݺuãjóÒ¥K}ªªO týõ×1¿ ÚÎñÇï¯HëÖ­ó cuêÔ±ÓO?=©~Ìh_T M¹t’µk×Z‰%<¢ÊR U¨‹-²Zµjy¥3í£Útæ™gÚgœu"—IKK³¯¾úÊvìØa5²*UªÄl¯IZßúõëýø¨J[ 
ZµjVd‹æÀQåÄOL÷™Ú¥mi;Zw"´/ê¯SO=ÕJ–,yÄç;wî´~øÁû;‘6ëX¨º—ª†UªT)êg:6åÊ•K÷™BjªØ§>×¾f…‚:^?ÿüs¦Ç+žó9Ñó!žó2•߅̶§ê~©>ÖZFߣ_ýÕÎ:묔W‰ËË¿YùHä÷áÒK/µ *Øøñãmøðá^a0šW_}Õ§7ß|óQ3Åâ••ßðDÆ®ŒÆÇDŽa¼¿k:‡µ/5jÔˆú]§J‹[¶lñ©ÎÝÈßÜp qéxê;£õª:c L™2Þn…¼4ÎÑàs½§ï˜úX•GÕï›7oöó\çF<¿'ß|óOu~$»Ym‡~Ç´ «Xçr,‘ÛÖñÑy¯öF'}tn©ÿÔ®h•Mƒq_]5¿ú_ci¬}Ð1ÓqÑ2êŸDªUV#ÇêTœSY]OvôkvœOZ~õ·Ú¾ÆOÑÒ(‚ Û5oÞÜ_|ñ…ß诰P¤ýë_>½çž{B¡H sõèÑÃÞzë­ÐÖ¢`Bß¾}íïÿ{è=Ý(®u9ÒoܨÂ×èÑ£3lïý÷ßoƒöP‹‚w¢›Á?ÿüsÿw´0È‚ ì¼óγ֭[Û´iÓBïÿôÓOvÝu×y°& ýæ™gâî¿xöå¯ýkºÊeíÛ·ý{öìÙ^+hcŸ>}š^½zy'bÍš5vþùç{hKÁžð ÑC=dO>ù¤ýío³üã ­÷¹çž³x¹ÿûßG|þç?ÿÙûùµ×^³o¼1îõþ÷¿ÿµ+¯¼Ò.¹ä›9sfºÏ{ì1{ôÑG£žO=õ”=üðÃ6hÐ ï÷d霻á†<èÙñŠ÷|Nô|ˆç¼LDf߅̶§c’ªc­°Í<`C† ñïQ iÓ¦þÖÍ.¹ùûìoD¢¿ Ó(¼:oÞ<ÿ¨W¯ÞË(ü£}iРsÎ9GÅXkœ}öÙvÕUW¬VßOŸ>ÝÛþUXYûüRöý÷ß÷0W4únëüWLaNUIÕq”à=cz_Ïð6^|ñÅÖ²e˘û¥¢Â ¦*`–ì~$Û…¤ÇŽëÇ& ßío¼Â·­€µ¶­Àœ)RÄÇCCŸ~ú©‰AÅ@…ö4VU¯^=´.-§ß…‡µÿUz½ð }\ è{5eÊ?×ÃûGÒŽ;fÚî>øÀ>ûì3~j Èì÷6‘s*ëIe¿fçùôÑGùokàÍ7ß ýû¶Ûnó06dAH#ºvíj½{÷öJ\AÐ$ Šj „è&é¶mÛÆ Ÿ¨â”4·Þz«:ÐMòýúõ³Z“&MìŠ+®ðyŸ}öY1b„]{íµLÑÍóºA<2Iá3…N²ÓâU«Vy¨lÙ²^•(–ð›ÊõïV­Zy5­fÍšy»Öyå•WBíŒG<û¢ hº‰]ý«¶öìÙÓà †DV—ÒMÿß~û­WèR `õêÕ1÷! 
úE7ë¯U¡I}¥PYãÆ½zZ`áÂ…"Sˆb„ ^iHÇmèС^HËk»‰RpFçÐã?nݺuóý…"Õ¶“O>Ù‘‰R0Hû¡ˆBmá•ÊTQç*!)l5Tuoîܹ^©JçN 8·ôPˆ#<À£¾–ð>MÔ—_~éçžúYû–ÑñÊH´s!‘ó!‘ó2߅̶§s1UÇZ²Ô+:™8q¢=ýôÓ,TÅÏbÅŠeûok^ø}HäœHö÷A•Ï‚ßâh!º×_ÝÛ«d~‹2’‘Èox¢cW¼ãcfÇ03›6m²1cÆx Kç­© séœÒwMÇ"œ¾çú*œ©}Ô÷]ÕvSeP#AÅ<¾t>ê{ܹsgŸ*©õëw\¿ç‘áæhß…+Vx¨NójS(U¥ÓXû­ïœÖ-œ›È~$ÓmWÕTš×ï„‚¥ fêøë{•¨åË—û¶5&«š¬ª¾êé÷R¿' !ë}UÕwLû¦ïÓ}÷Ý õ¾ú_ý¡ã­Êƒ ð*Ì®À³Î¥àa תʩúG¿ èi;‘¿]ѨOÔVUÌì·#Ùc‘ªõ¤¢_³ó|Ò±RˆR×ún^pÁþ{ ã­R:$ƒ $øØ»°«æýÿÿJRR†:¦DŽYÆBE†ƒdh0†cÊЩ®“ÄáÔqÌNä¢LÉTPIƒLI4H%EEŽù}_ïëÿ¹ÿëÞ÷Ú{¯µöÞ÷½ïz>®ë¾¶ö°Öú ë³öåZ¯ý(;vt7Þx£ÝT¯Šwº©ÝÓ øº©ZÕÕR+:µiÓÆ‚ÁjIÇsŒ}FA¤×^{­$¤áCr ~øð‚ª„©ºV:O>ù¤Uts½n¶VÑÑ èâƒQ(x¢ A£Fܘ1c,L!ª„¦‘ª{¥ Ei‹*%êOÕ¬2QõtA3Ý,þùç[Õ;Ý … ª€¦P§ÐËõ×_o7ÎC4 ©Jš*-jÌDa=þQU°$!8QÐHA˜Q£FÙÍüíÚµ³±W$Uøó}‡ŽG7ì+L¡`b°ZÞÈ‘#mnj?Q+=y5jÔ°0‘B :^çDä‡~° JÚ¿ÿ÷äÉ“-È @CRË–-‹<^IDqæeÙÎ…lû«S§N^ÆZ•¯4®ZT!ËÓ¶0yõÕWÝ€¬`¡ÃúgN$]‚!º0Ú®®Ù*yV†kQ¶ë‘§}¤§*Úyæ™9­áq¯]Q¯ÙÆ0UÁSº`5@UÈ{üñÇ­ ž÷ácíCç¢BtíÛ·/u ªv© ˜Bdª–)šª”§¬ª†ŠBªZª™‚rQÂ\ ëi†2uLº¨O3!u¾©Ïƒâ¶#Éq(䦤ڧóCUýº¡ðëÒ¥Kc“Þ¯ÀíI'dÿ>ì°Ãì¼úòË/m}Ñ\SOºTµZµEaÛÝwßݞרÍÁ`­Êê£áÇ[Ÿø äܹsíQÕ+ýÚ­sSëJ&êÍo­Â¾Á+“t,òµ|ôk!ç“Ö(ýi>é{ŽÖ9òmsº”‡ºuëZèH•ñžþùR¯©*—n¼¿ì²Ë2nc»í¶+<ñt³µèfoO7¶‹ªáD¡미⊴¡Ë$NÒ¤† ôtcø!C"o'n[²QGÕÐℜt|0@#¾jÞ¼yóJ=¯›àE¦ …ÃÞ‡úQGéÖ­›»ï¾û¬Š‘‚[ A$uÉ%—Øcjõ+?W£TŠ ãƒ^¾Ê£h~© ¤F*œæ)4¥×üç’Ê4^óçÏÏyÅ™ù”s!c­Ð¨rV*Ѽ,Ű>Ä™I׿ö†UyS°XÍÒ V¦kQ”ë‘§6+”üS -×s6îµ+êõ1ÓF¡ÏCþ9ÅÚµkKµCÁBQ˜/•B`¢ŠzžB`²Ã;”z¯‚–¢Š—Q1µ2¥¦Û†*]*ä¦J©a¼¸íHrš7¢AНޙdœ|X/õXôôa=Ñ:ãûXá¹àµ>¬º¨?ïT±2ø^QØ7*…wXŽ‚Ìe,òµ|ôkyÌ'($*B€rsùå—[ˆ¢_¿~%A“qãÆÙMøªÂ¶ãŽ;FÚŽn€?~¼…TAï»ï¾³ç„ð’:t¨ëÞ½»UzS%«Ö­[»*Uª”Ùžnˆ÷•qT%.,tâoô ¿¤ãg‡~x™×|²(a£8m‰B•–pˆ#,$P¯^={ôOU›Dó|e-Q¥CI @ÅuòÉ'[ðqРAVÍlÛm·u<ð@NÛTØ£k×®vÌ H©½«V­²£‚©€¨4RÅ6…UAMã=zôh{î®»îr¯¼òŠé?þñ{¿þ[Â*ºj¼Šqû…<ò1ÖªÀ%ª–¦0M¯ž–4ìU}’ëúgN$]vÞyg{ Jú`jÔÀr±^‹¢^<Í1_1ŸçlÜkWÔëc¦1Œ"][ÐTßë|ö|8S•òT/è—_~)uÜ¢j±¢÷Ã[I HÆ9Fßg~¿©tŒrðÁ—y-n;’‡ÿ¼Ÿ `02—qR…ät¯ù ãš5kʼ¶zõj›ª|¨ãÖ£¨z§×´iS «:¡‚ÖGy¤…ôÒUuÕ|÷Áä /¼0R2—±(ä˜Æí×ò˜OPH!@¹Q€­~ýúîÃ?tŸ|ò‰;äCÜÃ?l¯uêÔ)ëç$S˜DýõWW½zu /¬_¿¾äuOÕnÞÿ}×£G7fÌ7vìX«vóä“OºæÍ›—Ú®ª0)l2iÒ$wï½÷–T ò7Ç©|ãC1>`‘Tœ¶”§ªUÃoE½æškl\o¿ýv«ŒÖ¢E ÿ½üòËÖU&Ë•Bжç=IiŒT…ñ…^°ãT…(µtƒ¿*& ƒˆ: 
e¼÷Þ{nÊ”)V±Ic¨¹¿çž{ºfÍš¹‰'ºÅ‹[¥:U„TàFÏj¼‚çIẏ|Êǹ±þöÛoíQÕµªU«VæuUç:õÔSËå<,Öõ!ÝœHº>øµ75@¨õÿ¹çž³PÎÙgŸ]©¯EQ¯G…>gã^»¢^Óa®Ôÿ  úP¤ÖÖ°ýimöð¤I“&6´+àªÀîW_}e;GœjЩÒòD€g̘aëÈ~ûíWæõ¸íHr+W®,Õ…àƒ³a?þöœÎ!Uíuݺu6Wu|ê¯ÔsLã£p³Ö™ªڪP…ÛTúñ…µþè<ó•Q³É×XäsLãökyÌ'(èwKº”Ý4}饗º[o½Õ*qýío³ŠxªÜsüñÇgýüm·Ýæî¿ÿ~w 'XhÅWSI•”Roú>ôÐCíÆøÏ?ÿÜÝqÇV5¬U«Vöï`Å/ŸôšÞÿÈ#X-µ²˜„,X° r{u#¾ø <¹ˆÚ–b Êz²×^{¹¾}ûÚŸÆæœsÎq}úôÉ90§°‹*Aj; ªÚ¢*­vÚi9mW8…ãhÿýüóÏÛóQ«Ì¥£Ð‚ê…UMð†n°×tÌ&L°Êo:T©Ja¼|…6&ù8rkoT¥LŸW¸ucè“b_|ø45œ§sGU8UÙ-j%Ëb½E½ZÜkWÔëcº1Ì•¯H¬ìW«V- ^*Vé0•*qú¶+©?ãþûïoUb úÒõ@i:è ÐPuÜv$á+*Ô[,t]ÔuSÁ`+¾"§| §žcªFÚ±cG«Ìª0«*êÇ®»îº2ë‚Æò¬³Îr>ú¨-Š«ÆY¨±(1-Æ}@^þ]ÊÓŸÿüg»ýÙgŸu<ð€UкꪫB+פRPEt¼žD¡÷>ýôÓ®]»vVéiüøñeÞ£`“nš× òª¦ MA>¬âƒQøJDª–*Îvâ´Å÷£‚¥ÿþVoòäÉVYlúôéVqjðàÁÖϹºóÎ;­J×_þò—’ji×^{mI&)U‰S˜âí·ß¶€’VªŒ”kË3Î8ÃSJN<ñD{ôáM9õ'ªbµ±)ļÌt.dÛ_®c­jž¢¹]L6æõÁ¯™©ÁA…%nX°X¯EQ®G…÷Úõú˜n £Òï+zª ¨Ê~¢Jžs*8ÅÔ©S­Þ•W^iÕ>U¹ô¦›nríÛ·/°Ì7ö$]/n;’¨]»vÚñóË›ÖeQµF‚ŒBïUÈñ€p«W¯vóæÍ }ŸÆTákïÇwK–,ɺí|EyŒiEîÛ¯¡Á ­/!@¹jР…Àtsý}÷ÝçªW¯nð¢ð7U«‚’·aÃ÷Ÿÿü§Ì{jHU£F { †%‚t\½zõ²°…nT­I7w«òØš5k"oÛ¶míQU½‚ÛRˆOAQˆ#›8m©W¯ž=úpEEИ̜9ÓúrÊ”)V¡IÿVè)Œ*ª]}õÕnáÂ…Y·­¾SE5…Øz÷îmÕU-Jaý;évEUUUNǯP•Æù‚ .Èéxe¿ýö³êwª<¥Ð•æüÑGm¯)¦jT£FrÆ ³Ê\'tRÎû,6ÙæeÔöE=²í/êX§sþùçÛã½÷Þë~úé§2¯k{QB wÝu—ûÇ?þáÖ­[—øù}}ð|H+S˜ðµ×^³ÊfúÓŸ6škQ¶ëQ¡Å½vE½>†aš#C† )†üä“O¬ÂpÁ ­ÖVy÷ÝwC+*øª1ó´M…áŽ^´h‘õ»þ½bÅŠ‚õ³‚z³g϶*}ª|&n;’PÕKÑ5*8~Ï—^z©ÂÖ V%Õ)@6/R©²ªøj—a6lèŽ;î8[OU!8Ûw»|EyŒiEî»fÍšöXAO¿ªt(o—_~yIen¾ùf7iÒ$ ¦©b˜ª”‰ª© œ‚/ª>¥`[6ªì§ªD ù4mÚÔ}öÙØxâ‰'ìfô( ã¶EÿÖ ý=zô°ŠJ uëÖÍ‚*åå±Ç³0ß­·ÞZæµÝwßÝÂc>h# 6*x¢ SðùTß+®¸ÂBH .iüåÁto½õ–UuS˜Í‡l¢n7èâ‹/¶ãS D–K•d»»ýë_VN}£Ð•wê©§º~ýúÙÜPõH’ÊuŸÅ$Û¼ŒÒ¾8çB”ó ÊX§sÊ)§Øz¤ðê!‡bóN%šR¸Oá¼Ç<ãX}ñÅîÆo,™GqDìç7…õÁó•ÝÔ^O!-…ÉLU¸ucºeºyçž{®ULÕ¥K×¢E‹Äc÷Úõú6†q(ÈûÙgŸÙZ¡€¹Î7ý[!Ìã?¾T5Oûì³ èÔ×øjLœÓóZo}Pÿ­jaU:ëÔ©cUdý{óEÇ®à™ú4l“´# U*Uß*ø©uK}«óJ!S=ŸÈ.j³ŽG•’?üp·å–[Ú¹¦pÎuŽ©ÿ}ôQ[;Ô]?¨×¹¨ ±~h 'š³sçÎu¯¾úª­éäk,ÊcL+rßês­Z[t×\Ò:¢Š«+‚ `ªU«NQÈbÇw´›Ü¯¹æšÐÏ=UüS…"…1zöìiÁI<ÒÍìþýºaþ¦›n²*sãÆ+ù¼Â7=ôÛzë­íß¾bPp?Ú¦ªö5nÜØ*O½òÊ+v¼þó zèÆ|M‚¶UµjU U($5vìXwÛm·Y BUÇäÓMû©mLµ-ž‚‚Ú—nèïÓ§}¾{÷îi1[2}Fc«¿Ô×´R8áïÿ»‹ª)DóÎ;ï¸þýû»óÎ;Ïí½÷Þ%á+_}H7ègòüóÏ[(H•Ò´ O"†ô§€×›o¾k»A ÿhNQ+ùå 
IDAT©ZÒQGúÙ$ÛUE·ûï¿ß*Z~úé¥^;묳,)éBQ÷™d¼âl'éö3Í˨í‹s.dÛ_Ô±ÎÔÞ¡C‡º{î¹ÇÆõÎ;ï,y^¡0}ÕÏt´꽚»í¶[¢ç+z}H2'⮢èW_}e;Urô(Uõ¬Ì×¢¸×#…”¦M›fÿsÚi§•!“œ³I®]™®™Æ0 <ðÀ-˜7bÄ;oýù¦öê˜RuèÐÁ®ï½÷ž›8qb©µDÇ<¿ÜÒ¹uì±ÇÚñiŽ*À¥ÜÇl!Ô«®ºÊÂ\þx‚s*ì9OM½žúš¯Ìš­Bfœv$9=§±Õšöõ×_» &XX_!o@ûöí)hœißþ¹¨¯)<¼jÕ* cŽ3ÆŽ]»+¯¼ÒÎ;ÿ^ÍÕcŽ9Æ*Eú ­(˜«u8õ|Jݶ«ë³®Á ¦* 6—ò=ùÚNÜ~-ù¤ ³æ‘úòý÷ß·1Êú€¨ô“¿ë‹˜Êfä“*̈n®N¥ Cú «~'ªôä+þ¥Z¹r¥Ýð®›µ‚t3¶}ˆÂU fè5U ª]»vè¶tÿnÔÒgõZð3 „¨b]óæÍ-´u[¢°Ïüùó-àão,WÿèFò(Á´(m Z¶l™UÍÒ{ƒa›Lǘîõlí i¦Nj7Á·jÕÊ >¼ÌûÄRÅ)UqT55…YÔŸ 7úÊlé¨ÔòåËm̃•¿‚sFsMÇg»©.»ì27`À /¥†£rÙ®ÆNs>l^«5Æ©Áµ$ûŒ3^I¶“ËöÃæeÜöÅ9ÒQÆ:ê9# Å-X°À*)D•®º[*…­4¯ƒëV’ç+r}ˆ3'⮞‚x·Ür‹…}èT×… ˜TXª²_‹â^âÈåœzíÊv} Ã8T X}ªý*°¨Jsj“ï÷lªÓõCŸQ¨3x Q%†a©•ýT2­1ú óÓO?eœ—>H«@WØñdz.8Ï‚TÑSû V>͵IŽ#ìõvØ!ÒºwßéÎC]ÛÓ½¦í¥ŽuØñhN¤wœãÓçã„ ó1ùÚN’~-ù¤µ#Îwˆbsº …Qºvíjá…ÁƒÓ!!T-K•ÕFŽé.¿ür÷ÔSOYu°¡C‡Z¥-…aš4iâZ¶li¶^ýuw÷Ýw»=öØ#oÇËv d’N;í4«ˆ•¯íVD[*ƒŠl_¦±FůÞçŸî¦OŸnCƒ¡1ŸÖä° ~(®ëcº1, m*Ð5gΫþ¨ £ß|ó›5k–2dˆ›9s¦½gÏ=÷ÌÛ>Õ ß5jÔˆÉ@DT„ˆáª«®²ÐÖ°aÃ\—.]èõë×wÇw={ötýû÷·?OÕ–:wîìn¹å«R¤*C >µmÛ6¯ÇËv `;vÌëv+¢-•AE¶/ÓX£â×Ok­c7ÜpC©m)D©ðÝN;íDÇùõ1Ý UÛT v̘1ÎÖŸ§êƒ ð¶hÑ"VeÄlÎ<óLë“LG@i›ýßßï½{÷v½zõ¢7"ذaƒ=*„ƒô–,Yâ.\èV¬XávÝuW A©êV1›:uª=zè¡ àFޱ®ëƒ* ®[·ÎB³¨œ×ÇÊ4†ª»|ùr·víZ·Í6ÛØ×z*žòT„ˆ‰PD4õêÕ³¿Ê„Pܦƒ±®ëÃf›mF²’_+ÓÖ¬YÓþ@ñÙœ.ÅŠ $(Z!@Ñ" ŠAHEåÛo¿uÓ¦M£#lr–/_î/^LG‚ $Ø(¬Y³Æ 6ÌMŸ>=Öçn¼ñFwÏ=÷ä¼Bùè£ÜÀÝk¯½æ~ù庒ͯMÝÈ‘#Ý•W^+Ô³~ýzפI×´iS·zõjÆ:ßÿÝMœ8Ñ}öÙg‘×·$ãT4­ ³gÏÞ$æío¿ýæúõëçüqkw1¬3óçÏwK—.­ÐqªLs ØŽuÑ¢EöÃ:¦uëÖqÞÓF R# Fla¢eË–¹»ï¾Û:¹Ð~Î8ã ×½{÷ÈŸ7nœ»ë®»Ü7ß|“Óv AáƒvíÚY 좋.rmÚ´qÿüç?+|Ló5^•­ Å2/*ÛØmذÁ=öØcnÀ€‘?3zôh÷ý÷ßÛœ¯^½zѯ%?þø£_Øß¬Y³ OÂÖ·$ã ´/¾øÂ3ÆÎ¥®]»æuÛŸ~úiÉšG_†ªnn¾ùÆ_SL•ȯW¯ž;å”S,”¿Å[Ý<¨Lã‘îX³ÍóMeÎå $¨PÍš5³Êoz,O‹/¶àTýúõ]óæÍ‹®_TÍM®¿þzwâ‰'æm»­Zµro¼ñ†UªÛ{ï½+Íxå³éÚkßTt»ŠqìªU«æÎ:ë,«(6a«`˜‰ª)¼ãŽ;º“N:©RÌÍ~øÁ, «àvøá‡—û¥›•i<2k¦y¾)Í9 <„êü£{å•WÊ}¿/¿ü²Uhiß¾½Ûl³ÍŠ®_.\h»îºk^·ûË/¿X»UY¯2W>Û™® ¹öME·«XÇN ¼SÂlÁ»!C†XõÓN:Å®zYÑs³W¯^i«UÓúg<”µÝvÛ¤’´ª÷©j_ãÆ7šêq…ꫤԿ¿ÿþ»UÞØé‡d›m¶)ÚyP™Æ#Ó±fšç›ÒœÊAHP¡Ö™1c†ûÃþ`Áç§OŸî4hàêÔ©ã6lØà¦L™â–-[æ;ì0W¯^½ÈûøüóÏíQÕZ<˜j×®]ÆÏêÆeí÷§Ÿ~J»ßÔcÕþ>ùäרQ#wÀ”zïŠ+쵟þÙ|ðÁeªÇèõï¿ÿÞýøãöoý÷œ9sÜV[m)ùí·ßZ®ZµÊtÐAn¯½ö²çuC¼ªÄ)H'óçÏ·m*$¥JA ]DiGØxå2V ½i{ 
,puëÖµJ€ÞÎ;ïìjÖ¬k>Emg° Q>E¶±M:vù¿Š»N8Ám»í¶nðàÁ®_¿~V•0§Ÿ~Ú/ºè¢¼¬%ù\GÒ­%q1éñ|óÍ76ž{î¹§Ûÿý3î'Óúg<€b¤ë²Ö@?º.Ö¨Q#ãûUiQ×R=jm¨]»vÚ÷*´´dÉ[sµ]UgôjÕªeç‹BMzÖ8ÿºžÓ­óºzõêv®÷Ýw¶Nî´ÓN‘®eŸ~ú©=jýOÚŽ\C×/íCk„Öõ\…õU¾úËóUywØa‡Œs`åÊ•6¾ò¿ÿýÏ*øêª¶¦Æ5Ç–.]jïÓëÚvXÅߨëþ¢E‹ì Òµ-µ?Ô&ƒ® ©×…lóÀ·U})úo}§Üb‹-b‡"Õ_:í#õ<ó¯©tÌaý§öè»Jê<ˆ;Qû1“µk×ÚŒè;–Îei”mD9Ö°yž¤¹¬]À¦‚ $¨Pü±kÒ¤‰kÙ²¥{ã7Ê<ߣGwÊ)§X¥Ýd,ºyXÕ×ô—Í_ÿúWwÿý÷»ýöÛϪ-‰nXþàƒì¿3ŠÆçÎ;ï<» 9Ó~ƒÇª0˜ö©}è†eÝø,ºû†np}úô±×¼¦M›ºçŸÞí¶ÛnöïÛo¿ÝÝ}÷Ý%¯_pÁö¨›¶}uŸ0 „\}õÕ®ÿþv³¸wá…ºgžyÆýíos}ûö-yþŒ3Î(ùïñãÇ[e¸(í¯¤cõÒK/¹Î;ÛÍÿaºuëfcGœvú6DùL&QÇ6éØÅmWºñ«¨±Óÿûì³{ÿý÷ݼyóÜÞ{ïúýÔ–<Ðzè¡yYKòµŽ¤[K’cœãQ€¥mÛ¶¶y Ú>ðÀ¡ûȶ¾E )ŒüðÃ’?÷Üs%ÿ}饗Z8]t ÖZ;iÒ$ëwO!“N:ÉÖï8UÄÕŸj›ŸSéÚìéFec­€aÏž=c̓‰'ºwÞy§äuUZU,¾ñÆcß×_í äöØcwñÅ—zMû™0a‚wÏ>ûìR¯©ÿÞ~ûmë3µ)uD¸ýFP}'Ð%ÇTU§>úhwüñÇgü|”c ›çqÛ˜ËÚlJB€¢¼Ñ7hĈ>ÒÔ·Þz«UsQP°wïÞ®qãÆ®U«Vi·©÷鳪¬§н¯¾úÊ‚aªŒ£€a˜>úÈxâ‰v“´öe¿º!Ú´iVqMŸÓ äžn\VxãœsÎq:u²Ý_~ùe÷¯ýËBl³g϶›ÔÛ·oï¶Ûn;÷ÄOXàCïÕ è™*É¿ÿýo÷ØcÙöuÓ¼nð;v¬ý‰*ìéêG}ÔÚßµkW [(8’Z}*S;2Wœ±š:uª…<´:t¨UºQ¸îÁ´J9ú¼öWœvú6ÄùL˜¨c›tìò=~1v¾š©Ž=]ðîÙgŸµcKR 2Ÿí‹³–$õxÔ­CªÙ¬Y3 ¢(0úÔSO¹ÓN;-tÛQÖ·(ãUƒ{á…,l¤uO•šÒš¤5Váâ û´þ*¬óJÕgÍšåÞ}÷]»Æ^wÝu%UþdÒz¦õ»C‡ö¨`¤¶¯Šv K¥†ÚÃÖUpV0OïÕgT)X¡4…ëT)7Ýù¦5[ÛSÈ9Uœv$9íWÕxõ£ºÎ(Xª`¦Ö­Ë…üŽ—´¿DïÓg·ß~û¬× õ«Âcún§ysÔQGÙú¨@]°¹úZß½H=üðí¢¡æ†ö3räH Õ)€u¾ª_Õ6ö¢´mîܹöúÞ§ÏéXãÎU€ÖœÕy¡Šz¯®ÍÙ*§†Ñq(ªëŽª¿ËèXåË/¿´ñ D5®¬lœQÇ#i?iù¤…Êts³nvÞe—]J^ûâ‹/ìчÂ,[¶,ö~?ýôSwþùç»–º!\Ua”SHCm<Ý­_}õU7`À« xØa‡ÙŸ*íèæmmï˜cŽÉÚO ¿‰ÂU>¬¡ªzªb'Gq„ý©Ú•ÂOªF“.ä—®ù+U:\·n{衇\›6mì9½QP뢄SÅig.ŸI2¶IÇ®<ƯÐc Þ¥£í*Øé« Ã:’m- R01µ"*”y晉GI…‘5jäÆŒcáQ5N]U™25`e}‹2@±ñ!´`·† ºÇܪ­êyÔÒÜÖ¬à“~` 8÷UeU×WÅTáO´~© Û©§žêöÝw_{N¡@…Ò&WØ.[xI°Òú eê˜TáOçr¦ ¤ÖkëAqÛ‘ä8ZSRíëØ±£ßüuG¡ë¥K—lL“ö—^Óš¨qQ@\ëg&Z·õ§¶*”¦klêxjœÕŸª.yÙe—•Ì%]ÓtúÁU:T@RAÙ(ó5nÛ4šª°¼~Ç™ú‘ý©Â¨‚Ú^¦êƒ™h.èÓ¾|ôU†Õ6EÕDæ÷×ÿoý¨‡¥IÇ#—~ Òù¬~ þ0€úCý;|øpk[¦ dœcÍÇç’žóÀ¦bsº3Ýü Éé§ŸnóæÍ ýŒnl¾âŠ+Ò—r”LA¡LûÕÍåabSžÔð™^¢ .©üݪò’ …D•ùr•®ù+_-Hƒ U¿Ë4¶Å&c›Ï±K:~…;®…UULRµ#…ûÒ Ë{‰²–éøL þ)Ô’Ëñøp­‚´>) S 2$tÛQÖ·lã#ÍÛ`Ò?§Ð×ÚµkK; ŠÂ|©0’ ”<§€‘¤V_VLTe5ê1¦V¦ôA­tÛP LaCUßK ôÅmG’ãÐZ%ª<çCâ«czLãö—ÂÞ¯½öZädTS¦L±Ç¦M›–©â¬Êƒš šgQäIÚ¦ ‚­[·.sýN:òÁ_ë}ÈÞ_§Â÷ÁHÿ½@|…ÈÔïå9G‚tí «Ž¬uCTý´˜TäX•!@Q »ÁÝW˜ñÁ… 
ݨí+¨¼ñÆ¡Á%ã´¿ :ûU «¤›ÂE••tó~¯´”k ©[·nnèС®{÷înäÈ‘V1N7ÓW©R%ö¶Òµ#Ÿc嫩𢝾%“'O¶Ç|Ý@_hùÛ|Ž]Òñ+ôØ)D"éB‡>PzÑEÅ:u- Ò8§†gr=ºV²T5kÖ´ÇÔÀL”õ-ÛxÅ(Ýù¥`ðwß}ç–/_^òœ«B®ª«©b]ð\‘:uêØ£Þ Cúªw©É8ÇèÏU¿ßT:F9øàƒË¼·IŽÃÞ¯ AÁ`dyi¦þÒúè4à /Ì[2¸ö†õƒ_SUQÕýrm›ª†Ñ<«V­ZÞæA>(t¨ëŒ8ê¿ÕzÔ ¨B¡‚>¤ìC‘Áï…š#éú1Ìêի횺bÅ ë3=Š*Á“Šk 2  *ªUÓß©ŠJ ,Mš4ÉÝ{ï½î¿ÿýo™÷ø›ªãÞHì÷«ÁãøöÛoíQU^ÂnpoÔ¨‘;õÔSsêUŽyÿý÷]=ܘ1cÜØ±c­rÌ“O>éš7o^tcuÍ5׸‡~ØÝ~ûíî›o¾q-Z´p£Gv/¿ü²µEó*ƒ|Œí¦0vþ\ w®_¿Þ=÷Üsl8û쳋b‰º–úxîÒŠ"Êú–i<€Ê¦zõêö¨j}žE.^¼8tž+<é««I“&M܇~è&NœhâøVå?UzÕ:©Âj6›o¾yÚ×~ûí77cÆ »~ì·ß~e^ÛŽ$DZråÊRýXÑ2õ×¶ÛnkáG]{´6ûJºùàûºF¡¯ûª¼qÂwéÚ÷{d>çA\úaÍU!Ô5IAQ;Ûn»í\ýúõm<4tWHR}¨ç =G¢ô£Þ3jÔ(«<½nÝ:»Öj®ëÜK2…V‘c TŠÿ@€‰ª©ºÜ¡‡êyä׬Y³2Uæ|¨H7u—…(T…fàÀ®qãÆÛÚ¬@šªóÜqÇÖ­Zµ²ë†õb2bÄ{Ük¯½\ß¾}íOÕ…Î9ç×§OŸ¬!µb‘¯±ÝØÇÎFÃ}ª¶¨ê™ªî•¤iE®%…¦‰ø QQDYß2PÙø*iÁꀵjÕ²À¯ÂÕé*ü©¬?çˆÔŸÖµý÷ßßrÊ)Ãy¹P•½U«V¹ƒ:(4L·Iä#àW^4gu–{ôÑG-ئ°]X%Í$týÑXh>…UšôQ…1Ë[ỹLTéY×' 9êštôÑG—|Ð÷ Íåí·ßÞúïC)Ø9ׄ Ü{ï½g?n ïU¾º«‚†úƒÔªÊ­¢Ç(v!ÀFGá´Aƒ¹–-[ºN:YéÀ,yÝË|¨ÐöÜsO÷î»ïºéÓ§4éí»ï¾îé§Ÿv«W¯v/½ô’?~¼ëСƒ½æoøÖÍþ©ÿþV-qòäÉîûï¿w?üðƒkذaÚJLq%ig’Ïä{l3]±Œ_’±óçZX¨SaC)ïa>Ö’BÛm·Ý,À¡Š¡©•6Ó­_QÖ·Lã+¬TÅ-°R57U£“`@MF·vŠ&š:uªUZ»òÊ+-ð¦}i{ DÒ§Ÿ~jéÂ|qÛ‘DíÚµmZR+_úêtÅD!Eïuí>|¸Ûi§\½zõb]çƒÕCƒ}­~V¨/lmÔóR·nÝrosỹL„TÅê9sæXPOtÝ!ýk>´¯ï2¹ŽG¾è%DÕCÔÌõ{eÜcû¹Šk ØmN€Ñ‰'žèzõêeaÝ0¼™_•—tc²ªÐ­Y³¦àÇrþùçÛã½÷Þë~úé§2¯ër½ \!ŽT>˜ ‡ø°€_T” 6¸™3gÚM™2ÅÂtú÷wß}úþ‡~Ø]}õÕnáÂ…‘¶Ÿ¤I>“±:vÅ2~qÇN|ð.5ìóóÏ?»×^{ÍnöÿÓŸþTéÖ’BkÛ¶­=ª"ep¿³fÍríÚµ³ÿV,(Êú–n<€b¦5fÈ!†ô>ù䫞¦*o .{ k‹‚êa•ׯ_ok™§m.Y²Ä½ýöÛnÑ¢Ev¾ëß+V¬(X{xŸ={¶ËT­.LÜv$¡ª—¢ ‹Á5CkˆùÅH!¼ãŽ;έ[·Î½ð ‘¿ËÕ¬YÓ2K¥µSôÐCnë­·.yîŠ+®pcÇŽµÐ@Ÿ>}ì³Ý»wÜŽ°÷$é3…ùùûßÿnaU½QÀëwÞqýû÷wçwžÛ{ï½K~¾ún"I;“öMܱM:v¹Ž_E*D}õÕWÈkРA©m)ð)]tQAÖ’|Ï™Ölz.Ìi§V„Œ{< š(˜¥€­Æý¶Ûn³PÖ%—\âî»ï> ê„m+Ýú–m<€bäC‚x óFŒa,¿ÎêÓ¹ªC‡`~ï½÷,ø\w5ÿ}E;QPIUâŽ=öX;/´Æ)°¤ ÝÇláç«®ºÊÂKþx‚áí°ç<çôzêk¾²o¶Ê¬qÚ‘ä8ôœÖ]ËÔ:[½zu ÷+Ú·oßHAõ8ý÷8ÃÞ¯÷©Jo¿~ý,Tª›A «z¯*?j þàÀ…^h×bUÇTÕD¿+äúé§[UÄ$ýe Ò}&É<n+l›>ü§€]œïôÚ·>«k|B¤ BŠÂûQû$Óx$éÇ0ú¡‰U«VY°w̘1öY…¯¼òJûQ(ÛÈåX“|.îX›EÑïÝ»·•k¨+W®´›îƒÕe2=/ _¥††Ò½_!:½V»ví’ç*RuÁæÍ›[€+—ýfûLª¥K—º ¸m·ÝÖa• 
tÌÚWj.}Fá'…7tÓ~°½©–-[fUŽô¾à>¢´#ì=qúlêÔ©vcx«V­ÜðáÃ˼_a?U¥zðÁ]çÎ-ð¢q:ñÄ­¢NIÚ™KßDÛ\Ç.—c,ï±…÷n¹å׳gÏRAQU•R5*&P(ÔZ’Ïó9l-É×z—éxükóçÏ·ŠŸWª¥PGêû3­oéÆ(fk×®µ ±æ»‹ª¬¦óHAÈ(„Z¾|¹}Faâ`U?UY{ôÑGíÜRuÚTªì«0¤‚íGydÉñè®ñaÏyªª n)<®0»‚•:uŠÜ™Ú‘ä8Â^W¥aþü¶ ÕG ŒÅ闸ǙîýšzM}•ÖSUDT¦ûñ½®k²ú"êµ<—1Èô™$ó@ý¢±KmŸ¾£èG týíØ±c¬vi{º*Ö.…$Óõg¦ö¥\ærØþuÞéúí?ÎÜÎåXsù\”±6Ê?RT¸ta¿L!À­¶Ú*òûuƒsjpIUŸvÞyg Ù-\¸Ðí²Ë.‰÷›í3©êÖ­k™è˜ãlÓfŸ}ö‰ô^U «eŸaï‰ÓgsçεG_‘(•nø_­NÁ­š5kZÕ§¸’´3—¾‰2¶¹Ž].ÇXÞc'/¾ø¢=ªRdÐÀm;ÙªA準äó|[KòµÞe:ÿZjµ³°0J¶õ-ÝxÅ,µB«*CÆ¡kˆþÂøª²éÖ5ŽE!¤°ãÉôœç«Ázª"«ýùʹùhG’ã{}‡v(õ\”xHÒ/q3Ýû5â„ ýzši½•ZµjÙ_®ó5êD©g¨_¶©ŠŸz¾uëÖ±Û¥¹n>d›[™Ú—nÿüs7}út«æ™ø4h‡aØP˜õ-Óx›*…ªš3gŽUT…Úo¾ùÆÍš5Ë 2ÄÍœ9ÓÞ³çž{æmŸ:TkÔ¨€r£ê§šçº¨r5T6T„›¬«®ºÊvÆ s]ºt¡CÊIýúõÝðáÃ]Ïž=]ÿþýíÏSU§Î;»[n¹ÅªÝ©RŽÂumÛ¶¥ã*ÙØ‰Î-…}n¸á†2ÛRˆR½vÚ‰Ž-§õ-Óx›*UyU {̘1¾×Ÿ§Šq €·hÑ"Qõ¸tÎ<óL;“V’Ð5·]»vnÿý÷§3TJ›ýßßï½{÷v½zõ¢7À&gÆ ö¨›ÃQþ–,Yâ.\èV¬XávÝuW Ú©26ޱSEÂuëÖY ¿¾1@f«V­rË—/wk×®uÛl³ýñý€Š§ü#!À&€CŪW¯žýaã»Í6ÛŒÐ]­oŒYÍš5íŸÍéP¬B€¢E-‚ h„E‹ $Ø$Ýxãîž{î¡#*¡5kÖ¸aƹéÓ§WÊí£òÎ d6räHwå•WºÅ‹ÓØäýþûïnþüùnéÒ¥yÛæ¢E‹Ü´iÓÜìٳݺuë6™v­_¿ÞÚÏ:›‚ `f̘áfΜ™ñ=?üðƒûàƒܪU«Êí¸Æçîºë.÷Í7ßäm›Ë–-swß}·6&ÅØ.…ÜÎ8ã ×½{÷‚kºí36ů²ŒÝÆ:?6lØà{ì17`ÀyË—/wO<ñ„{饗rÞ–Â…/¾ø¢ëׯŸ:t¨{î¹çÜøñãݯ¿þêÞyç7gΜ²Ýa–,Ybí5jT¬Ï}øá‡nòäÉLL¨ÄB€‚ÐMû5²¿LzõêåŽ:ê(÷ÔSO•Û±=ðÀöxî¹çæm›Ã‡w={öt×\sM^¶§@Q1¨LíÊ÷±–—¤}RYÛ[ìsžù‘\Ë–-]:uBy¦ ˆŸ}ö™«W¯žëر£;ï¼ó\Æ Ý_|áÆŒãFŒA'eðÓO?Y(\ý´bÅŠrÙço¿ýFÇ@žU¥ À¦dñâŪ_¿¾kÞ¼yÞ¶Û¬Y3צM{ÌU«V­Üo¼áfÍšåöÞ{ï í¯ÊÔ®|kyÉ¥O*c{+Üg~$W­Z5wÖYgY5¸ &¸-Z0ˆ@Ì›7Ïõ½EHO•_÷Ýw_ûNƒô¶ÙfûaŽªU«º­·Þºàû{öÙgÝ—_~é®»î:·ýöÛ3'!À&åå—_¶*=íÛ·w›m¶YÞ¶ûÇ?þѽòÊ+yÙÖ/¿übǸzõê ï¯ÊÔ®|kyÉ¥O*c{+Üg~äF•v„TUH‚@~,_¾Üè Ún»íòZÝzcU¥JwÎ9ç”ÛþÖ­[g•Ñׯ_Oç@„EëÛo¿u3fÌp«V­rtÛk¯½Ò¾wÅŠî“O>q?ÿü³;øàƒÓVGòÁ víÚ•<·víZ÷ÙgŸY `·Ýv+õþ 6Ø1Ô­[×í¼óÎi÷¯°’Þ÷‡?üÁþüsÓ§Ow 4puêÔ±mM™2Å*8vØa®^½z¥¶¡ ƒ*V*ü$óçÏw[mµ•6UjóÍ7ÜwÚ‡>¿Ï>û¸5j„ö×ܹs­OkÖ¬YiÚ•I>Ž5›Ï?ÿÜU+—ù¥OR]ûÖWU«8 ´½¹´ù×_µí-X°ÀæûŽ;îXòšæ~¦y’´/²õÃÿþ÷¿¼Ìã$kEE®o…š¹œQçÇ 'œà¶Ýv[7xð`ׯ_?« 亶/Y²ÄÕªU+ô<×k[n¹¥«]»v™×t.éu]çu¤«Î·fÍ÷ý÷ßÛ£Þ¶­l´¦hšÿ:G¢È¶_=¿råJ;ÿDÿýã?º-¶ØÂB‘ Û©}ê—`¥C=¯5DçyõêÕ­¿ûî;ÛÎN;í”q½ŒÛIÚF¡Á¥K—Úº¯mí°ÃVÁ1Êç´Fj=Ûu×]C¯¢~ÓÚ©mçÚfƒúWûVú€ª/!EmQ´vk¿Áàˆ:7ÿ?‚ èèfö«¯¾ÚõïßßnÞ÷.¼ðB÷Ì3Ï”z¯n 
¿á†\Ÿ>}ìÆ¯iÓ¦îùçŸ/lÔë|ðý÷þûï_òü¼yóÜGa7È+$ $ÝtÓMîž{îq7ß|³ûç?ÿ™ö˜?þøcפIײeK÷Æo”z®Gî”SN±ªMºé]t3|¯^½ìÏûÛßþæúöí[òï3Î8£ä¿Ç«ºÚC=äz÷îí:uêäþûßÿ–yýÚk¯µ¾8p »à‚ *M»2ÉDZfò׿þÕÝÿýn¿ýö³àl.ó7JŸ]¡3í_sX …+ÂÚ›´Í/½ô’ëܹ³;ÂtëÖÍÚžïs9[?¼ýöÛy™Çq׊Š^ß 5?’žqæ‡B? ®¾ÿþû¶¶î½÷Þ\ÔÍ£§Ÿ~ÚÖÞ:”zM!¼ÿüç?$ÓyåéÜ1b„Íùàù®ÐñÙgŸ]ê|=z´}7¾Oé¶mÛF D*°üâ‹/Úqz:'uîeZ¢ìwâĉîwÞ)y}È!ö¨àç7ÞháFŽUVkˆçŸoÞ¼¹½¦`²B×þ\?öØcÝqÇ—è˜riwÕ¸qãܤI“ì<…=O:é$[³ÒUðV|ذa¥ÚvÈ!‡¸Ö­[—ù‘­© f«ß’¶YAÒ×_ÝM›6­ä9íSáó6mÚ¸·ÞzË}øá‡%¯=÷Üs%ÿ}饗Z=êÜ”Eÿûßî±ÇsçœsŽëÙ³§«R¥Š;v¬ý¥Òêº ^ïUXJ‘^~ùe÷¯ýËÂB³g϶À€|õÕWRåU–òÔéÞ½»»óÎ;Ýu×]çÞ|óM{^¡H…{öØc DF¼©ÝÓ ïÚŽ ·Þz«[¸p¡»ûî»-àÕ¸qcתU+{ßE]d7ß?úè£v¬]»vµJCº™_7ÈÇ¡€“¶¯‚‚_Áªl«W¯¶Ê˜ªuúé§WªvúXÓÑ{õy…dšÈuþÆéÍG….Tiïè£v_ýuÖöÆióÔ©S-D¨ËСC-P¤àÛƒ>h¬ôYí·çr¶~бäkÇY+*z}+ôüˆs>$™ªÊæ×\‚(©ó\A\U;Õh~*¬¦s#õüÐÚ¢s_ïÓÜ×3kÖ,÷î»ïº'žx¾dªJ¨ý*¤©j‚:gÂS8SÕYŸ}öÙ´Ÿ‹º_ìT Vçá?ü`ïÕy—Zõ0Ýu`Μ9î½÷Þ³ ÷ñÇoq¬TðP•\ƒçgœ¾HÚît}¡@£*Ò~øáVÅRAN÷È‘#-|¨àfªE‹¹^xÁ‚žZúé' 4ª¯´–+ìeßqÆРAVWß UQXkò_|áfΜiÕxõœÂ“}ô‘ÏQGeß75ÿ|eÞ¨sPAHPt|QA_¥íÐCµŠgAª¼£`“nvWµ#O7žëÆöW_}Õ 0 ¤J”nVÒ ºå–[ì†úQ£FÙñíÚµ³°Ôúõë­‚Iéù.]ºXÊSóú믷í}èHU)õ§jC ©zPÒ  *²©týªÁ+Ê)X 0„Ú¥ÚU1µ«ÐÇæÉ'Ÿ´ÀšBªP¸Ë.»ä<ãôɧŸ~êÎ?ÿ|«z˜®*V.mVÂuëÖYQU´’fÍšY¸Ná<Í¥¤ã•­/²õƒ1ù˜Çq׊Š^ß =?âœIæG0 T„¹sçÚ£ªúõAabÍ]OóSç¿‚tíÛ·/5üñG ç)T§Š„é(h¨0 Bn;v´*†þ~ê©§ÜÒ¥KË|&Î~uÌú›?¾!¶SUÁ¨TñUë\0¨ ¾ë(´èƒqû"I»Ã(ü§m×­[×]vÙe%at­+:6…ÆU)RI…׃|@RO¯aÆîñÇ·j‹z>S¸=n›g̘a!H?V÷'µþéøvØa =ê;‚úGAH­í>gnÂmN€b³ýöÛÛ£ªŽe¢€Ž¨bO*Ó¿ªúxË–-³Ç° ¤‚Ž |¸»âŠ+"‡ ãÌߨ–SeÀ8!È8mV`C< :ñÄsË|ôE>æqܵ¢Ö·BÎ8çC’ùá×X*œ¡¢ø/Pè7…yEá½T lj‚o™|öÙgö¨j‚> ( íuèС`ûJçbjeD~T…ì¤Ç”¤ÝaTQš6mZ&´¨Š• 6®]»64T­¶Cþ9… õ™l×®¸mVhRŽ<òÈ’¤§ ‘ÁJã¹ÎM@8*B€¢£ âСC]÷îÝ­ê[çÎ]ëÖ­]•*UJ½ïË/¿´GUzíµ×J½æ«ƒ8þ¦Ý$æä“O¶àã Aƒ¬2Ú¶ÛnëxàœÛ£Jk©êÕ«gª:T( #tíÚÕ*á-_¾ÜŽcÕªUnĈÆJ GT–v•×±ª‚¨¯¥j„QBqæoTûî»o™JXùl³¯.¦y¢}y“'O¶ÇÔ\!ÎåBÏã¸kE1¬o…œq·$óC&)¶@46 ÖÍš5Ë**Ì«ðš€Á›ÿqUUõ!7ï—_~)õ½!ÿºŸóAÁ€`P>öUXEÄš5k–ÚW’cJÒî0~½ ÛŽÿ¾¦ Šª®¥m¢ Œß}÷]/2‰Ûf¬~­,äÜ„# FÁž 6¸Õ«W»êÕ«‡¾gÍš5ö¼¡]ÕyÞÿ}×£G7fÌ7vìX«Îóä“OºæÍ›—¼ïÛo¿µGUõ©V­Z™m7jÔÈzê©%ÿöûÈ0ðAHQÕ´\oxO§jÕÂ߯Y§NצM÷ /¸—_~Ù]|ñÅfÒ þª²'¬PLí*¯cU…'…'Mšäî½÷Þ’Š¡ÙD¿ÅÒæk®¹Æ=üðÃîöÛowß|ókÑ¢…=z´ÍµEçARùè‹|Ìã¸kE!ëüHw>$™~Mîr¥µûòË/·¹ªj‚ :o·ÝvîÌ3Ï´*ªâƒr‹/« ÔùÊ€é¬\¹ÒÓ}Ç 
“ýæ",p÷˜’´;S_Ô¨Q#ôu_=ñ×_¼MLª ™ÏqøùçŸ3k>ç& Í÷UºÂf›mæ6lhÕnt“÷~ûíú>_…-µªØ¡‡j7‰þùçîŽ;îpÏ<óŒkÕª•ý{Çw´÷èfòùó绺Æg=&ªdÁ‚¡¯ë¦yU‚T ¨nݺîõ×_·Šm§vZ¥…Æ S鿟þy{þ¢‹.b’f¡€ææâ#<âš5k¹ß¢Ìßb¡Êв×^{¹¾}ûÚŸÎßsÎ9ÇõéÓ'çpk>ú"×yw­(´}~øà©_s\¿Oˆ~X!UìØ±£ûá‡Üĉ­òŸ~èàºë®³*ªµjÕ²ÐîÙgŸ¶"a6I‚zùØo¾Å=¦$í£qP…_ÛÃ*ÕúÀ¥*tGå«9†m/—6«’¦ŽUIª4Ç™›€pÔÎãÃÇ}ýÇt}ô‘…ößÿÐ÷ì»ï¾îé§ŸvíÚµ³j<ãÇ/ymÏ=÷´ÇéÓ§G:0ò!TwÞy§›5k–ûË_þRRýïÚk¯-¹©¾¼øÐ‡n¶ÏÕÉ'Ÿl7Ü¿ýöÛ²zã7¬ÂQ.UþŠ¡]åE:…TA«S§NnÆŒ±>ŸiþKŸôïßß*"Nž<Ù}÷Ýwv>)|2xð`k¾dê‹lýë<Ž»V”—u~ø5¶ØB¨œ@Š òUý2Ùa‡ÜYgå8à«P=oÞ<{^UøD“ª]»vÚïéŽ-ûÍ·¸Ç”¤Ý™ö« zÿ¼~œ"•¾›ýöÛo¥žûý÷ß­r­d OÆm³ßÞÂ… #ßÉV•2ÝÜ„# æŠ+®°ÇÛo¿Ý½÷Þ{¥^S°GÁ2Ý0饗–Ü.S§N-³­5jØcðÆöóÏ?ßï½÷^÷ÓO?•ùÌš5kJÝ„~ÐAÙÍéK—.µ×‚€Te6…­z÷îíÚ´icÚtSºþ]žêÕ«gªæá‡vW_}u¤›ñ«T©â.¼ðB«¤¥êyj÷\P!ó!Ÿí*O'žx¢ëÕ«—….T/[È#êüÒ'åAscæÌ™ÖÆ)S¦X…*ý[¡·\Ç)j_dë‡\çqܵ"»îºËýãÿpëÖ­KüüÆ>?ć£>ø`.„È™BwZ4ç4ÿ<Ø|uØTaóSU~ÅW3TÀWÞ}÷ÝÐʆëׯÏZ…Òÿˆƒ~Ô!ø½Bß3^zé¥ÐÏäc¿ù÷˜’´;Œ¾—Éûï¿ïV¬XQêµiÓ¦¹E‹Y z=öã!C†” C~òÉ'VåQÃlAþ¤mÖ÷Ùàû¾Ô6‚!FÞ YF™›€pUéP(§Ÿ~º…–žyæwôÑG»–-[ÚäºQ~ܸqVµG7·+œäé†úÃ;ÌwÜqFT >øÀ¶Ñ°aC׬Y³’÷žrÊ)®uëÖnذaîC±`”ªÄéfzv^{í5÷øã»¶mÛÚû·Ùf« ÷å—_Ú¾÷Úk/{^7±+´© Ô}÷ÝçjÕªeÏ?øàƒî­·Þr<ð€m»¼B=jã /¼àzôèaÕïT)®[·n%6øÓŸþTÒ¶L.¾øbëcD²Šïv•§›o¾ÙMš4É5Êýù϶jxaâÌß(}R{ì1wÒI'¹[o½µÌk»ï¾»ÍàxD§8}¥r™Çq׊0_|ñ…»ñÆí¿O=õTwÄGÄ~~S˜âAj+«-·ÜÒæ¼BÄO>ù¤Ã »Íž=Ûí¼óÎeÈ °=úè£6?UuUAc·(Vظ~ýúö>­ûì³mç‘G±}èGtž*¬¦çÏ8㌴«EÛW`yÉ’%¶†¨¶*û)§çS-_ûÍ·¸Ç”¤Ýaô9}üúë¯m­Ñvõ]M×ý@EµjÕlýôƒ´ŸÏ>ûÌÞ«íh=׿õÞã?>ô3¹´YÁInìׯŸ«ö1gÎû^Û¾}û’mkŽ©‚ôèÑ£-¼«¾iÚ´©Û~ûí#ÍM@8‚  žzê)w 'XU´×_ÝþDÕzºwïnÕ}åQàᦛn²ªs Kz â<ôÐCnë­·.µý¡C‡º{î¹ÇÝÿýîÎ;ï,y^7Òëæy0ƒ´!GŽéºtébÏ©¢”BnªüwÞy畼WÁ$àô§ Ò›o¾™¶¾šnÚÏôœ§êVú {M¡Ì±cǺW_}ÕõéÓÇúD}åù Eº‰?Š8ÀUºÿ¨£ŽŠü¹bmWºýçëXÃ>³ùæ›»gŸ}Ö5nÜØ*P½òÊ+îÌ3Ï,³Í¸ó7SŸd:ö¸ý©Í Ú)8ô÷¿ÿݵhÑÂ*])dôÎ;ï¸þýûÛ9±÷Þ{—„Sœ¾È67¢ÎãLm»V¤RU2½WÕÇvÛm·DÏWôüHr>Ä €~õÕW4oРAä…ÎQ…ëtÓÜÓH¡"U²kÞ¼¹…)D!—/_n7ЇUÒqë¦øl7¨¯\¹Ò‚ºÑ=ÓsžEéBƒ²lÙ2«t¤öû€”nö×±ë¦{UŠê²Ë.s °Ö5×\kÜŠ±]éöŸ¯cM÷ÍK½–m>Æ¿a}’íØãöCX›UaMV­Z¹áÇ—y¯‚xªö¥ê¨;wN4ÿâôEº~ˆ3³õYÔµ"ŒZ/| 0éó9?âœqç‡ÜvÛmî–[nq={ö,8òAaHqõŸ·:ÏtíN Þ)„¬sG¯×©SÇæ}&«V­²ïzŸÞŸ­¢`ر©j«ŽÍ6ݱÅݯڢm…]WUÍZçsê:–îy¬aaÈ$}‘´Ýé¾'j] ŽoµMëªö¡¾QÕE«¾Ç…Ñ{TÕV?¼qýõ×çÜfmïÇ´cTÉtïÕšª6i{Á±‹;7Î~@‰Š ÜÔªU«¤jX6ºy~Ÿ}ö‰µýºuëÚ_&Ç{¬Ûyç-еpáB 
eêöLA$waÁ­°ç¼­¶Ú*ãöts½þ‚2Òüýúõ‹Ü/º±_UÝPõ£¸Š±]éöŸ¯cM÷ÍË(¡Ü¸ó7¬rÝSó¸IDATO²{Ü~kóܹsíQa½tsG|e¿$ó/N_¤ë‡8ó8[ŸE]+¤ Æ}¾"çGœó!îü_|уÕu|ÑùŸzî¦;ÏRSõ騴¶+T'9¶Ôýe dÇÙ¯Ú’îÇâ>ï5_}‘´Ýé¾oEùΕZ±Y•!3QPÓ·+mÖ>£\C´¦†}ψ;7ÿßõ….›ºvíêzôèáìºtéRiŽýã?v¯¿þºëÛ·¯Ûc="nРAVé¨M›6Y+qV¦v!wGy¤«Q£†9r¤»üòËÝ1ÇãöÜsO·dÉ;?jkÒ¤‰kÙ²e…S±ÏãM}~ÈçŸî¦OŸnC£†Þ P/^lÕgΜiÿVU]@åElr®ºê*w÷Ýw»aÆUª ¤* )xÔ¶mÛXŸ0`€=vìØq£jrW¿~}7|øp׳gO׿ûóªW¯î:wîìn¹å·å–[Vø8û<ÞÔç‡hMU¥³n¸ÎPáfÏžíÆŽkÿ½ÓN;Y˜Pymö¿÷îÝÛõêÕ‹Þ›Œ 6Ø£*Dnì¦Nj‡z(´TåoáÂ…nÅŠn×]wµÜ[lÁ}ºLžÓ¸qc tês2úûï¿-ðqÜqÇI“&M’~O]O?kKÚÙÔ Aª%JØÃ ó+V,îóLö=|¨Î…:;Ž ïÚ¹Ki‡¿–-[†Žÿúë¯)·Sñº‰v=1gíj}饗Úß9Ÿ}öYŽ¿@„’Ô 7ÜãçåË—[@ 33S† ÷ñìܹS8 »wïvý™O:ôšî©üÖ>ˆúGšËúõëeóæÍy~×a‡&Õ«Wú jüε§c*_¾¼ìÙ³GæÏŸoA¯³Î:Ëj™ŸíÛ·ËÂ… åŸþ‘*UªÈÉ'Ÿ\àûuÏ|ÿý÷¶Vº–¹Ã|Aß×üî‰ öfÐ÷Ýýõ—üûï¿ö³þû—_~±ß©uÌo̱æu¸©³—õ‹ö{/ž÷¼Dîñ-ZHFF†Lžú¨<ñÄ¡Ÿ{Y¹rå,¤—ߘcÅëðRg/ëçw$âž—È=®×¯þǾüòKû.®U«Ô! 1W\q…̘1C.¿ür iGÇ©S§Ê°aÃ,ñÓO?I©R¥,`Я_?:t¨Ü~ûíòÁØç5ü¤¡í¼¦Á?ýY;¾½ð òÛo¿ÉwÞiµ4(¨¨ÜM9fΜiçiÒ¤‰…›Ö¬Yc 6hÐ@Ú·oïk^ê™gž‘±cÇÚ{5ì’––&sæÌ±‡C碡í.§!‡Ûºx¥¡ÛÛo¿mA²ðNWÚÕN»S–.]Z.ºè"_õôRÓîÝ»G½¦áÜÔ; ;¯—yê:k@íHvî¹çÊï¿ÿhÍ5ȧ¡0 M™2ź¬i@hÔ¨QÖM?«çE Ú?÷Üsujshç¶ÇÜjtä‘Gú¾c5~½~4luá…ZøQ;Þi _{í5 ré¿zMi{ôèaÁ-íX§s8p Õ[;I¶k×.ô~½ö4zþùçÛgtº¾'N”®]»J‡½¯µ'¢Ù›Aß :wîlÝtÇouŸè=3÷^ÊoÌAóº¼ÔÙÏúyýÞKä=/Q{ÜéÀªã' AH ùä“O,¨¤É“'‡ŽŸ}öÙRš>}ºŒ7Î:J) ƒhÈçÃ?´€Q§N,¸²oß>ëZX¦L뎨÷ßß‚ ×]w¯€Ln?þø£uêÒ`‡CC‰ÌÒMx˜Â뼜ð¢†5œ.uõêÕ³ÎWŽŸþÙžÐE87uñJ»\éxµÓ•vØ ïÔ5kÖ,Ù¶m›GÃ\±®iÐkê¦Þ±PØy½Ìó»ï¾³°›väÔîfA×\»µiG:í Ú±cG;Ö¸qc "iˆI÷‡ß5ˆ¦þÚRíø¨æô³^ò{ ÆjübÓ ²vth(R×YϯZkHë¬ãÕ ˜£iÓ¦ ÓëùÝwß …àôzפ®‰^÷NgIí´×«W/9餓¿¯ÅrO¸KÐ÷‚úõëÛcþüù„ÔkJkž(^ö€W~ÖÏËþHõ{žß…! 
\qJ:PÚÉ07 ƒ( û84ЧÁ>Õ·o_yúé§íu G´nÝ:¦cÕUxB9ÝW®\Õ¼Ž8â{ÖÎSùÙ¸q£=G Bƪ.×^{­=¿ñÆ9ŽgggÛ³vóŠWMƒä¦ÞÉ~^ i5/ /5ÿå—_ìYƒ?áZ¶lõúUíÞ¦«¬¬,ëîÍ5«ñk¿ð¤jذ¡…ïvìØ!Ÿ}öYè¸v& À948¦V­Z:¦ANuÇw„Bíé÷D{ ÆrO$ò~L¼ì¯ü¬_´k’J÷<¿5r¾“#u¨ph£#$P„üúë¯öüꫯZ§«pëׯ·çÜá‚V­ZYÀïÍ7ß´ŽN2bĈ˜U;HåvÔQGÙó¿ÿþÕ¼4¼8eÊéׯŸu[ìÝ»·tèÐAÒÒÒBïÙ¼y³=çpŠE]ºté"wÞy§uÖÛºu«Õ@Ã[3g䀋(‹GMƒä¦ÞÉ~^íøW¶lÙ˜Õ¼Zµjö¬kÞ]páÂ…öœ;(ï:ìß¿_zöìi]O5•žžõ½%ãϯcªv•ûöÛoåÏ?ÿÌóÚ–-[dÞ¼y²víZÙ°aƒ¬[·ÎŽÿ÷ß¡÷8­:uêÄíŒåžHäý ¹Ù^ùY¿h×$•îy~kTµjU{>TºÜ# !Nèûï¿—’%Kæy½nݺҶmÛ<ÇÀŸÒnON¨!Þrwbó;¯FÉ—_~)ýû÷—?þXæÌ™#'žx¢¼òÊ+Ò¤I{O©R¥ìÙ DFt]*V¬(;v”‰'ÊÔ©S¥GÙ¹s§u‹,Q¢DÜj$7õ.JçõSó[o½Už}öYëf¨]èš5k&}ô‘í‡î¯DÖA»žjGHÝ“N·¶ î-ñ¿ܾ}{èØÁƒ-0¦Ý]wíÚ%¥K—¶kPÞÎë§3 Óq/×`,÷D"ïÉÄËð*¨õó²&©tÏó[#ç;9ÖAz©‡ $P„sÌ1òÇÈ믿. 4põ툥5È™™)ï¿ÿ¾ušj×®]JÏ«^½z¸X¾|¹<öØc2aÂiß¾½ý|ôÑG[F­^½:®uÑ ™!u.úïììl;Þ½{÷”Þ{…Õ»¨×+íú©jÖ¬)cÆŒ±G±bÅäòË/—‘#GFP‹¦ÚíqðàÁô>|x`×`<×ÑéJyì±Ç†Ž=òÈ#6Ÿ-ZXËéH§aÎÓO?ÝêïйÿóÏ?öˆ×¾‰õž€·=*ë—*÷<¿5rB×Îw48øVŠ5jÈ‚ ä‡~pV:t¨,[¶Lîºë.ëÖtÉ%—Èm·Ý&?þø£¤§§‡ÞçFvìØ‘órhðåµ×^“Ý»wˤI“dÞ¼yÒ¥K—P`Ä ]ø­‹W­Zµ’*UªÈ§Ÿ~jÁ•Ù³g[G¯Xt+L,Ö4¿zÇZAçMäÞu¼üòËÖ5qáÂ…ò×_Ɇ $+++ª½Tý{õêeÝòtŒ•*U ü rüTÔn~á*íê7þ|û·ÖÔ1mÚ4{Ön€µjÕ*ð¼Õ«W—%K–ØúœvÚiEjO$ê^ ¼ìT[¿d¿çù­‘óœl¡N‰WœEG×®]íù©§ž’M›6åy}Ïž=ÖéСA?í(¥Á<í×±cGë,µråJû9œvLSß}÷]ÒÏkÑ¢EyÞã/222ìYÃNÑNrúùp^ê¢´ÓØ-·Ü"kÖ¬)t.iiiÒ­[7Ù¿¿uÔs_}õÕ Ù/…­©Ûy¹©w,¸=o"÷®C×[C´ƒ ’o¾ùÆAúóºuëòýL<ê¯A% åj§Ó«®º*°kÐË5áeü‹/¶±hÒñÒK/YgדO>Y5j:îŒG;^†¯ÃsÏ=—ç|W\q…=?ýôÓ9æwàÀ;¦!³d؉ºx]ÓXs;/{ Ö/•îy~kä!µc'„£#$P„´iÓF:tè 3fÌ3Î8ÃvÚmpÛ¶mBx÷Ýw-4¤¡í¢vã7ZPDƒ>åÊ•³ß1jÔ(ùä“OdĈöy'ŒÐ¸qc™8q¢ôïßß:nÙ²Eúöí—îi^楬úõëKóæÍ-À˜™™)_}õ•L˜0Á:QéøÀŽk8pΜ92}út9r¤”*UJúõë›Ê=¶HÇó{¯Òn‰úˆôšÛyé¸î¿ÿ~ë6wîÜÐû4Œ1zôh)[¶lŽc„Ô°SŸ>}|ÕEi×+¥Á07N=õT ª,X°@Î>ûlןóRg75-hMÝÎËK½ƒäå¼~önÐ5×P’v1üßÿþ'Íš5“]»vÉæÍ›åóÏ?·®Œº×jÕª•#¨tý#Y?¯>úè£<¿[Çâ!½\ƒ^® ¯û§AƒrÇwÈM7ÝdÁEuÜqÇɘ1cäâ‹/Îñ^íÞª_Ç/ °`£^wÜÔ5¯ƒ®×üùó­Ë ÎSç¨*W®,?þ¸#ƒ¾¯ùÙ±ºv/ðzŸs~¿Û±y­©Û±xÙ^ÇáeýüÞ£qÏKäÿùçŸå·ß~³ÿ0AµjÕøÃ@úŸ:8¨7vm3 ¹íܹÓÂN ¡0Y½zµdddX¨@C 턵uëV 1…wQsh§7 Oä6lܸÑ:qi7§ðàÅöíÛ¥téÒrØa9ûµD:žß{•% …4/‡e4ä¸{÷nk… ò¼Gƒ"ôjÒ¤‰3üÔEÃŒúy NF “å§gÏž2nÜ8 µÜzë­®?ç¥ÎnkiM½ÎËM½ó£ ¡mÚ´I*V¬˜çuí~¦5 S½zußçõºwƒªù¢E‹¬»Yûöíå½÷ÞËó^ -i7Eí:ªëbYÿÂæêVA× Ÿk¢°ñk÷Ô† 
J«V­,„¬ï_¶l™qÄR¥J•B¯í2wüñÇÛu­ô<º>‘î¥ÜÒ@–¾¦A»H÷hïk~öD<îù]#~ö£ž'R:Úï ?ûËËp3?ëí÷^<ïy‰Úã<òˆ 8ÐB«áakÐü#!€’žžîéý™™™öˆDÃ=(Ê•+ñx¥J•ì‘[~ÿ"/¨K`™2e¢š—Cµk×.ð=çwžT­ZÕ‚5kÖ¬‘cŽ9Æs]4¸qøá‡Ë‹/¾èz]4h©²4€Ó¥KOkê¥ÎnkiM½ÎËM½cÁËy½îÝ j¾bÅ {Öm~ûA…w@‹UýƒêÐYÐ5èçšðºôýuêÔq}Íä~¯†¹ò£!¯SN9Ås½\ƒ~öD<îù]#~ö£×±¹­©Ÿýåe¸‡Ÿõ‹ö{/ž÷¼Díñ·ß~۞û1@è»è=EÿxoÞ¼9ÕpÈѤvkÓ¦œuÖYž>»nÝ: ã´k×κչ¥ÝífΜia–+¯¼2éjâw^~?^V­ZeõÐþÿÛ5d§5Ò.¨úšv"ëÓ§OÄŽ‘ÉN?cÆŒ‘½{÷J½zõ,ø£wìØ!K—.•%K–X ÷ú믷ùÇ»þ©°w´«Ýرc¥FÒ­[·”¿ïx݇Òý Æ’Jë—*5Ú°aƒuHÎÊÊ’îÝ»óÇ €æÎ+ÿ!À<Å@xIEND®B`‚charm-2.1.1/src/github.com/ajstarks/svgo/newsvg0000775000175000017500000000077712672604563020506 0ustar marcomarco#!/bin/sh if test $# -lt 1 then echo "specify a file" exit 2 fi if test ! -f $1 then cat < $1 package main import ( "github.com/ajstarks/svgo" "os" ) var ( width = 500 height = 500 canvas = svg.New(os.Stdout) ) func background(v int) { canvas.Rect(0, 0, width, height, canvas.RGB(v, v, v)) } func main() { canvas.Start(width, height) background(255) // your code here canvas.Grid(0, 0, width, height, 10, "stroke:black;opacity:0.1") canvas.End() } ! 
fi $EDITOR $1 charm-2.1.1/src/github.com/ajstarks/svgo/webfonts/0000775000175000017500000000000012672604563021063 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/webfonts/webfonts.go0000664000175000017500000000247312672604563023247 0ustar marcomarco// webfonts demo // +build !appengine package main import ( "fmt" "io/ioutil" "net/http" "net/url" "os" "strings" "github.com/ajstarks/svgo" ) var ( canvas = svg.New(os.Stdout) width = 500 height = 1100 fontlist = "Sue Ellen Francisco|Over the Rainbow|Pacifico|Inconsolata|Miltonian|Megrim|Monofett|Permanent Marker|Homemade Apple|Ultra" ) const ( gwfURI = "http://fonts.googleapis.com/css?family=" fontfmt = "\n" gfmt = "fill:white;font-size:36pt;text-anchor:middle" ) func googlefont(f string) []string { empty := []string{} r, err := http.Get(gwfURI + url.QueryEscape(f)) if err != nil { return empty } defer r.Body.Close() b, rerr := ioutil.ReadAll(r.Body) if rerr != nil || r.StatusCode != http.StatusOK { return empty } canvas.Def() fmt.Fprintf(canvas.Writer, fontfmt, b) canvas.DefEnd() return strings.Split(fontlist, "|") } func main() { canvas.Start(width, height) canvas.Title("Webfonts") if len(os.Args) > 1 { fontlist = os.Args[1] } fl := googlefont(fontlist) canvas.Rect(0, 0, width, height) canvas.Ellipse(width/2, height+50, width/2, height/5, "fill:rgb(44,77,232)") canvas.Gstyle(gfmt) for i, f := range fl { canvas.Text(width/2, (i+1)*100, "Hello, World", "font-family:"+f) } canvas.Gend() canvas.End() } charm-2.1.1/src/github.com/ajstarks/svgo/colortab/0000775000175000017500000000000012672604563021041 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/colortab/colortab.go0000664000175000017500000000471312672604563023202 0ustar marcomarco// colortab -- make a color/code placemat // +build !appengine package main import ( "bufio" "flag" "fmt" "os" "strings" "github.com/ajstarks/svgo" ) func main() { var ( canvas = svg.New(os.Stdout) filename = flag.String("f", "svgcolors.txt", "input file") fontname = 
flag.String("font", "Calibri,sans-serif", "fontname") outline = flag.Bool("o", false, "outline") neg = flag.Bool("n", false, "negative") showrgb = flag.Bool("rgb", false, "show RGB") showcode = flag.Bool("showcode", true, "only show colors") circsw = flag.Bool("circle", true, "circle swatch") fontsize = flag.Int("fs", 12, "fontsize") width = flag.Int("w", 1600, "width") height = flag.Int("h", 900, "height") rowsize = flag.Int("r", 32, "rowsize") colw = flag.Int("c", 320, "column size") swatch = flag.Int("s", 16, "swatch size") gutter = flag.Int("g", 11, "gutter") err error colorfmt, tcolor, line string ) flag.Parse() f, oerr := os.Open(*filename) if oerr != nil { fmt.Fprintf(os.Stderr, "%v\n", oerr) return } canvas.Start(*width, *height) canvas.Title("SVG Color Table") if *neg { canvas.Rect(0, 0, *width, *height, "fill:black") tcolor = "white" } else { canvas.Rect(0, 0, *width, *height, "fill:white") tcolor = "black" } top := 32 left := 32 in := bufio.NewReader(f) canvas.Gstyle(fmt.Sprintf("font-family:%s;font-size:%dpt;fill:%s", *fontname, *fontsize, tcolor)) for x, y, nr := left, top, 0; err == nil; nr++ { line, err = in.ReadString('\n') fields := strings.Split(strings.TrimSpace(line), "\t") if nr%*rowsize == 0 && nr > 0 { x += *colw y = top } if len(fields) == 3 { colorfmt = "fill:" + fields[1] if *outline { colorfmt = colorfmt + ";stroke-width:1;stroke:" + tcolor } if *circsw { canvas.Circle(x, y, *swatch/2, colorfmt) } else { canvas.CenterRect(x, y, *swatch, *swatch, colorfmt) } canvas.Text(x+*swatch+*fontsize/2, y+(*swatch/4), fields[0], "stroke:none") var label string if *showcode { if *showrgb { label = fields[1] } else { label = fields[2] } canvas.Text(x+((*colw*4)/5), y+(*swatch/4), label, "text-anchor:end;fill:gray") } } y += (*swatch + *gutter) } canvas.Gend() canvas.End() } charm-2.1.1/src/github.com/ajstarks/svgo/colortab/svgcolors.txt0000775000175000017500000001022712672604563023630 0ustar marcomarcoaliceblue #F0F8FF 240,248,255 antiquewhite #FAEBD7 
250,235,215 aqua #00FFFF 0,255,25 aquamarine #7FFFD4 127,255,21 azure #F0FFFF 240,255,255 beige #F5F5DC 245,245,220 bisque #FFE4C4 255,228,196 black #000000 0,0,0 blanchedalmond #FFEBCD 255,235,205 blue #0000FF 0,0,255 blueviolet #8A2BE2 138,43,226 brown #A52A2A 165,42,42 burlywood #DEB887 222,184,135 cadetblue #5F9EA0 95,158,160 chartreuse #7FFF00 127,255,0 chocolate #D2691E 210,105,30 coral #FF7F50 255,127,80 cornflowerblue #6495ED 100,149,237 cornsilk #FFF8DC 255,248,220 crimson #DC143C 220,20,60 cyan #00FFFF 0,255,255 darkblue #00008B 0,0,139 darkcyan #008B8B 0,139,139 darkgoldenrod #B8860B 184,134,11 darkgray #A9A9A9 169,169,169 darkgreen #006400 0,100,0 darkgrey #A9A9A9 169,169,169 darkkhaki #BDB76B 189,183,107 darkmagenta #8B008B 139,0,139 darkolivegreen #556B2F 85,107,47 darkorange #FF8C00 255,140,0 darkorchid #9932CC 153,50,204 darkred #8B0000 139,0,0 darksalmon #E9967A 233,150,122 darkseagreen #8FBC8F 143,188,143 darkslateblue #483D8B 72,61,139 darkslategray #2F4F4F 47,79,79 darkslategrey #2F4F4F 47,79,79 darkturquoise #00CED1 0,206,209 darkviolet #9400D3 148,0,211 deeppink #FF1493 255,20,147 deepskyblue #00BFFF 0,191,255 dimgray #696969 105,105,105 dimgrey #696969 105,105,105 dodgerblue #1E90FF 30,144,255 firebrick #B22222 178,34,34 floralwhite #FFFAF0 255,250,240 forestgreen #228B22 34,139,34 fuchsia #FF00FF 255,0,255 gainsboro #DCDCDC 220,220,220 ghostwhite #F8F8FF 248,248,255 gold #FFD700 255,215,0 goldenrod #DAA520 218,165,32 gray #808080 128,128,128 green #008000 0,128,0 greenyellow #ADFF2F 173,255,47 grey #808080 128,128,128 honeydew #F0FFF0 240,255,240 hotpink #FF69B4 255,105,180 indianred #CD5C5C 205,92,92 indigo #4B0082 75,0,130 ivory #FFFFF0 255,255,240 khaki #F0E68C 240,230,140 lavender #E6E6FA 230,230,250 lavenderblush #FFF0F5 255,240,245 lawngreen #7CFC00 124,252,0 lemonchiffon #FFFACD 255,250,205 lightblue #ADD8E6 173,216,230 lightcoral #F08080 240,128,128 lightcyan #E0FFFF 224,255,255 lightgoldenrodyellow #FAFAD2 250,250,210 lightgray 
#D3D3D3 211,211,211 lightgreen #90EE90 144,238,144 lightgrey #D3D3D3 211,211,211 lightpink #FFB6C1 255,182,193 lightsalmon #FFA07A 255,160,122 lightseagreen #20B2AA 32,178,170 lightskyblue #87CEFA 135,206,250 lightslategray #778899 119,136,153 lightslategrey #778899 119,136,153 lightsteelblue #B0C4DE 176,196,222 lightyellow #FFFFE0 255,255,224 lime #00FF00 0,255,0 limegreen #32CD32 50,205,50 linen #FAF0E6 250,240,230 magenta #FF00FF 255,0,255 maroon #800000 128,0,0 mediumaquamarine #66CDAA 102,205,170 mediumblue #0000CD 0,0,205 mediumorchid #BA55D3 186,85,211 mediumpurple #9370DB 147,112,219 mediumseagreen #3CB371 60,179,113 mediumslateblue #7B68EE 123,104,238 mediumspringgreen #00FA9A 0,250,154 mediumturquoise #48D1CC 72,209,204 mediumvioletred #C71585 199,21,133 midnightblue #191970 25,25,112 mintcream #F5FFFA 245,255,250 mistyrose #FFE4E1 255,228,225 moccasin #FFE4B5 255,228,181 navajowhite #FFDEAD 255,222,173 navy #000080 0,0,128 oldlace #FDF5E6 253,245,230 olive #808000 128,128,0 olivedrab #6B8E23 107,142,35 orange #FFA500 255,165,0 orangered #FF4500 255,69,0 orchid #DA70D6 218,112,214 palegoldenrod #EEE8AA 238,232,170 palegreen #98FB98 152,251,152 paleturquoise #AFEEEE 175,238,238 palevioletred #DB7093 219,112,147 papayawhip #FFEFD5 255,239,213 peachpuff #FFDAB9 255,218,185 peru #CD853F 205,133,63 pink #FFC0CB 255,192,203 plum #DDA0DD 221,160,221 powderblue #B0E0E6 176,224,230 purple #800080 128,0,128 red #FF0000 255,0,0 rosybrown #BC8F8F 188,143,143 royalblue #4169E1 65,105,225 saddlebrown #8B4513 139,69,19 salmon #FA8072 250,128,114 sandybrown #F4A460 244,164,96 seagreen #2E8B57 46,139,87 seashell #FFF5EE 255,245,238 sienna #A0522D 160,82,45 silver #C0C0C0 192,192,192 skyblue #87CEEB 135,206,235 slateblue #6A5ACD 106,90,205 slategray #708090 112,128,144 slategrey #708090 112,128,144 snow #FFFAFA 255,250,250 springgreen #00FF7F 0,255,127 steelblue #4682B4 70,130,180 tan #D2B48C 210,180,140 teal #008080 0,128,128 thistle #D8BFD8 216,191,216 tomato #FF6347 
255,99,71 turquoise #40E0D0 64,224,208 violet #EE82EE 238,130,238 wheat #F5DEB3 245,222,179 white #FFFFFF 255,255,255 whitesmoke #F5F5F5 245,245,245 yellow #FFFF00 255,255,0 yellowgreen #9ACD32 154,205,50 charm-2.1.1/src/github.com/ajstarks/svgo/funnel/0000775000175000017500000000000012672604563020523 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/funnel/funnel.go0000664000175000017500000000103712672604563022342 0ustar marcomarco// funnel draws a funnel-like shape // +build !appengine package main import ( "os" "github.com/ajstarks/svgo" ) var canvas = svg.New(os.Stdout) var width = 320 var height = 480 func funnel(bg int, fg int, grid int, dim int) { h := dim / 2 canvas.Rect(0, 0, width, height, canvas.RGB(bg, bg, bg)) for size := grid; size < width; size += grid { canvas.Ellipse(h, size, size/2, size/2, canvas.RGBA(fg, fg, fg, 0.2)) } } func main() { canvas.Start(width, height) canvas.Title("Funnel") funnel(0, 255, 25, width) canvas.End() } charm-2.1.1/src/github.com/ajstarks/svgo/tsg/0000775000175000017500000000000012672604563020031 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/tsg/tsg.go0000664000175000017500000000525612672604563021165 0ustar marcomarco// tsg -- twitter search grid // +build !appengine package main import ( "encoding/xml" "flag" "fmt" "io" "net/http" "net/url" "os" "github.com/ajstarks/svgo" ) var canvas = svg.New(os.Stdout) // Feed is the Atom feed structure type Feed struct { XMLName xml.Name `xml:"http://www.w3.org/2005/Atom feed"` Entry []Entry `xml:"entry"` } // Entry defines an entry within an Aton feed type Entry struct { Link []Link `xml:"link"` Title string `xml:"title"` Author Person `xml:"author"` } // Link defines a link within an Atom feed type Link struct { Rel string `xml:"rel,attr"` Href string `xml:"href,attr"` } // Person defines a person responsible for the tweet type Person struct { Name string `xml:"name"` } // Text defines the text of the tweet type Text struct { Type string `xml:",attr"` Body string 
`xml:",chardata"` } var ( nresults = flag.Int("n", 100, "Maximum results (up to 100)") since = flag.String("d", "", "Search since this date (YYYY-MM-DD)") ) const ( queryURI = "http://search.twitter.com/search.atom?q=%s&since=%s&rpp=%d" textfmt = "font-family:Calibri,Lucida,sans;fill:gray;text-anchor:middle;font-size:48px" imw = 48 imh = 48 ) // ts dereferences the twitter search URL and reads the XML (Atom) response func ts(s string, date string, n int) { r, err := http.Get(fmt.Sprintf(queryURI, url.QueryEscape(s), date, n)) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) return } if r.StatusCode == http.StatusOK { readatom(r.Body) } else { fmt.Fprintf(os.Stderr, "Twitter is unable to search for %s (%s)\n", s, r.Status) } r.Body.Close() } // readatom unmarshals the twitter search response and formats the results into a grid func readatom(r io.Reader) { var twitter Feed err := xml.NewDecoder(r).Decode(&twitter) if err == nil { tgrid(twitter, 25, 25, 50, 50, 10) } else { fmt.Fprintf(os.Stderr, "Unable to parse the Atom feed (%v)\n", err) } } // tgrid makes a clickable grid of tweets from the Atom feed func tgrid(t Feed, x, y, w, h, nc int) { var slink, imlink string xp := x for i, entry := range t.Entry { for _, link := range entry.Link { switch link.Rel { case "alternate": slink = link.Href case "image": imlink = link.Href } } if i%nc == 0 && i > 0 { xp = x y += h } canvas.Link(slink, slink) canvas.Image(xp, y, imw, imh, imlink) canvas.LinkEnd() xp += w } } // for every non-flag argument, make a twitter search grid func main() { flag.Parse() width := 550 height := 700 canvas.Start(width, height) for _, s := range flag.Args() { canvas.Title("Twitter search for " + s) ts(s, *since, *nresults) canvas.Text(width/2, height-50, s, textfmt) } canvas.End() } charm-2.1.1/src/github.com/ajstarks/svgo/android/0000775000175000017500000000000012672604563020654 5ustar 
marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/android/android.go0000664000175000017500000000323412672604563022625 0ustar marcomarco// android draws bugdroid, the Android mascot // +build !appengine package main import ( "fmt" "os" "github.com/ajstarks/svgo" ) var ( width = 500 height = 500 canvas = svg.New(os.Stdout) ) const androidcolor = "rgb(164,198,57)" func background(v int) { canvas.Rect(0, 0, width, height, canvas.RGB(v, v, v)) } func android(x, y int, fill string, opacity float64) { var linestyle = []string{`stroke="` + fill + `"`, `stroke-linecap="round"`, `stroke-width="5"`} globalstyle := fmt.Sprintf("fill:%s;opacity:%.2f", fill, opacity) canvas.Gstyle(globalstyle) canvas.Arc(x+30, y+70, 35, 35, 0, false, true, x+130, y+70) // head canvas.Line(x+60, y+25, x+50, y+10, linestyle[0], linestyle[1], linestyle[2]) // left antenna canvas.Line(x+100, y+25, x+110, y+10, linestyle[0], linestyle[1], linestyle[2]) // right antenna canvas.Circle(x+60, y+45, 5, "fill:white") // left eye canvas.Circle(x+100, y+45, 5, `fill="white"`) // right eye canvas.Roundrect(x+30, y+75, 100, 90, 10, 10) // body canvas.Rect(x+30, y+75, 100, 80) canvas.Roundrect(x+5, y+80, 20, 70, 10, 10) // left arm canvas.Roundrect(x+135, y+80, 20, 70, 10, 10) // right arm canvas.Roundrect(x+50, y+150, 20, 50, 10, 10) // left leg canvas.Roundrect(x+90, y+150, 20, 50, 10, 10) // right leg canvas.Gend() } func main() { canvas.Start(width, height) canvas.Title("Android") background(255) android(100, 100, androidcolor, 1.0) canvas.Scale(3.0) android(50, 50, "gray", 0.5) canvas.Gend() canvas.Scale(0.5) android(100, 100, "red", 1.0) canvas.Gend() canvas.End() } charm-2.1.1/src/github.com/ajstarks/svgo/imfade/0000775000175000017500000000000012672604563020461 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/imfade/imfade.go0000664000175000017500000000111712672604563022235 0ustar marcomarco// imfade progressively fades the Go gopher image // +build !appengine package main import ( "fmt" "os" 
"github.com/ajstarks/svgo" ) var canvas = svg.New(os.Stdout) func main() { width := 768 height := 128 image := "gophercolor128x128.png" if len(os.Args) > 1 { image = os.Args[1] } canvas.Start(width, height) canvas.Title("Image Fade") opacity := 1.0 for i := 0; i < width-128; i += 100 { canvas.Image(i, 0, 128, 128, image, fmt.Sprintf("opacity:%.2f", opacity)) opacity -= 0.10 } canvas.Grid(0, 0, width, height, 16, "stroke:gray; opacity:0.2") canvas.End() } charm-2.1.1/src/github.com/ajstarks/svgo/imfade/gophercolor128x128.png0000664000175000017500000003021112672604563024365 0ustar marcomarco‰PNG  IHDR€€Ã>aË DiCCPICC Profilex–wT×ÇßÌl/´]–"eé½·¤.½H•& ËîKYÖe°7D"ŠˆV$(bÀh(+¢Xì "J F•ÌÆõ÷;'ùýNÞw>ó}÷žwçÞûÎ(!a¬@¶P"Žô÷fÆÅ'0ñ½D€6p¸¹¢Ð(¿h€®@_63u’ñ_ àõ-€Z®[„3™éÿïC‘+K€ÂÑ;?—‹r!ÊYù‘LŸD™ž’)c#c1š ʪ2NûÄæú|bOó²…<ÔG–³ˆ—Í“qÊó¤|”‘”‹òü|”o ¬Ÿ%Í üez6Ÿ“ †"Ó%|n:ÊÖ(SÄÑ‘l”ç@ ¤}Å)_±„_€æ ;G´D,HK—0¹&Lgg3€ŸŸÅ—H,Â9ÜLŽ˜Çdçd‹8Â%|úfYP’Õ–‰ÙÑÆÙÑÑÂÖ-ÿçõ›Ÿ½þd½ýäñ2âÏžAŒž/Ú—Ø/ZN-¬)´6[¾h);h[€êÝ/šþ>ä híûê{²yI—HD.VVùùù–>×RVÐÏë:|öü{øêù ¢‚¡[£°R¡Fá„ ”"MÑF1L1[±Tñ°âeÅ'Jx%C%_%žR¡Ò¥óJ#4„¦GcÓ¸´u´:ÚÚ(G7¢Ò3è%ôïè½ôIe%e{ååååSÊC „aÈdd1ÊÇ·ïT4U¼Tø*›TšTT¦Uç¨zªòU‹U›Uoª¾ScªùªeªmUkS{ ŽQ7UPÏWߣ~A}b}Žëîœâ9ÇæÜÕ€5L5"5–iÐèјÒÔÒô×iîÔ<¯9¡ÅÐòÔÊЪÐ:­5®MÓv×hWhŸÑ~ÊTfz1³˜UÌ.椎†N€ŽTg¿N¯ÎŒ®‘î|ݵºÍºôHz,½T½ ½N½I}mýPýåúúw ˆ,ƒtƒÝÓ†F†±† Û Ÿ©-5j4ºoL5ö0^l\k|ÃgÂ2É4ÙmrÍ6u0M7­1í3ƒÍÍf»ÍúͱæÎæBóZóA Š…—EžE£Å°%Ã2Är­e›ås+}««­VÝV­¬³¬ë¬ïÙ(ÙÙ¬µé°ùÝÖÔ–k[c{ÃŽjçg·Ê®Ýî…½™=ß~ýmšC¨Ã‡N‡ŽNŽbÇ&Çq'}§d§]Nƒ,:+œUʺäŒuöv^å|Òù­‹£‹Äå˜Ëo®®™®‡]ŸÌ5šËŸ[7wÄM×ã¶ßmÈéžì¾Ï}ÈCǃãQëñÈSÏ“çYï9æeâ•áuÄë¹·µ·Ø»Å{šíÂ^Á>ëƒøøûûôú*ùÎ÷­ö}è§ë—æ×è7éïà¿Ìÿl6 8`kÀ` f 7°!p2È)hEPW0%8*¸:øQˆiˆ8¤# ÝzžÁ<á¼¶0¶-ìA¸Qøâð#pá5#m"—GvGÑ¢’¢G½ŽöŽ.‹¾7ßx¾t~gŒ|LbLCÌt¬OlyìPœUÜŠ¸«ñêñ‚øö|BLB}ÂÔßÛŒ&:$%ÞZh´°`áåEꋲJ’Oâ$OÆ&Ç&N~Ï ãÔr¦RSv¥LrÙÜÜgIsKÛ–6žî‘^™>!` ª/22öfLg†e̜͊ÍjÎ&d'gŸ* 3…]9Z99ý"3Q‘hh±Ëâí‹'ÅÁâú\(wan»„ŽþLõH¥ë¥Ãyîy5yoòcò( z–˜.Ù´dl©ßÒo—a–q—u.×Y¾fùð ¯ûWB+SVv®Ò[U¸jtµÿêCkHk2×ü´ÖzmùÚWëb×uj®.Y￾±H®H\4¸ÁuÃÞ˜‚½›ì6íÜô±˜W|¥Äº¤²ä})·ôÊ76ßT}3»9uso™cÙž-¸-Â-·¶zl=T®X¾´|d[è¶Ö fEqÅ«íIÛ/WÚWîÝAÚ!Ý1TRÕ¾Sç–ï«Ó«oÖx×4ïÒØµi×ônÞî=ž{šöjî-ÙûnŸ`ßíýþû[k 
k+àäx\S×ý-ëÛ†zõú’ú…‡Eêjpjh8¬q¸¬n”6ŽI§¾ökÎ×:úçöŸð8wÝçúÅ7®Þœw³ÿÖü[·‡nón?¹“uçÅݼ»3÷VßÇÞ/~ ð ò¡ÆÃÚŸM~nr:5ì3Üó(êѽîȳ_ry?Zø˜ú¸rL{¬á‰í““ã~ãמ.x:úLôlf¢èWÅ_w=7~þÃož¿õLÆM޾¿˜ý½ô¥Ú˃¯ì_uN…O=|ýzfºøÚ›CoYo»ßž›É_õÁäCÇÇà÷g³ggÿ˜óüI°)˜ pHYs\F\F”CA IDATxí] \é÷ÿvÛ÷´/B I(EÉ.²/cÉc0ó7ŒÁ0»Y3fùÍ ƒ†Æ6Ã0ö-k¡, ¥’J*´ïõ?çM$Ê­»t¯œÏç­{ïû¼ÏzÞóœýQAÃU¶]ºti?š‚ûô?ãÑçóO¥ŒTDct¤ËÓÔØ¸­•¹±«–¦š…±ž‘­µ¹¾Šš¥%¥eÙ¹÷SÓ3o¤=È<õÆ­“Tþ<]Ét½,©€]ÜF$]çèJ¥K)àeE€Æ4ûnjj^Í›Y÷vwqhÓªES]ÖŽ°±4EII)²²s‘˜’Šôû™(£Âfưob…ÂÂbž¾ˆðȘ´ð«·Â£ã6Óíèʧ«2t331™ekiÒÍÂÌÈT$RAúƒL\¼{7??ÿ ù9Pp³òŠøùeBšàA†úú~®­ì»¹¶´3îäÑ Í[À¤‘4ÔÕq;éÖn @hÄuä䣑‰9TTU‘œ”Œ¬Ì 42Іw»–˜<º?œìm AÒq6< ûŽ…ž;ra Õÿ7]™€è«~Ý=æÎšôššµ…)Õ­&¬mIi)¢bðïž ì=šœ—Ÿÿ1P¶PaSHx€÷ñÉv­ßé×ãeÿ„7Y¶¤¤***ÈÊÉÃoëvbÏÑ30·n‚/>ÿžžž°³k555dff"99—.aûöˆ¸§&&xïÍ‘„@–x‘…]GNãM"’RÒ"¦Œè?ç­jÅÅ%D=Ê Jm¡¬ Eô]MU$|¿på&¯Ùްˆë?~Ÿ[K”Ttéರ»Wk·~ÝÚÃÜÔEE%(¦…gà73‰Þâ9_¯@‰š>fΜÿ±ÐÓÓ«q-“ðÕ‚8x3ß„žÞí„òLA¾øyôõ´ñÃ'oAOGñwRp#6^í] I”†)††:òò 0ýËe8~m"U°®ÆFë馲"€§{k§ÏÇí9 w—vª<Ù…EE(-}Bi5é·›q‰˜ýõJø¿ñ>úðƒ.|Õ5Ø _÷Ço_½ƒÖNvü’#;7OXT]m-´jÞ[ö€…ucA¥à!~þìmèhi¢” kiªãÔ¹HL»tcQIÑëUëW„ï,)ÑþûÉ›£ú­š7k\ëÖNÍD%¥ôÆ3)~²öDÖUM‹ÿöÜÅxkêLÌŸ÷%½‘µgóæŽ01³À—À·«»@Q´45Сm üòׄœ¿JÔ¦ ¼½q/%aá—¡§« ¯vÎb„dj|>/3+ç/ê@¥^Öº;2y@™À§™å¶OÞ5r’_uÞwyá«“4F†é_,©µþZ³º|®Zðßããã¢s§N8xä8®\¹ŸNíPPXcC}"œ:wÙ9¹¸yó&îÞ½+Ô˜WP€¡½½…•fþ£ˆ¶£íÁE™9«¨@Ñ š•ûm–‘ˆÅVûf@OÏýü0ÛyXŸÎDܗ>·ß¼°wóv*–ÿö+ÔiO® „††¢ÿþhÓ¦ Z·nÉ“'£Cì8‚øÄ{„L"¡ýNî­ðÇw³Ñ¶¥½€(eHKófÔf¹TÀí2Bª@5‹þówEE§mÛ:;®ž;}Ôä©þÕ˜éâů ˜[ôÇŒ3^¯Ý¶{äÈañ¯]»Fú€Bdgg#** ÙYY$6B_[M`ô˜ò°.Á´ˆAaq1.GÅÂÜÄßü&íýD…Ê¥ƒÄ”4lÞs|{~AÁîšú]_÷•‘x¶pük½ƒ[0½_âðYĪàî«›,&¹´×"9= “&M¬®Xµ¿/]º999ï‹D"˜˜˜àÆpsuÅ­ÛÉÂ]Q€‘QOWŸ¿ëoÞŸ(lÇÏ^‚*=Ç ®®ŠC'Γ™qºâEûÿ„V)DÏzP‚Æêhi}2ï½ñ­†Ð^Êo[~A¡X½cm\JÚhjéÁ¶1+kûxÅS...8pà „®]»bï¶¿‘G” 2ðVĿРê¤S˜ûÓçð£ïDEÎ_‰N§ò'*?£HŸˆè¹Ðâï%uìú%ó¦µêÛIx£X¦øÍ»”}èè°­§vи ÒÄÄÄ OŸ>¸sç|zöDVn¡ B¥© yùÜ»#~[0ƒDÃã8rÄøáÆ­Ä@*_µ¼¢|W PF³©6‰$ì…¾]=,çÍKscáÍ×Öz"º±HUD{meY¿ê$òOocéê3fÌÀÞ½{?Ÿ››‹ÈÈHL›6 ŽP! 
Ãò}uÀB7Ï6piÑŸÿ¼ë¶–¥?ÌÝQ]yEø½¾€_e.1Sß|8ÅC|½i¿ÌƱS™Ù‘¹Ž5zÎŽMо­¬,Lˆ,y®Àª_‡f6ÈÊ8.¨w Ø< >ô¤·< þù'®^½*H&LÀ»ï¾‹Ä¤$h‘„Á ¦êQ€Ì?„ ôðÉÔ1˜ôáOT´°ªIüÉ¡d}"€1 ú{ÿî~½=†ú:X½y?þ ž‘)\Z·®¥-qÓ¥7~ðüml:pn-¬1a¸/Q a²+Ä/ž«bâÌYw¯¥VŠÈkQèèåYë)ôññ_ÅDm˜ ä‹!::%EÐÑ&-_%ãó`†µ©9~ùršhʧ¿¬ÊÈ*¼Aˆpåyeëû·úBS=½ÍOóë5z`ì:| ËÖí†w·^øûßípum --­gæ&…´mÿmÛŽ¯–¯GÏöÍ1Ч£ð–VÖ ðÙ76GÀÁƒuB€ŠFÙHTvíÚ…V‰©‹Å”2%pkå€w^lòÝòM+k_ )·rŠð™H°ÜÁÅÜ´Ñú…Lrïѱ-¾^ºûƒ/aÙÒe3f´XI&­Û¼y_!5î¾zoí"*‚ÜÍóvÁ[ÈúýçqæÌiÁ(V¥5Ê!^ sG/|5c$›Ú¼P­¨ŠyžàÙ_ÿŽ£!á_ÑÇù÷忼¥›F†»ÿ÷É[î}ºzàÛßþÁ®c°g÷n±Ÿ'ÎÊÒ«V®€wïaX¾q·°/WLhÉæ;´jq9Zñ³Dÿ×®]=ZØ5{ñ¹AÞžx ù|†?m –z¶”¨#2xXžš@m-MÍ­ß}4Ù;ÿì:ŠÕ[b×Î]èÒ¥³ØCc ]\\œÀúúöÆÒ•ëPœ›‰V-š Ú9®ˆU±Nv6˜ûõÏhïékk±ë¯Z044 ?~;s§ú‘aGç1¥©Z®ºï,5°C ù h g;ô®êÊÖÇïr¤¢ùÓ_ìÓ·›î$§ÒâÂßÿlB÷îÝÄ7sèÝ»wGÛ¶m==‹gÆÀ¯öNò9+Z;Ùct¿ð;·Än£rÁsç/Àoäp¼;®/šÙZ=F°ÊeÄùÌŠ¢þ=í½:cÌØ1pné ++væ}âȼgÏ^œ=uz¢l|7g ØçO\UôÓµ=ý¥¶ ’WòÌä{[É2Ì^Çõò@€éÜ[Ø7±,yìû.}ýšÝ²*Ï ¿ñiii•B·nݤ8p ˆc‡à^ÚC˜>¥$ʧ‰@âb;Gü¸r † ÙHê °žßÁÁººº‚åïVL4¢oÞ@G+݆€:L”¦ø…Öǧ:UÃÖ˜‘µð‘½µ¿_¾y:\Cq¹Ý’1˜êiSXÜã·ŸÅ"F5u Bº%&°751r±±±Ÿ Cß¾}¦Ì××Míì‘vÿ¡`’eë3‚lH¼›†;t™éã§Ï¦">‡EDázÌm¨ä%CCE fúºèØÇVã}„½žy i¼õ;ûèS0ÖÍY2^Zªþ-é £«–‘÷w#ÀCO#½¦üöñÛÄ"Ñý‡Y‚õŒ?‹ ìý3{öl„„„<æÂÙ“—¯ùóç ¾~LLK`[gRÕ–!üJ4ŸÃ¾ÀPÁ_ßÜ̯‘±æƒÿ ;[KA>g„äýžÍÌltb…ßü$îÊËq;ŽM­áÚÒÞ0<òÖXúUü}°vM‰]ZÀuºÑE$®¸«D͉,ó3¨IŽ’Ì¸ÕÖ`3bÄÁP³aÃ\¿~] Ý,øûû ƒÕÑÑÔ·üæ.Xº;…”o´È¶¶¶‚OàªMû迚 ¡c…~vn>Éö6B0ŸQƒGì ­© ×O²0™Ž»ràÉk€Ç÷€T7ËUM Õâž´ ³¾žÞ7ý»»wë×ÃS´ïØYáíc7-f‚X&6Ðc7þR²ÚÔ¢›åE ¾˜” ¾øjàÅã=ÜÍÖ…AÁÙ‹Që¶$F‘}þ¹ Ãúm‡qñj´ Ó¿›(ŸÞ æqÊØ‚äðøA}`Æ´S{42ÔsyqÞžš¹.£¦ÄªVJàGâäÖùŽÍl>$ÏÍv.‚sÄC²ìýw ø1ÙfFˆ™4scòß´ bõ²J¡Ê‹Ï·x+¸—’Œ ¢§ŸïUvòðrk‰é¯‚‘®À0R’ÙmÀ€<{ƽæ#±¸ÇmÖ¼ÍX’+õE3àx˜•­W#®vTlÏßös'—ÏWÿ0G³­³½°Ç3¦ßK@$Oã±>žU£lõssn‚Ë—/W[cmoŸ<c= Ü'„«)gNBHGàìyñYOO ¾xw,…rÁC2?3_ kà&Øo€`„¬ÛzQýR@éº:³>£Id1§&–'2õ~qßÂÞ_Ñf²zwn‡Õ¬’Êž›™™…¹s?Å À•¯*°úvÔ nøpÊH!`£²_!mÉÂoeaŒÜ¼|œ¾pMªÖ!íï€ÚÓÛBج;RÝä U á KÚ”œùÝZÃŽìð‹Ïž¹ªÄð±s‡…I£§FǶò6-ð©o·ïØÇ=u¿¶_~û-ÌõTÐ&tç‘3p§àÖä‘Ú7^ü¦Öæhle*¼õÏs/ã vÝJ'éÄ€¨“T°òƒ`JhLbi¿îíu—oØ=œŠ_}Á#2»-!”°uË„÷Vö—簞Ф»é9³Ag"ÈcvÜ3nTì<9kÒ0¼?{É÷VðîÈ/Bíañ/K°Çf¬"ÿüy‹Ö¡„øŠ/ÉòÆ}àIæ‹)“{–ïùS'¾Çœ†WýoÒÒuts–šâG¨¼†?Ìù’Kùê-Çè.(N½@Rpâ>·$w-öÆa1ïþƒ,¼;ïW ˜¼# §EÑ<šëÇÃãÁ»’¼>iXw 
:_Ì_€w¦M}|ÿEòòò°àëo°jÅ2¬þ~)zRv™âò¦ÈVU‰Ã‹ÎbßCr¿pù’ï¥ :vòÌ}d¨™ì×—ž••àÙ±­‚CÐGöïÒâïGÉYÛž-%û_$äJl9þž’0¢{Ëœ½ôxñ¹ûoZÕ¡0“8zpO,_0 ÿ¹ cýÇ‘|OžS5@…]ý»e+zôèàC;±æïƒ?·nß¶ f0+{qUì ÂAL!fÍ_Ž+wrÑ£ù½‰¨ØŒØM%Dó¾ÈÕ«†®Õé‹ÅþC{ÂÐ@o1%”p¬S%>$!P¹®BN›å$—¼ÞXÆõp´]ÑñIB×î’Ÿ~…™¶j_™4;‘Î}ÅÂYøo_F ¼‹|}ûvOWXLMr »MFš›×¯ánRÙþïc\?wôìä.ðÌš¿|ò¨Ji´)Bç…‰:soMÿ€|vb&yþ:T芽½æÏ}ý{z bù¦Pµ—²ûÎTÀ¡‰5Fôëb»fKÀ7ôºvP¾É$ˆ6JªºÅÅEo²?‹Wüö’^EÁ‹‘1‚XÅQµ­›VëIÃϰXÔ®u tñpFü­›8w&˜,rA8ˆÔÛQÐ)Ë€{ søz»`xß®Ì= õñËNWœ”aÓî@òh*èxW©âðÉó¸—ÿ¶ï„oï^hÒħNœ>yÔ­Z9SFx\¹xÞ„¸¼ ò¦-lpüœKvÎÇC(×(" @/·°(g<sðž^¡SgÎxßÎ$VEÁ™Gùyf¬¦Éågu)ö)ˆo×öÐÓ ÔÝËUˆÄ571†¶¦æã6*êâ ´43²€l¢Ô,ŽM­h¿þ¼ëðiŒðƒaÃHëJФI°*ÙÑÑæææÂo®m]ñÃO¿òµ¬*À[$¿<ô⨎èRV¦}¸éd¡srø#!sƒÈ+6!Ée0-Eò‚𥫣%Xâx î.Í«¥•ÇÈ“Áˆ 1•,¶U|æ:«#Ñœ#€cö ÉÄ|èÄ\¾Êê1ž4{GO… üS6@qvåË—ãµ×^¤¶Jž»ìûÉp¡$,5ȸM¦’á‘Ñ II®€Ùf€8T9€„À=4ÏȺ?þ^úCmoJʤ§«EÆžòIdcK)ƒÚÓâðBÊ?„Œ¾]ÜÉ]ÜK0øpôîþÀÓèÞ«eð0š·³³lœÑƒ?˜w9uü(º“Ùº>¶îoc¦Æ”êlãÒ²üt²—Èe+< ¶àú­8ʘAjzU!“sžØÀÓô6ºPÝûË×ð›T!u0Â1Eºtõ  …öB#L .Q2¨õë×ÃÙÙ7£c°í¿-hko*ðÏSI§w5×ÂíÚZ™!âZ Å7Þµ#*°VT@ À+9K†Îé÷Ó")(2ëè©‹&”ŠM/GP¯FbhŸNÄ”I(qÖ<ϽËZ}ÎÓ³aåú™8ñqV¿ÿþìݵ—ÂN`@''â5Ü$2P=®\‚lªæˆBʉ4•9ý¶Õ‰õ(Ï‘,€9,i¦˜™vßñû<‘ %0–#°tÁMr¦.Ÿ{8dîÝKÁŠe‹aÓH¬½•,K’X'¥5$¦N¬Ä3ã[Ä&¤|GH0WZuWW”(À3Õs–Ž…[OüYÛq 7ylU{Âv vÃâ¨ß°@”e&bì@oô"O_V\ñ¶QUyTµy~gF.ùJœ¾pÕ”ÚM «]íéH×`ºØŒH<‚pÑ?É@V r¯z|úÎØcGúªTM®P¹,?óö#¨ƒ5Âîi,U("°M%!)•¨ÀBøö'ÇU{A|µ²²â%9yÕúõRŽÎ"öu“¤cP(Vó""oÝ![­X¥ePˆ¬’Bù‹wu S$v£kãÔ þãÆQbŠOUÓ‰²– <Ȭÿ«Nž &Þ ·ž*PË/òàÌ2ÉÿíÂ=R ×#XËùPˆâÌv÷jƒÅ‹?ã;™€©S§1³È~õf’vX@š¹´Ý”pYpÈ”´Ã áyfH»P€ë©à gÂÚØÁµW¯^8~<èÐ,BÒù P'÷®Ýv(%6án¹ß€¤½~ÉŸçmÀÆÒ 6æ¤ÈÚ¿_ˆ‡¨Å1¤=ÄåK:²’ªö+'7¯@—‚5ztó$m›Œµ‚UWÆï¬ÄŠ 3ö¢¥¿çýõ×Ú˜ƒ‰’““´ƒ‚‚°hÑ’ðÔÔŸ“þ…O9‘ä!<ê ¶©‘Fø¿Ë?33#¯`E½$šA=Ìδ¿®ßekw&R,2ÃÞ….Î2D—T<ˆäµPóÓfl¼r=Ž\ÆåEx¨Y%–RM)¯Ñ{­Öôÿ]d$Ânº¤²øTäˆÜV?{9›5^¯ æ` iEfnš¬bŽ´’ È®‡]ÝÊ©ÜYC÷ ªŸÖ]4&ã'½"•póêKJvGÞ€Ôôû+þÝw¢°²fN²!¼œO³Úœ#˜ØÃŠ,ÞôGC#•;Ð Îí=v6”}• 0sâHxe®?×&êùE Ê1•œ!êlLDzm@êP/t˜C I߯ùų̫ŒÀŽ®‘Ññ½)›rÀi)¤M$›ƒ>„´²ú@hR^„@Š­ˆOLá}€ôþ¥ÉÒž«zA@?:ævü¸ömZ°D}9aÔu2™…å>úã:ÜËÓ@¾Š.ÒrÊwòFàtXÌŒ „ô2’ˆ»ìÒÎ .(Ú™šÔØBÌ`l]û\Ýsò0=§í¬4rñ_°lÝ®•|êF…Ýþ9ó'êðC:JÎ¥u[¬ÝÈ’ÙÈ¥ •ÀÀ ,Yü&“,×¶UyrŒ'%ÄÿÄ|{ —CQíÓŸ‹ÑT}ðºÕcMèÅë»… û§}U˜"•þ ¹å9*wLG[ôÇÊ?Ö`ý®ã( ºJ½©Ä§i@_5ë6l$†p œ( 
¥šª~_¹-¬õ)ø³—D”u Üz)*–Œ@ŽR¹T,ê¶9Õ4Suº—W\"šG“»‚|°§¼¼Šà¥[y(̈Qüžƒè“ic(tÍ“Þj>¹„âiûòéìAgáËf@]C ]0vÈpÁêÉe$Î,F`¤²$P I]UŸU «ŒpbX k+‡.óϤ¶"È£j§åý˜³Ð!ДÚÅU`ʪú4p–Û™z1òRftÁK²¥ç¸ áª/­K‘G0‰RÙxDq¤ÔLJIMõ'r«Ò¥}k…rá…à퉥•ê”;¼Ï3Ò22HK¹%¢ö8êÞc¡7€A+€HIqê)ô©o)à©Îùäæ$Z÷ïÞã‚LÑœHù­¯ªÄ9H $j½Uª‹Ï“¯`ÀÉLæ>ÈÈºÍ "&© ˆ¢<2š1µ–ú„(ðr§Þ%¼üaÏÑ3B*W&§ xüYtB9ûÿJ{.xˆfë(kä…«7_¹=Yq ž|•Î'E€”:/pñ‘“áJ¥"–Î’<] oúÌ€Ê „án;s1*95=ƒ"î6À#ÏΡD,@SaV¤üG‘à.u!‰rú±®!Ã#k ‡I(ôÌ’¦íÆMJé®ì®c’ /‹åÖ@ J*4ÐÄE‡²­°C¨T A\©‚#€>:å,aêjOçA*30ùÏΤÖ6´- -‹}*öÎ]aªVÓí ŽPÊ à€G«º;…„LJI¯³k•”çLîÕq(ÁU" ôÎÑI$¹ç¯Ü¨ðŽ•ûÔgƒlzf§YÚ­#eÑ% ZdBJT ùCT›-XSSÿuò†_HÎ%|œ 9…¦È¢GJ€)94ð¨z #H `QQ Iø°"€0츆Ȳ«<e—z?“¹@N)uP ŒùËÂ%:WJ}öd\!»ƒ1ó›““GîrRõ®èº² @<çÐe/\© Â3¡€ÿ9…|rN!bÄÜ–E• Qȵà),‹YPà:“èôÊ*èD¤ÞSeA€4>tZÑœD¥¾U*äF#oÆó¯WªÜ’ÚWeA€ ŽhHŒ •³e€”aIR[ñ*) SL>™Æ¥®«2Šó•½ 2è ËÌì\´ ieÊ‚eEEE 8—âù+7‘•KQa ~ qžlÞŬ•·€KרœÝÁeÃråÊBrè”ÑbE=ä'RÚÀÀ¸;¬ý‘vÝ•ëS0*(,T“4ʶòÀù3;Á¦R< Ù?ÈPºA–}U°É/(RaËXCØxÿN_©itèàµÈÈÈôKB‚^tñ!ReA€&¹¤ .â³}io|Ù#‹ÿÚ€_£tttHȉ¯–,YzÄÁ¡E½³4ǯ, Î™2‚€ßþÈqˆILÇðaÃ(G®pÀåÌ™3pàÀÞ6žžžÿ¸H ”Jútõ ´©Zõž-­É§YûßAô066”)¾4oÞ›6m²±¶nÌ|§“”òø0ê—ús®aÎBr",ôá3‹›™™‰?þønn®Lìž)P‡¤o^‡>ˆóÈï`NßIDATÃr•¨8E•³ G?ÑÜ T0 XrøðÕœœ¸¸¸Ð6Pž$”³………q2u2Ó%y´¸²P€‹3s‹_VM0«}YÇñíòM”ú¦íýÞ{³|¼¼¼ƒûöí‹ôôr_ JíA‡`çç燒y˜4„’ƒ² @ eÌŽÏ£#U_6`™Ÿ™ÛïWlÆÁãç«¡t,P3°¬¬¤ÏÉ“'—|ôÑG°“““±k×nòSŸ.-í ²l¹±·ï]¤mÀ¡‰…àò2 ›·Y´]øëߨpò ëO@•?)`þœ5k¾ÉNO¿ÿFll|ÌõëQÿ£±Ÿ—Öø•I¨~ûõá½ÿlº¿p¶´& ¾êanŸÔÛøâçuØxv‘ôÉtTMÇÀR_ù°(©šD•e àu:xôTx^¥ÉSö„œñ#’M°p/þÊ:ú‹Ïãçü€R]|®T™`ʃÌ<Í]‡NÑÑ,RÏ“Às!sà$ÓìÖv (o|ðSNàé‹´——ͦ(øzcn”<‡ñ~HHˆèò­T!W¿2Qvïæt¯|€ö?¯Åì¯WžŽKHîM·\á^µÁdÕÚ®¯²ÚÚº~k׮خ];•ÒAÄ{v£3eU†@Vî7Syº×ÅësB#¢¾¥,hï§Ovþú¥@‘H3‹¸__ºL<”»}ïaU/7g[k:oHAR§C"X¯üì%|òߨqðÔaÊ÷÷-ù?´ø’kp¤„;Ê$ð±i-éJ¢«EK‡¦›W}7ËÐØP_aRȰ<Ï{ŠKEˆ‹»G+L?öM¬l•xÞ°ÜΪZ¦ ¼¯ÇÓ‘°'éà&RÜàú­Äâü‚‚kôÜv²Úý äÝy^ÊòÛK„<åæ”LÄÏ@Ogz{ÏŽÎ’íê¿þ¶ÿl\ ŸN˜ì×OøßîòëÑrÑ^ÁÆ™‡YÙà ¥A$ÂSŠº„ä´ LaÌ^²Ä“rI™Èü£Ñ=÷ßK†Ç¸~ûöí¯#ŸºÊÀ:<Ž–º=¸‡w—LÑ·¹dfæðs~Ûè<À¸ÄÔÛ‰wSȼsH‰šd©Ë®o­]å±¼ú\ý 4öòꘑ——Ç¡dÏÀ¤I“˜'¼.‚êzúÏÜ:›WWе€®Ytu£‹NèzJ9"‘Úw{öìyfáù‡€€€2r«¢` u/¥Ü«N¿pÜÆŒ›K™DžA€¨¨¨2›ÆDÆÑÿ…µ¼* ”3 ÒÒÒÙñÌâóþþãX6gòþ ^Òð2dHqlllÙáÇŸB‚èèè2ss‹³4n¥0~És}”Å@œ9qjÕª•*;NΙ3‡Nð|¢ù[´hîÝKÙ@•ÔN $N«¯Ê(Ì 
¸¸ººåÞ¹“Pæîî^¶cÇŽ2’ÿËÖ­[[¦««›F— ÓÓW‘Ù ððð<ݬ™Ý³´-œXä£L£¢92kQÉ+~A¼Ï[ÒÅä¾EÐdRL-SýJÿ`5 Ð#ÓFIEND®B`‚charm-2.1.1/src/github.com/ajstarks/svgo/pattern/0000775000175000017500000000000012672604563020711 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/pattern/pattern.go0000664000175000017500000000137012672604563022716 0ustar marcomarco// pattern: test the pattern function package main import ( "github.com/ajstarks/svgo" "fmt" "os" ) func main() { canvas := svg.New(os.Stdout) w, h := 500, 500 pct := 5 pw, ph := (w*pct)/100, (h*pct)/100 canvas.Start(w, h) // define the pattern canvas.Def() canvas.Pattern("hatch", 0, 0, pw, ph, "user") canvas.Gstyle("fill:none;stroke-width:1") canvas.Path(fmt.Sprintf("M0,0 l%d,%d", pw, ph), "stroke:red") canvas.Path(fmt.Sprintf("M%d,0 l-%d,%d", pw, pw, ph), "stroke:blue") canvas.Gend() canvas.PatternEnd() canvas.DefEnd() // use the pattern canvas.Gstyle("stroke:black; stroke-width:2") canvas.Circle(w/2, h/2, h/8, "fill:url(#hatch)") canvas.CenterRect((w*4)/5, h/2, h/4, h/4, "fill:url(#hatch)") canvas.Gend() canvas.End() } charm-2.1.1/src/github.com/ajstarks/svgo/README.markdown0000664000175000017500000005716312672604563021751 0ustar marcomarco#SVGo: A Go library for SVG generation# The library generates SVG as defined by the Scalable Vector Graphics 1.1 Specification (). Output goes to the specified io.Writer. 
## Supported SVG elements and functions ## ### Shapes, lines, text circle, ellipse, polygon, polyline, rect (including roundrects), line, text ### Paths general, arc, cubic and quadratic bezier paths, ### Image and Gradients image, linearGradient, radialGradient, ### Transforms ### translate, rotate, scale, skewX, skewY ### Filter Effects filter, feBlend, feColorMatrix, feColorMatrix, feComponentTransfer, feComposite, feConvolveMatrix, feDiffuseLighting, feDisplacementMap, feDistantLight, feFlood, feGaussianBlur, feImage, feMerge, feMorphology, feOffset, fePointLight, feSpecularLighting, feSpotLight,feTile, feTurbulence ### Metadata elements ### desc, defs, g (style, transform, id), marker, mask, pattern, title, (a)ddress, link, script, use ## Building and Usage ## See svgdef.[svg|png|pdf] for a graphical view of the function calls Usage: (assuming GOPATH is set) go get github.com/ajstarks/svgo go install github.com/ajstarks/svgo/... You can use godoc to browse the documentation from the command line: $ godoc github.com/ajstarks/svgo a minimal program, to generate SVG to standard output. 
package main import ( "github.com/ajstarks/svgo" "os" ) func main() { width := 500 height := 500 canvas := svg.New(os.Stdout) canvas.Start(width, height) canvas.Circle(width/2, height/2, 100) canvas.Text(width/2, height/2, "Hello, SVG", "text-anchor:middle;font-size:30px;fill:white") canvas.End() } Drawing in a web server: (http://localhost:2003/circle) package main import ( "log" "github.com/ajstarks/svgo" "net/http" ) func main() { http.Handle("/circle", http.HandlerFunc(circle)) err := http.ListenAndServe(":2003", nil) if err != nil { log.Fatal("ListenAndServe:", err) } } func circle(w http.ResponseWriter, req *http.Request) { w.Header().Set("Content-Type", "image/svg+xml") s := svg.New(w) s.Start(500, 500) s.Circle(250, 250, 125, "fill:none;stroke:black") s.End() } You may view the SVG output with a browser that supports SVG (tested on Chrome, Opera, Firefox and Safari), or any other SVG user-agent such as Batik Squiggle. ### Graphics Sketching with SVGo and svgplay ### Combined with the svgplay command, SVGo can be used to "sketch" with code in a browser. To use svgplay and SVGo, first go to a directory with your code, and run: $ svgplay 2014/06/25 22:05:28 ☠ ☠ ☠ Warning: this server allows a client connecting to 127.0.0.1:1999 to execute code on this computer ☠ ☠ ☠ Next open your browser to the svgplay server you just started. svgplay only listens on localhost, and uses port 1999 (guess which year SVG was first introduced) by default http://localhost:1999/ Enter your code in the textarea, and when you are ready to run press Shift--Enter. The code will be compiled, with the results on the right. To update, change the code and repeat. Note that compilation errors are shown in red under the code. In order for svgplay/SVGo to work, make sure that the io.Writer specified with the New function is os.Stdout. 
If you want to sketch with an existing file, enter its URL: http://localhost:1999/foo.go ![SVGplay](https://farm4.staticflickr.com/3859/14322978157_31c0114850.jpg) ### SVGo Papers and presentations ### * SVGo paper from SVGOpen 2011 * Programming Pictures with SVGo * SVGo Workshop ### Tutorial Video ### A video describing how to use the package can be seen on YouTube at ## Package contents ## * svg.go: Library * newsvg: Coding template command * svgdef: Creates a SVG representation of the API * android: The Android logo * bubtrail: Bubble trails * bulletgraph: Bullet Graphs (via Stephen Few) * colortab: Display SVG named colors with RGB values * compx: Component diagrams * flower: Random "flowers" * fontcompare: Compare two fonts * f50: Get 50 photos from Flickr based on a query * fe: Filter effects * funnel: Funnel from transparent circles * gradient: Linear and radial gradients * html5logo: HTML5 logo with draggable elements * imfade: Show image fading * lewitt: Version of Sol Lewitt's Wall Drawing 91 * ltr: Layer Tennis Remixes * marker: Test markers * paths: Demonstrate SVG paths * pattern: Test patterns * planets: Show the scale of the Solar system * pmap: Proportion maps * randcomp: Compare random number generators * richter: Gerhard Richter's 256 colors * rl: Random lines (port of a Processing demo) * skewabc: Skew ABC * stockproduct: Visualize product and stock prices * svgopher: SVGo Mascot * svgplay: SVGo sketching server * svgplot: Plot data * svgrid: Compose SVG files in a grid * tsg: Twitter Search Grid * tumblrgrid: Tumblr picture grid * turbulence: Turbulence filter effect * vismem: Visualize data from files * webfonts: "Hello, World" with Google Web Fonts * websvg: Generate SVG as a web server ## Functions and types ## Many functions use x, y to specify an object's location, and w, h to specify the object's width and height. Where applicable, a final optional argument specifies the style to be applied to the object. 
The style strings follow the SVG standard; name:value pairs delimited by semicolons, or a series of name="value" pairs. For example: `"fill:none; opacity:0.3"` or `fill="none" opacity="0.3"` (see: ) The Offcolor type: type Offcolor struct { Offset uint8 Color string Opacity float } is used to specify the offset, color, and opacity of stop colors in linear and radial gradients The Filterspec type: type Filterspec struct { In string In2 string Result string } is used to specify inputs and results for filter effects ### Structure, Scripting, Metadata, Transformation and Links ### New(w io.Writer) *SVG Constructor, Specify the output destination. Start(w int, h int, attributes ...string) begin the SVG document with the width w and height h. Optionally add additional elememts (such as additional namespaces or scripting events) Startview(w, h, minx, miny, vw, vh int) begin the SVG document with the width w, height h, with a viewBox at minx, miny, vw, vh. Startunit(w int, h int, unit string, ns ...string) begin the SVG document, with width and height in the specified units. Optionally add additional elememts (such as additional namespaces or scripting events) Startpercent(w int, h int, ns ...string) begin the SVG document, with width and height in percent. Optionally add additional elememts (such as additional namespaces or scripting events) StartviewUnit(w, h int, unit string, minx, miny, vw, vh int) begin the SVG document with the width w, height h, in the specified unit, with a viewBox at minx, miny, vw, vh. End() end the SVG document Script(scriptype string, data ...string) Script defines a script with a specified type, (for example "application/javascript"). if the first variadic argument is a link, use only the link reference. Otherwise, treat variadic arguments as the text of the script (marked up as CDATA). if no data is specified, simply close the script element. 
Group(s ...string) begin a group, with arbitrary attributes Gstyle(s string) begin a group, with the specified style. Gid(s string) begin a group, with the specified id. Gtransform(s string) begin a group, with the specified transform, end with Gend(). Translate(x, y int) begins coordinate translation to (x,y), end with Gend(). Scale(n float64) scales the coordinate system by n, end with Gend(). ScaleXY(x, y float64) scales the coordinate system by x, y. End with Gend(). SkewX(a float64) SkewX skews the x coordinate system by angle a, end with Gend(). SkewY(a float64) SkewY skews the y coordinate system by angle a, end with Gend(). SkewXY(ax, ay float64) SkewXY skews x and y coordinate systems by ax, ay respectively, end with Gend(). Rotate(r float64) rotates the coordinate system by r degrees, end with Gend(). TranslateRotate(x, y int, r float64) translates the coordinate system to (x,y), then rotates to r degrees, end with Gend(). RotateTranslate(x, y int, r float64) rotates the coordinate system r degrees, then translates to (x,y), end with Gend(). Gend() end the group (must be paired with Gstyle, Gtransform, Gid). ClipPath(s ...string) Begin a ClipPath ClipEnd() End a ClipPath Def() begin a definition block. DefEnd() end a definition block. Marker(id string, x, y, w, h int, s ...string) define a marker MarkerEnd() end a marker Mask(id string, x int, y int, w int, h int, s ...string) creates a mask with a specified id, dimension, and optional style. MaskEnd() ends the Mask element. Pattern(id string, x, y, width, height int, putype string, s ...string) define a Pattern with the specified dimensions, the putype can be either "user" or "obj", which sets the patternUnits attribute to be either userSpaceOnUse or objectBoundingBox. Desc(s string) specify the text of the description. Title(s string) specify the text of the title. Link(href string, title string) begin a link named "href", with the specified title. LinkEnd() end the link. 
Use(x int, y int, link string, s ...string) place the object referenced at link at the location x, y. ### Shapes ### Circle(x int, y int, r int, s ...string) draw a circle, centered at x,y with radius r. ![Circle](http://farm5.static.flickr.com/4144/5187953823_01a1741489_m.jpg) Ellipse(x int, y int, w int, h int, s ...string) draw an ellipse, centered at x,y with radii w, and h. ![Ellipse](http://farm2.static.flickr.com/1271/5187953773_a9d1fc406c_m.jpg) Polygon(x []int, y []int, s ...string) draw a series of line segments using an array of x, y coordinates. ![Polygon](http://farm2.static.flickr.com/1006/5187953873_337dc26597_m.jpg) Rect(x int, y int, w int, h int, s ...string) draw a rectangle with upper left-hand corner at x,y, with width w, and height h. ![Rect](http://farm2.static.flickr.com/1233/5188556032_86c90e354b_m.jpg) CenterRect(x int, y int, w int, h int, s ...string) draw a rectangle with its center at x,y, with width w, and height h. Roundrect(x int, y int, w int, h int, rx int, ry int, s ...string) draw a rounded rectangle with upper the left-hand corner at x,y, with width w, and height h. The radii for the rounded portion is specified by rx (width), and ry (height). ![Roundrect](http://farm2.static.flickr.com/1275/5188556120_e2a9998fee_m.jpg) Square(x int, y int, s int, style ...string) draw a square with upper left corner at x,y with sides of length s. ![Square](http://farm5.static.flickr.com/4110/5187953659_54dcce242e_m.jpg) ### Paths ### Path(p string, s ...style) draw the arbitrary path as specified in p, according to the style specified in s. Arc(sx int, sy int, ax int, ay int, r int, large bool, sweep bool, ex int, ey int, s ...string) draw an elliptical arc beginning coordinate at sx,sy, ending coordinate at ex, ey width and height of the arc are specified by ax, ay, the x axis rotation is r if sweep is true, then the arc will be drawn in a "positive-angle" direction (clockwise), if false, the arc is drawn counterclockwise. 
if large is true, the arc sweep angle is greater than or equal to 180 degrees, otherwise the arc sweep is less than 180 degrees. ![Arc](http://farm2.static.flickr.com/1300/5188556148_df1a176074_m.jpg) Bezier(sx int, sy int, cx int, cy int, px int, py int, ex int, ey int, s ...string) draw a cubic bezier curve, beginning at sx,sy, ending at ex,ey with control points at cx,cy and px,py. ![Bezier](http://farm2.static.flickr.com/1233/5188556246_a03e67d013.jpg) Qbezier(sx int, sy int, cx int, cy int, ex int, ey int, tx int, ty int, s ...string) draw a quadratic bezier curve, beginning at sx, sy, ending at tx,ty with control points are at cx,cy, ex,ey. ![Qbezier](http://farm2.static.flickr.com/1018/5187953917_9a43cf64fb.jpg) Qbez(sx int, sy int, cx int, cy int, ex int, ey int, s...string) draws a quadratic bezier curver, with optional style beginning at sx,sy, ending at ex, sy with the control point at cx, cy. ![Qbez](http://farm6.static.flickr.com/5176/5569879349_5f726aab5e.jpg) ### Lines ### Line(x1 int, y1 int, x2 int, y2 int, s ...string) draw a line segment between x1,y1 and x2,y2. ![Line](http://farm5.static.flickr.com/4154/5188556080_0be19da0bc.jpg) Polyline(x []int, y []int, s ...string) draw a polygon using coordinates specified in x,y arrays. ![Polyline](http://farm2.static.flickr.com/1266/5188556384_a863273a69.jpg) ### Image and Text ### Image(x int, y int, w int, h int, link string, s ...string) place at x,y (upper left hand corner), the image with width w, and height h, referenced at link. ![Image](http://farm5.static.flickr.com/4058/5188556346_e5ce3dcbc2_m.jpg) Text(x int, y int, t string, s ...string) Place the specified text, t at x,y according to the style specified in s. Textlines(x, y int, s []string, size, spacing int, fill, align string) Places lines of text in s, starting at x,y, at the specified size, fill, and alignment, and spacing. Textpath(t string, pathid string, s ...string) places optionally styled text along a previously defined path. 
![Image](http://farm4.static.flickr.com/3149/5694580737_4b291df768_m.jpg) ### Color ### RGB(r int, g int, b int) string creates a style string for the fill color designated by the (r)ed, g(reen), (b)lue components. RGBA(r int, g int, b int, a float64) string as above, but includes the color's opacity as a value between 0.0 (fully transparent) and 1.0 (opaque). ### Gradients ### LinearGradient(id string, x1, y1, x2, y2 uint8, sc []Offcolor) constructs a linear color gradient identified by id, along the vector defined by (x1,y1), and (x2,y2). The stop color sequence defined in sc. Coordinates are expressed as percentages. ![LinearGradient](http://farm5.static.flickr.com/4153/5187954033_3972f63fa9.jpg) RadialGradient(id string, cx, cy, r, fx, fy uint8, sc []Offcolor) constructs a radial color gradient identified by id, centered at (cx,cy), with a radius of r. (fx, fy) define the location of the focal point of the light source. The stop color sequence defined in sc. Coordinates are expressed as percentages. 
![RadialGradient](http://farm2.static.flickr.com/1302/5187954065_7ddba7b819.jpg) ### Filter Effects ### Filter(id string, s ...string) Filter begins a filter set Standard reference: Fend() Fend ends a filter set Standard reference: FeBlend(fs Filterspec, mode string, s ...string) FeBlend specifies a Blend filter primitive Standard reference: FeColorMatrix(fs Filterspec, values [20]float64, s ...string) FeColorMatrix specifies a color matrix filter primitive, with matrix values Standard reference: FeColorMatrixHue(fs Filterspec, value float64, s ...string) FeColorMatrix specifies a color matrix filter primitive, with hue values Standard reference: FeColorMatrixSaturate(fs Filterspec, value float64, s ...string) FeColorMatrix specifies a color matrix filter primitive, with saturation values Standard reference: FeColorMatrixLuminence(fs Filterspec, s ...string) FeColorMatrix specifies a color matrix filter primitive, with luminence values Standard reference: FeComponentTransfer() FeComponentTransfer begins a feComponent filter Element> Standard reference: FeCompEnd() FeCompEnd ends a feComponent filter Element> FeComposite(fs Filterspec, operator string, k1, k2, k3, k4 int, s ...string) FeComposite specifies a feComposite filter primitive Standard reference: FeConvolveMatrix(fs Filterspec, matrix [9]int, s ...string) FeConvolveMatrix specifies a feConvolveMatrix filter primitive Standard referencd: FeDiffuseLighting(fs Filterspec, scale, constant float64, s ...string) FeDiffuseLighting specifies a diffuse lighting filter primitive, a container for light source Element>s, end with DiffuseEnd() FeDiffEnd() FeDiffuseEnd ends a diffuse lighting filter primitive container Standard reference: FeDisplacementMap(fs Filterspec, scale float64, xchannel, ychannel string, s ...string) FeDisplacementMap specifies a feDisplacementMap filter primitive Standard reference: FeDistantLight(fs Filterspec, azimuth, elevation float64, s ...string) FeDistantLight specifies a feDistantLight 
filter primitive Standard reference: FeFlood(fs Filterspec, color string, opacity float64, s ...string) FeFlood specifies a flood filter primitive Standard reference: FeFuncLinear(channel string, slope, intercept float64) FeFuncLinear is the linear form of feFunc Standard reference: FeFuncGamma(channel, amplitude, exponent, offset float64) FeFuncGamma is the gamma curve form of feFunc Standard reference: FeFuncTable(channel string, tv []float64) FeFuncGamma is the form of feFunc using a table of values Standard reference: FeFuncDiscrete(channel string, tv []float64) FeFuncGamma is the form of feFunc using discrete values Standard reference: FeGaussianBlur(fs Filterspec, stdx, stdy float64, s ...string) FeGaussianBlur specifies a Gaussian Blur filter primitive Standard reference: FeImage(href string, result string, s ...string) FeImage specifies a feImage filter primitive Standard reference: FeMerge(nodes []string, s ...string) FeMerge specifies a feMerge filter primitive, containing feMerge Element>s Standard reference: FeMorphology(fs Filterspec, operator string, xradius, yradius float64, s ...string) FeMorphologyLight specifies a feMorphologyLight filter primitive Standard reference: FeOffset(fs Filterspec, dx, dy int, s ...string) FeOffset specifies the feOffset filter primitive Standard reference: FePointLight(x, y, z float64, s ...string) FePointLight specifies a fePpointLight filter primitive Standard reference: FeSpecularLighting(fs Filterspec, scale, constant float64, exponent int, color string, s ...string) FeSpecularLighting specifies a specular lighting filter primitive, a container for light source elements, end with SpecularEnd() FeSpecEnd() FeSpecularEnd ends a specular lighting filter primitive container Standard reference: FeSpotLight(fs Filterspec, x, y, z, px, py, pz float64, s ...string) FeSpotLight specifies a feSpotLight filter primitive Standard reference: FeTile(fs Filterspec, in string, s ...string) FeTile specifies the tile utility filter 
primitive Standard reference: FeTurbulence(fs Filterspec, ftype string, bfx, bfy float64, octaves int, seed int64, stitch bool, s ...string) FeTurbulence specifies a turbulence filter primitive Standard reference: ### Filter convenience functions (modeled on CSS filter effects) ### Blur(p float64) Blur function by standard deviation Brightness(p float64) Brightness function (0-100) Grayscale() Apply a grayscale filter to the image HueRotate(a float64) Rotate Hues (0-360 degrees) Invert() Invert the image's colors Saturate(p float64) Percent saturation, 0 is grayscale Sepia() Apply sepia tone ### Utility ### Grid(x int, y int, w int, h int, n int, s ...string) draws a grid of straight lines starting at x,y, with a width w, and height h, and a size of n. ![Grid](http://farm5.static.flickr.com/4133/5190957924_7a31d0db34.jpg) ### Credits ### Thanks to Jonathan Wright for the io.Writer update. charm-2.1.1/src/github.com/ajstarks/svgo/marker/0000775000175000017500000000000012672604563020515 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/marker/marker.go0000664000175000017500000000134512672604563022330 0ustar marcomarco// marker test // +build !appengine package main import ( "github.com/ajstarks/svgo" "os" ) func main() { canvas := svg.New(os.Stdout) canvas.Start(500, 500) canvas.Title("Marker") canvas.Def() canvas.Marker("dot", 5, 5, 8, 8) canvas.Circle(5, 5, 3, "fill:black") canvas.MarkerEnd() canvas.Marker("box", 5, 5, 8, 8) canvas.CenterRect(5, 5, 6, 6, "fill:green") canvas.MarkerEnd() canvas.Marker("arrow", 2, 6, 13, 13) canvas.Path("M2,2 L2,11 L10,6 L2,2", "fill:blue") canvas.MarkerEnd() canvas.DefEnd() x := []int{100, 250, 100} y := []int{100, 250, 400} canvas.Polyline(x, y, `fill="none"`, `stroke="red"`, `marker-start="url(#dot)"`, `marker-mid="url(#arrow)"`, `marker-end="url(#box)"`) canvas.End() } charm-2.1.1/src/github.com/ajstarks/svgo/paths/0000775000175000017500000000000012672604563020353 5ustar 
marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/paths/paths.go0000664000175000017500000000130412672604563022017 0ustar marcomarco// paths draws the W3C logo as a paths // +build !appengine package main import ( "fmt" "os" "github.com/ajstarks/svgo" ) var canvas = svg.New(os.Stdout) func w3c() { w3path := `M36,5l12,41l12-41h33v4l-13,21c30,10,2,69-21,28l7-2c15,27,33,-22,3,-19v-4l12-20h-15l-17,59h-1l-13-42l-12,42h-1l-20-67h9l12,41l8-28l-4-13h9` cpath := `M94,53c15,32,30,14,35,7l-1-7c-16,26-32,3-34,0M122,16c-10-21-34,0-21,30c-5-30 16,-38 23,-21l5-10l-2-9` canvas.Path(w3path, "fill:#005A9C") canvas.Path(cpath) } func main() { canvas.Startview(700, 200, 0, 0, 700, 200) canvas.Title("Paths") for i := 0; i < 5; i++ { canvas.Gtransform(fmt.Sprintf("translate(%d,0)", i*130)) w3c() canvas.Gend() } canvas.End() } charm-2.1.1/src/github.com/ajstarks/svgo/flower/0000775000175000017500000000000012672604563020532 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/flower/flower.go0000664000175000017500000000346212672604563022364 0ustar marcomarco// flower - draw random flowers, inspired by Evelyn Eastmond's DesignBlocks gererated "grain2" // +build !appengine package main import ( "flag" "fmt" "math" "math/rand" "os" "time" "github.com/ajstarks/svgo" ) var ( canvas = svg.New(os.Stdout) niter = flag.Int("n", 200, "number of iterations") width = flag.Int("w", 500, "width") height = flag.Int("h", 500, "height") thickness = flag.Int("t", 10, "max petal thinkness") np = flag.Int("p", 15, "max number of petals") psize = flag.Int("s", 30, "max length of petals") opacity = flag.Int("o", 50, "max opacity (10-100)") ) const flowerfmt = `stroke:rgb(%d,%d,%d); stroke-opacity:%.2f; stroke-width:%d` func radial(xp int, yp int, n int, l int, style ...string) { var x, y, r, t, limit float64 limit = 2.0 * math.Pi r = float64(l) canvas.Gstyle(style[0]) for t = 0.0; t < limit; t += limit / float64(n) { x = r * math.Cos(t) y = r * math.Sin(t) canvas.Line(xp, yp, xp+int(x), yp+int(y)) } 
canvas.Gend() } func random(howsmall, howbig int) int { if howsmall >= howbig { return howsmall } return rand.Intn(howbig-howsmall) + howsmall } func randrad(w int, h int, n int) { var x, y, r, g, b, o, s, t, p int for i := 0; i < n; i++ { x = rand.Intn(w) y = rand.Intn(h) r = rand.Intn(255) g = rand.Intn(255) b = rand.Intn(255) o = random(10, *opacity) s = random(10, *psize) t = random(2, *thickness) p = random(10, *np) radial(x, y, p, s, fmt.Sprintf(flowerfmt, r, g, b, float64(o)/100.0, t)) } } func background(v int) { canvas.Rect(0, 0, *width, *height, canvas.RGB(v, v, v)) } func init() { flag.Parse() rand.Seed(int64(time.Now().Nanosecond()) % 1e9) } func main() { canvas.Start(*width, *height) canvas.Title("Random Flowers") background(255) randrad(*width, *height, *niter) canvas.End() } charm-2.1.1/src/github.com/ajstarks/svgo/svgdef/0000775000175000017500000000000012672604563020512 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/svgdef/svgdef.go0000664000175000017500000004255612672604563022333 0ustar marcomarco// svgdef - SVG Object Definition and Use // +build !appengine package main import ( "math" "os" "github.com/ajstarks/svgo" ) const ( textsize = 24 coordsize = 4 objcolor = "rgb(0,0,127)" objstyle = "fill:none; stroke-width:2;stroke:" + objcolor fobjstyle = "fill-opacity:0.25;fill:" + objcolor legendstyle = "fill:gray; text-anchor:middle" titlestyle = "fill:black; text-anchor:middle;font-size:24px" linestyle = "stroke:black; stroke-width:1" gtextstyle = "font-family:Calibri; text-anchor:middle; font-size:24px" coordstring = "x, y" tpathstring = `It's "fine" & "dandy" to draw text along a path` ) var ( canvas = svg.New(os.Stdout) grayfill = canvas.RGB(220, 220, 220) oc1 = svg.Offcolor{Offset:0, Color:"white", Opacity:1.0} oc2 = svg.Offcolor{Offset:25, Color:"lightblue", Opacity:1.0} oc3 = svg.Offcolor{Offset:75, Color:"blue", Opacity:1.0} oc4 = svg.Offcolor{Offset:100, Color:objcolor, Opacity:1.0} ga = []svg.Offcolor{oc1, oc2, oc3, oc4} ) // 
defcoodstr defines coordinate strings: (x,y) func defcoordstr(x int, y int, s string) { canvas.Circle(x, y, coordsize, grayfill) canvas.Text(x, y-textsize, s, legendstyle) } // defcoord defines a coordinate func defcoord(x, y, n int) { canvas.Circle(x, y, coordsize, grayfill) canvas.Text(x, y+n, coordstring, legendstyle) } // deflegend makes object legends func deflegend(x int, y int, size int, legend string) { canvas.Text(x, y+size+textsize, legend, titlestyle) } // defcircle defines the circle object for arbitrary placement and size func defcircle(id string, w, h int, legend string) { canvas.Gid(id) canvas.Translate(w, h) defcoord(0, 0, -textsize) canvas.Circle(0, 0, h, objstyle) canvas.Line(0, 0, h, 0, linestyle) canvas.Text(h/2, textsize, "r", legendstyle) deflegend(0, 0, h, legend) canvas.Gend() canvas.Gend() } // defellipse defines the ellipse object for arbitrary placement and size func defellipse(id string, w int, h int, legend string) { canvas.Gid(id) canvas.Translate(w, h) defcoord(0, 0, -textsize) canvas.Ellipse(0, 0, w, h, objstyle) canvas.Line(0, 0, w, 0, linestyle) canvas.Line(0, 0, 0, h, linestyle) canvas.Text(w/2, textsize, "rx", legendstyle) canvas.Text(-textsize, (h / 2), "ry", legendstyle) deflegend(0, 0, h, legend) canvas.Gend() canvas.Gend() } // defrect defines the rectangle object for arbitrary placement and size func defrect(id string, w int, h int, legend string) { canvas.Gid(id) defcoord(0, 0, -textsize) canvas.Rect(0, 0, w, h, objstyle) canvas.Text(-textsize, (h / 2), "h", legendstyle) canvas.Text((w / 2), -textsize, "w", legendstyle) deflegend((w / 2), 0, h, legend) canvas.Gend() } // defcrect defines the centered rectangle object for arbitrary placement and size func defcrect(id string, w int, h int, legend string) { canvas.Gid(id) defcoord(w/2, h/2, -textsize) canvas.Rect(0, 0, w, h, objstyle) canvas.Text(-textsize, (h / 2), "h", legendstyle) canvas.Text((w / 2), -textsize, "w", legendstyle) deflegend((w / 2), 0, h, legend) 
canvas.Gend() } // defsquare defines the square object for arbitrary placement and size func defsquare(id string, w int, legend string) { canvas.Gid(id) defcoord(0, 0, -textsize) canvas.Square(0, 0, w, objstyle) canvas.Text((w / 2), -textsize, "w", legendstyle) deflegend((w / 2), 0, w, legend) canvas.Gend() } // defimage defines the image object for arbitrary placement and size func defimage(id string, w int, h int, s string, legend string) { canvas.Gid(id) defcoord(0, 0, -textsize) canvas.Rect(0, 0, w, h, objstyle) canvas.Text(-textsize, (h / 2), "h", legendstyle) canvas.Text((w / 2), -textsize, "w", legendstyle) canvas.Image(0, 0, w, h, s) deflegend(w/2, h, 0, legend) canvas.Gend() } // defline defines the line object for arbitrary placement and size func defline(id string, w int, h int, legend string) { canvas.Gid(id) defcoordstr(0, 0, "x1, y1") defcoordstr(w, 0, "x2, y2") canvas.Line(0, 0, w, 0, objstyle) deflegend(w/2, h, 0, legend) canvas.Gend() } // defarc defines the arc object for arbitrary placement and size func defarc(id string, w int, h int, legend string) { canvas.Gid(id) defcoordstr(0, 0, "sx, sy") defcoordstr(w*2, 0, "ex, ey") canvas.Arc(0, 0, h, h, 0, false, true, w*2, 0, objstyle) deflegend(w, h, h, legend) canvas.Gend() } // defbez defines the cublic bezier object for arbitrary placement and size func defbez(id string, x int, y int, h int, legend string) { sx, sy := 0, 0 cx, cy := x, -y px, py := x, y ex, ey := x*2, 0 canvas.Gid(id) defcoordstr(sx, sy, "sx, sy") defcoordstr(cx, cy, "cx, cy") defcoordstr(px, py, "px, py") defcoordstr(ex, ey, "ex, ey") canvas.Bezier(sx, sy, cx, cy, px, py, ex, ey, objstyle) deflegend(px, h, 0, legend) canvas.Gend() } // defqbez defines the quadratic bezier object for arbitrary placement and size func defqbez(id string, px int, py int, h int, legend string) { sx, sy := 0, 0 ex, ey := px*2, 0 cx, cy := (ex-px)/3, -py-(py/2) canvas.Gid(id) defcoordstr(sx, sy, "sx, sy") defcoordstr(cx, cy, "cx, cy") defcoordstr(ex, ey, 
"ex, ey") canvas.Qbez(sx, sy, cx, cy, ex, ey, objstyle) deflegend(px, h, 0, legend) canvas.Gend() } // defroundrect defines the roundrect object for arbitrary placement and size func defroundrect(id string, w int, h int, rx int, ry int, legend string) { canvas.Gid(id) defcoord(0, 0, -textsize) canvas.Roundrect(0, 0, w, h, rx, ry, objstyle) canvas.Text(-textsize, (h / 2), "h", legendstyle) canvas.Text((w / 2), -textsize, "w", legendstyle) canvas.Line(rx, 0, rx, ry, linestyle) canvas.Line(0, ry, rx, ry, linestyle) canvas.Text(rx+textsize, ry-(ry/2), "ry", legendstyle) canvas.Text((rx / 2), ry+textsize, "rx", legendstyle) deflegend((w / 2), 0, h, legend) canvas.Gend() } // defpolygon defines the polygon object for arbitrary placement and size func defpolygon(id string, w int, h int, legend string) { var x = []int{0, w / 2, w, w, w / 2, 0} var y = []int{0, -h / 4, 0, (h * 3) / 4, h / 2, (h * 3) / 4} canvas.Gid(id) for i := 0; i < len(x); i++ { defcoord(x[i], y[i], -textsize) } canvas.Polygon(x, y, objstyle) deflegend(w/2, h, 0, legend) canvas.Gend() } // defpolyline defines the polyline object for arbitrary placement and size func defpolyline(id string, w int, h int, legend string) { var x = []int{0, w / 3, (w * 3) / 4, w} var y = []int{0, -(h / 2), -(h / 3), -h} canvas.Gid(id) for i := 0; i < len(x); i++ { defcoord(x[i], y[i], -textsize) } canvas.Polyline(x, y, objstyle) deflegend(w/2, h, 0, legend) canvas.Gend() } // defpath defines the path object for arbitrary placement and size func defpath(id string, x, y int, legend string) { var w3path = `M36,5l12,41l12-41h33v4l-13,21c30,10,2,69-21,28l7-2c15,27,33,-22,3,-19v-4l12-20h-15l-17,59h-1l-13-42l-12,42h-1l-20-67h9l12,41l8-28l-4-13h9` var cpath = `M94,53c15,32,30,14,35,7l-1-7c-16,26-32,3-34,0M122,16c-10-21-34,0-21,30c-5-30 16,-38 23,-21l5-10l-2-9` canvas.Gid(id) canvas.Path(w3path, `fill="`+objcolor+`"`) canvas.Path(cpath, canvas.RGBA(0, 0, 0, 0.5)) defcoord(0, 0, -textsize) deflegend(x/2, y+50, textsize, legend) 
canvas.Gend() } // deflg defines the linear gradient object for arbitrary placement and size func deflg(id string, w int, h int, legend string) { canvas.Gid(id) canvas.Rect(0, 0, w, h, "fill:url(#linear)") defcoordstr(0, 0, "x1%, y1%") defcoordstr(w, 0, "x2%, y2%") deflegend((w / 2), 0, h, legend) canvas.Gend() } // defrg defines the radial gradient object for arbitrary placement and size func defrg(id string, w int, h int, legend string) { canvas.Gid(id) canvas.Rect(0, 0, w, h, "fill:url(#radial)") defcoordstr(0, 0, "cx%, cy%") defcoordstr(w/2, h/2, "fx%, fy%") deflegend((w / 2), 0, h, legend) canvas.Gend() } // deftrans defines the trans object for arbitrary placement and size func deftrans(id string, w, h int, legend string) { tx := w / 3 canvas.Gid(id) defcoordstr(0, 0, "0, 0") defcoordstr(w-tx, 0, "x, y") deflegend(w/2, 0, h, legend) canvas.Rect(0, 0, tx, h, objstyle) canvas.Translate(w-tx, 0) canvas.Rect(0, 0, tx, h, fobjstyle) canvas.Gend() canvas.Gend() } // defgrid defines the grid object for arbitrary placement and size func defgrid(id string, w, h int, legend string) { n := h / 4 canvas.Gid(id) defcoord(0, 0, -textsize) canvas.Text(-textsize, (h / 2), "h", legendstyle) canvas.Text((w / 2), -textsize, "w", legendstyle) canvas.Text(n+textsize, n/2, "n", legendstyle) canvas.Grid(0, 0, w, h, n, "stroke:"+objcolor) deflegend((w / 2), 0, h, legend) canvas.Gend() } // deftext defines the text object for arbitrary placement and size func deftext(id string, w, h int, text string, legend string) { canvas.Gid(id) defcoord(0, h/2, textsize) canvas.Text(0, h/2, text, "text-anchor:start;font-size:32pt") deflegend(w/2, 0, h, legend) canvas.Gend() } // deftextpath defines the textpath object for arbitrary placement and size func deftextpath(id string, pathid string, s string, w, h int, legend string) { canvas.Gid(id) canvas.Textpath(s, pathid, `fill="`+objcolor+`"`, `text-anchor="start"`, `font-size="16pt"`) deflegend(w/2, 0, h, legend) canvas.Gend() } // defscale 
defines the scale object for arbitrary placement and size func defscale(id string, w, h int, n float64, legend string) { canvas.Gid(id) defcoordstr(0, 0, "0, 0") canvas.Rect(0, 0, w, h, objstyle) canvas.Scale(n) canvas.Rect(0, 0, w, h, fobjstyle) canvas.Gend() deflegend(w/2, 0, h, legend) canvas.Gend() } // defscaleXY defines the scaleXY object for arbitrary placement and size func defscaleXY(id string, w, h int, dx, dy float64, legend string) { canvas.Gid(id) defcoordstr(0, 0, "0, 0") canvas.Rect(0, 0, w, h, objstyle) canvas.ScaleXY(dx, dy) canvas.Rect(0, 0, w, h, fobjstyle) canvas.Gend() deflegend(w/2, 0, h, legend) canvas.Gend() } // defskewX defines the skewX object for arbitrary placement and size func defskewX(id string, w, h int, angle float64, legend string) { canvas.Gid(id) defcoordstr(0, 0, "0, 0") canvas.Rect(0, 0, w, h, objstyle) canvas.SkewX(angle) canvas.Rect(0, 0, w, h, fobjstyle) canvas.Gend() deflegend(w/2, 0, h, legend) canvas.Gend() } // defskewY defines the skewY object for arbitrary placement and size func defskewY(id string, w, h int, angle float64, legend string) { canvas.Gid(id) defcoordstr(0, 0, "0, 0") canvas.Rect(0, 0, w, h, objstyle) canvas.SkewY(angle) canvas.Rect(0, 0, w, h, fobjstyle) canvas.Gend() deflegend(w/2, 0, h, legend) canvas.Gend() } // defskewXY defines the skewXY object for arbitrary placement and size func defskewXY(id string, w, h int, ax, ay float64, legend string) { canvas.Gid(id) defcoordstr(0, 0, "0, 0") canvas.Rect(0, 0, w, h, objstyle) canvas.SkewXY(ax, ay) canvas.Rect(0, 0, w, h, fobjstyle) canvas.Gend() deflegend(w/2, 0, h, legend) canvas.Gend() } // defrotate defines the rotate object for arbitrary placement and size func defrotate(id string, w, h int, deg float64, legend string) { t := deg * (math.Pi / 180.0) r := float64(w / 2) rx := r * math.Cos(t) ry := r * math.Sin(t) canvas.Gid(id) defcoordstr(0, 0, "0, 0") deflegend(w/2, 0, h, legend) canvas.Rect(0, 0, w, h, objstyle) canvas.Qbez(w/2, 0, (w/2)+10, 
int(ry)/2, int(rx), int(ry), "fill:none;stroke:gray") canvas.Text(w/4, textsize, "r", legendstyle) canvas.Rotate(deg) canvas.Rect(0, 0, w, h, fobjstyle) canvas.Gend() canvas.Gend() } // defmeta defines the metadata objects func defmeta(id string, w int, name, desc []string, legend string) { canvas.Gid(id) canvas.Textlines(0, textsize, name, 24, 28, "black", "start") canvas.Textlines(w+32, textsize, desc, 24, 28, "rgb(127,127,127)", "start") deflegend(w, 0, 30*len(name), legend) canvas.Gend() } // defrgb defines the rgb object for arbitrary placement and size func defrgb(id string, w, h, r, g, b int, opacity float64, legend string) { size := h / 8 canvas.Gid(id) canvas.Gstyle(legendstyle) colordot(w/4, 0, size, r, 0, 0, 1.0) colordot(w/2, 0, size, 0, g, 0, 1.0) colordot(w*3/4, 0, size, 0, 0, b, 1.0) colordot(w, 0, size, r, g, b, opacity) if opacity < 1.0 { colordot(w+10, 0, size, r, g, b, opacity) canvas.Text(w, h/2, "alpha") } canvas.Text(w/4, h/2, "r") canvas.Text(w/2, h/2, "g") canvas.Text(w*3/4, h/2, "b") canvas.Text(w-(w/8), size-size/2, "->") canvas.Gend() deflegend(w/2, 0, h, legend) canvas.Gend() } // defobjects defines a set of objects with the specified dimensions, // once defined, the objects are referenced for placement func defobjects(w, h int) { var ( metatext = []string{ "New(w io Writer)", "Start(w, h int, options ...string)/End()", "Startview(w, h, minx, miny, vw, vh int)", "Group(s ...string)/Gend()", "Gstyle(s string)/Gend()", "Gtransform(s string)/Gend()", "Gid(id string)/Gend()", "ClipPath(s ...string)/ClipEnd()", "Def()/DefEnd()", "Marker()/MarkerEnd()", "Pattern()/PatternEnd()", "Desc(s string)", "Title(s string)", "Script(type, data ...string)", "Mask(id string, x, y, w, h int, style ...string)/MaskEnd()", "Link(href string, title string)/LinkEnd()", "Use(x int, y int, link string, style ...string)", } metadesc = []string{ "specify destination", "begin/end the document", "begin/end the document with viewport", "begin/end group with 
attributes", "begin/end group style", "begin/end group transform", "begin/end group id", "begin/end clip path", "begin/end a defintion block", "begin/end markers", "begin/end pattern", "set the description element", "set the title element", "define a script", "begin/end mask element", "begin/end link to href, with a title", "use defined objects", } ) h2 := h / 2 canvas.Desc("Object Definitions") canvas.Def() canvas.LinearGradient("linear", 0, 0, 100, 0, ga) canvas.RadialGradient("radial", 0, 0, 100, 50, 50, ga) canvas.Path("M 0,0 A62,62 0 0 1 250,0", `id="tpath"`) defsquare("square", h, "Square(x, y, w int, style ...string)") defrect("rect", w, h, "Rect(x, y, w, h int, style ...string)") defcrect("crect", w, h, "CenterRect(x, y, w, h int, style ...string)") defroundrect("roundrect", w, h, 25, 25, "Roundrect(x, y, w, h, rx, ry int, style ...string)") defpolygon("polygon", w, h, "Polygon(x, y []int, style ...string)") defcircle("circle", h, h2, "Circle(x, y, r int, style ...string)") defellipse("ellipse", h, h2, "Ellipse(x, y, rx, ry int, style ...string)") defline("line", w, h, "Line(x1, y1, x2, y2 int, style ...string)") defpolyline("polyline", w, h, "Polyline(x, y []int, style ...string)") defarc("arc", h, h2, "Arc(sx, sy, ax, ay, r int, lflag, sflag bool, ex, ey int, style ...string)") defpath("path", h, h2, "Path(s string, style ...string)") defqbez("qbez", h, h2, h, "Qbez(sx, sy, cx, cy, ex, ey int, style ...string)") defbez("bezier", h, h2, h, "Bezier(sx, sy, cx, cy, px, py, ex, ey int, style ...string)") defimage("image", 128, 128, "gophercolor128x128.png", "Image(x, y, w, h, int path string, style ...string)") deflg("lgrad", w, h, "LinearGradient(s string, x1, y1, x2, y2 uint8, oc []Offcolor)") defrg("rgrad", w, h, "RadialGradient(s string, cx, cy, r, fx, fy uint8, oc []Offcolor)") deftrans("trans", w, h, "Translate(x, y int)") defgrid("grid", w, h, "Grid(x, y, w, h, n int, style ...string)") deftext("text", w, h, "hello, this is SVG", "Text(x, y int, s 
string, style ...string)") defscale("scale", w, h, 0.5, "Scale(n float64)") defscaleXY("scalexy", w, h, 0.5, 0.75, "ScaleXY(x, y float64)") defskewX("skewx", w, h, 30, "SkewX(a float64)") defskewY("skewy", w, h, 10, "SkewY(a float64)") defskewXY("skewxy", w, h, 10, 10, "SkewXY(x, y float64)") defrotate("rotate", w, h, 30, "Rotate(r float64)") deftextpath("textpath", "#tpath", tpathstring, w, h, "Textpath(s, pathid string, style ...string)") defmeta("meta", w*2, metatext, metadesc, "Textlines(x, y int, s []string, size, spacing int, fill, align string)") defrgb("rgb", w, h, 44, 77, 232, 1.0, "RGB(r, g, b int)") defrgb("rgba", w, h, 44, 77, 232, 0.33, "RGBA(r, g, b int, opacity float64)") canvas.DefEnd() } // colordot makes a colored dot, with opacity func colordot(x, y, r, red, green, blue int, a float64) { // canvas.Circle(x,y,r+textsize/6,"fill:none;stroke:"+objcolor) if a == 1.0 { canvas.Circle(x, y, r, canvas.RGB(red, green, blue)) } else { canvas.Circle(x, y, r, canvas.RGBA(red, green, blue, a)) } } // placerow is a helper for placeobjects, placing to previously // defined objects row-wise func placerow(w int, s []string) { for x, name := range s { canvas.Use(x*w, 0, "#"+name) } } // placeobjects places a grid of objects on the canvas as specified // by a string array. 
func placeobjects(x, y, w, h int, data [][]string) { canvas.Desc("Object Usage") for _, object := range data { canvas.Translate(x, y) placerow(w, object) canvas.Gend() y += h } } var roworder = [][]string{ {"rect", "crect", "roundrect", "square", "line", "polyline"}, {"polygon", "circle", "ellipse", "arc", "qbez", "bezier"}, {"trans", "scale", "scalexy", "skewx", "skewy", "skewxy"}, {"rotate", "text", "textpath", "path", "image", "grid"}, {"lgrad", "rgrad", "rgb", "rgba", "meta"}, } func main() { width := 3650 height := (width * 3) / 4 canvas.Start(width, height) defobjects(250, 125) canvas.Title("SVG Go Library Description") canvas.Rect(0, 0, width, height, "fill:white;stroke:black;stroke-width:2") canvas.Gstyle(gtextstyle) canvas.Link("http://github.com/ajstarks/svgo", "SVGo Library") canvas.Text(width/2, 150, "SVG Go Library", "font-size:125px") canvas.Text(width/2, 200, "github.com/ajstarks/svgo", "font-size:50px;fill:gray") canvas.LinkEnd() placeobjects(200, 400, 600, 450, roworder) canvas.Gend() canvas.End() } charm-2.1.1/src/github.com/ajstarks/svgo/fe/0000775000175000017500000000000012672604563017626 5ustar marcomarcocharm-2.1.1/src/github.com/ajstarks/svgo/fe/fe.go0000664000175000017500000000327312672604563020554 0ustar marcomarco// fe: SVG Filter Effect example from http://www.w3.org/TR/SVG/filters.html#AnExample // +build !appengine package main import ( "github.com/ajstarks/svgo" "os" ) func main() { canvas := svg.New(os.Stdout) width := 410 height := 120 canvas.Start(width, height) canvas.Title(`SVGo Filter Example`) canvas.Desc(`Combines multiple filter primitives to produce a 3D lighting effect`) gfs := svg.Filterspec{In: "SourceAlpha", Result: "blur"} ofs := svg.Filterspec{In: "blur", Result: "offsetBlur"} sfs := svg.Filterspec{In: "blur", Result: "specOut"} cfs1 := svg.Filterspec{In: "specOut", In2: "SourceAlpha", Result: "specOut"} cfs2 := svg.Filterspec{In: "SourceGraphic", In2: "specOut", Result: "litPaint"} // define the filters canvas.Def() 
canvas.Filter("myFilter") canvas.FeGaussianBlur(gfs, 4, 4) canvas.FeOffset(ofs, 4, 4) canvas.FeSpecularLighting(sfs, 5, .75, 20, "#bbbbbb") canvas.FePointLight(-5000, -10000, 20000) canvas.FeSpecEnd() canvas.FeComposite(cfs1, "in", 0, 0, 0, 0) canvas.FeComposite(cfs2, "arithmetic", 0, 1, 1, 0) canvas.FeMerge([]string{ofs.Result, cfs2.Result}) canvas.Fend() canvas.DefEnd() // specify the graphic canvas.Gid("SVG") canvas.Path("M50,90 C0,90 0,30 50,30 L150,30 C200,30 200,90 150,90 z", "fill:none;stroke:#D90000;stroke-width:10") canvas.Path("M60,80 C30,80 30,40 60,40 L140,40 C170,40 170,80 140,80 z", "fill:#D90000") canvas.Text(52, 76, "SVG", "fill:white;stroke:black;font-size:45;font-family:Verdana") canvas.Gend() canvas.Rect(0, 0, width, height, "stroke:black;fill:white") canvas.Use(0, 0, "#SVG") // plain graphic canvas.Use(200, 0, "#SVG", `filter="url(#myFilter)"`) // filter applied canvas.End() } charm-2.1.1/src/github.com/kisielk/0000775000175000017500000000000012672604367016071 5ustar marcomarcocharm-2.1.1/src/github.com/kisielk/gotool/0000775000175000017500000000000012672604367017374 5ustar marcomarcocharm-2.1.1/src/github.com/kisielk/gotool/tool.go0000664000175000017500000000136112672604367020701 0ustar marcomarco// Package gotool is a library of utility functions used to implement the standard "Go" tool provided // as a convenience to developers who want to write tools with similar semantics. package gotool // export functions as here to make it easier to keep the implementations up to date with upstream. // ImportPaths returns the import paths to use for the given arguments using default context. // // The path "all" is expanded to all packages in $GOPATH and $GOROOT. // The path "std" is expanded to all packages in the Go standard library. // The string "..." is treated as a wildcard within a path. // Relative import paths are not converted to full import paths. 
func ImportPaths(args []string) []string { return DefaultContext.ImportPaths(args) } charm-2.1.1/src/github.com/kisielk/gotool/LICENSE0000664000175000017500000000207012672604367020400 0ustar marcomarcoCopyright (c) 2013 Kamil Kisiel Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. charm-2.1.1/src/github.com/kisielk/gotool/LEGAL0000664000175000017500000000327112672604367020146 0ustar marcomarcoAll the files in this distribution are covered under either the MIT license (see the file LICENSE) except some files mentioned below. match.go: Copyright (c) 2012 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
charm-2.1.1/src/github.com/kisielk/gotool/README.md0000664000175000017500000000014312672604367020651 0ustar marcomarcogotool ====== A library of some of the utility functions provided by (but not exported) by cmd/go charm-2.1.1/src/github.com/kisielk/gotool/go14.go0000664000175000017500000000020312672604367020470 0ustar marcomarco// +build go1.4 package gotool import ( "path/filepath" "runtime" ) var gorootSrcPkg = filepath.Join(runtime.GOROOT(), "src") charm-2.1.1/src/github.com/kisielk/gotool/go13.go0000664000175000017500000000021312672604367020470 0ustar marcomarco// +build !go1.4 package gotool import ( "path/filepath" "runtime" ) var gorootSrcPkg = filepath.Join(runtime.GOROOT(), "src", "pkg") charm-2.1.1/src/github.com/kisielk/gotool/match.go0000664000175000017500000001732312672604367021025 0ustar marcomarcopackage gotool // This file contains code from the Go distribution. // Copyright (c) 2012 The Go Authors. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import ( "fmt" "go/build" "os" "path" "path/filepath" "regexp" "strings" ) var DefaultContext = Context{ BuildContext: build.Default, } type Context struct { BuildContext build.Context } // matchPattern(pattern)(name) reports whether // name matches pattern. Pattern is a limited glob // pattern in which '...' means 'any string' and there // is no other special syntax. func matchPattern(pattern string) func(name string) bool { re := regexp.QuoteMeta(pattern) re = strings.Replace(re, `\.\.\.`, `.*`, -1) // Special case: foo/... matches foo too. if strings.HasSuffix(re, `/.*`) { re = re[:len(re)-len(`/.*`)] + `(/.*)?` } reg := regexp.MustCompile(`^` + re + `$`) return func(name string) bool { return reg.MatchString(name) } } func (c Context) matchPackages(pattern string) []string { match := func(string) bool { return true } if pattern != "all" && pattern != "std" { match = matchPattern(pattern) } have := map[string]bool{ "builtin": true, // ignore pseudo-package that exists only for documentation } if !c.BuildContext.CgoEnabled { have["runtime/cgo"] = true // ignore during walk } var pkgs []string for _, src := range c.BuildContext.SrcDirs() { if pattern == "std" && src != gorootSrcPkg { continue } src = filepath.Clean(src) + string(filepath.Separator) filepath.Walk(src, func(path string, fi os.FileInfo, err error) error { if err != nil || !fi.IsDir() || path == src { return nil } // Avoid .foo, _foo, and testdata directory trees. 
_, elem := filepath.Split(path) if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { return filepath.SkipDir } name := filepath.ToSlash(path[len(src):]) if pattern == "std" && strings.Contains(name, ".") { return filepath.SkipDir } if have[name] { return nil } have[name] = true if !match(name) { return nil } _, err = c.BuildContext.ImportDir(path, 0) if err != nil { if _, noGo := err.(*build.NoGoError); noGo { return nil } } pkgs = append(pkgs, name) return nil }) } return pkgs } // importPathsNoDotExpansion returns the import paths to use for the given // command line, but it does no ... expansion. func (c Context) importPathsNoDotExpansion(args []string) []string { if len(args) == 0 { return []string{"."} } var out []string for _, a := range args { // Arguments are supposed to be import paths, but // as a courtesy to Windows developers, rewrite \ to / // in command-line arguments. Handles .\... and so on. if filepath.Separator == '\\' { a = strings.Replace(a, `\`, `/`, -1) } // Put argument in canonical form, but preserve leading ./. if strings.HasPrefix(a, "./") { a = "./" + path.Clean(a) if a == "./." { a = "." } } else { a = path.Clean(a) } if a == "all" || a == "std" { out = append(out, c.allPackages(a)...) continue } out = append(out, a) } return out } // ImportPaths returns the import paths to use for the given arguments. // // The path "all" is expanded to all packages in $GOPATH and $GOROOT. // The path "std" is expanded to all packages in the Go standard library. // The string "..." is treated as a wildcard within a path. // Relative import paths are not converted to full import paths. func (c Context) ImportPaths(args []string) []string { args = c.importPathsNoDotExpansion(args) var out []string for _, a := range args { if strings.Contains(a, "...") { if build.IsLocalImport(a) { out = append(out, allPackagesInFS(a)...) } else { out = append(out, c.allPackages(a)...) 
} continue } out = append(out, a) } return out } // allPackages returns all the packages that can be found // under the $GOPATH directories and $GOROOT matching pattern. // The pattern is either "all" (all packages), "std" (standard packages) // or a path including "...". func (c Context) allPackages(pattern string) []string { pkgs := c.matchPackages(pattern) if len(pkgs) == 0 { fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) } return pkgs } // allPackagesInFS is like allPackages but is passed a pattern // beginning ./ or ../, meaning it should scan the tree rooted // at the given directory. There are ... in the pattern too. func allPackagesInFS(pattern string) []string { pkgs := matchPackagesInFS(pattern) if len(pkgs) == 0 { fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) } return pkgs } func matchPackagesInFS(pattern string) []string { // Find directory to begin the scan. // Could be smarter but this one optimization // is enough for now, since ... is usually at the // end of a path. i := strings.Index(pattern, "...") dir, _ := path.Split(pattern[:i]) // pattern begins with ./ or ../. // path.Clean will discard the ./ but not the ../. // We need to preserve the ./ for pattern matching // and in the returned import paths. prefix := "" if strings.HasPrefix(pattern, "./") { prefix = "./" } match := matchPattern(pattern) var pkgs []string filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { if err != nil || !fi.IsDir() { return nil } if path == dir { // filepath.Walk starts at dir and recurses. For the recursive case, // the path is the result of filepath.Join, which calls filepath.Clean. // The initial case is not Cleaned, though, so we do this explicitly. // // This converts a path like "./io/" to "io". Without this step, running // "cd $GOROOT/src/pkg; go list ./io/..." 
would incorrectly skip the io // package, because prepending the prefix "./" to the unclean path would // result in "././io", and match("././io") returns false. path = filepath.Clean(path) } // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". _, elem := filepath.Split(path) dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { return filepath.SkipDir } name := prefix + filepath.ToSlash(path) if !match(name) { return nil } if _, err = build.ImportDir(path, 0); err != nil { return nil } pkgs = append(pkgs, name) return nil }) return pkgs }