diff --git a/.gitignore b/.gitignore index ab245b5..6ff7175 100644 --- a/.gitignore +++ b/.gitignore @@ -19,7 +19,7 @@ webroot # built binaries go -srndv2 +./srndv2 # private key *.key diff --git a/contrib/backends/srndv2/.gitignore b/contrib/backends/srndv2/.gitignore new file mode 100644 index 0000000..b9ab693 --- /dev/null +++ b/contrib/backends/srndv2/.gitignore @@ -0,0 +1 @@ +nntpchand \ No newline at end of file diff --git a/contrib/backends/srndv2/Makefile b/contrib/backends/srndv2/Makefile new file mode 100644 index 0000000..d6c9166 --- /dev/null +++ b/contrib/backends/srndv2/Makefile @@ -0,0 +1,10 @@ +GOPATH=$(PWD) + +all: nntpchand + +nntpchand: + go build -o nntpchand -v nntpchan/cmd/nntpchan + +clean: + go clean -v + rm -f nntpchand diff --git a/contrib/backends/srndv2/src/nntpchan/cmd/nntpchan/main.go b/contrib/backends/srndv2/src/nntpchan/cmd/nntpchan/main.go new file mode 100644 index 0000000..4c09f4b --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/cmd/nntpchan/main.go @@ -0,0 +1,122 @@ +package main + +import ( + log "github.com/Sirupsen/logrus" + "nntpchan/lib/config" + "nntpchan/lib/nntp" + "nntpchan/lib/store" + "nntpchan/lib/webhooks" + "net" + "net/http" + _ "net/http/pprof" + "os" + "os/signal" + "syscall" + "time" +) + +func main() { + go func() { + err := http.ListenAndServe("127.0.0.1:7700", nil) + if err != nil { + log.Fatal(err) + } + }() + log.Info("starting up nntpchan...") + cfg_fname := "nntpchan.json" + conf, err := config.Ensure(cfg_fname) + if err != nil { + log.Fatal(err) + } + + if conf.Log == "debug" { + log.SetLevel(log.DebugLevel) + } + + sconfig := conf.Store + + if sconfig == nil { + log.Fatal("no article storage configured") + } + + nconfig := conf.NNTP + + if nconfig == nil { + log.Fatal("no nntp server configured") + } + + dconfig := conf.Database + + if dconfig == nil { + log.Fatal("no database configured") + } + + // create nntp server + nserv := nntp.NewServer() + nserv.Config = nconfig + nserv.Feeds = 
conf.Feeds + + if nconfig.LoginsFile != "" { + nserv.Auth = nntp.FlatfileAuth(nconfig.LoginsFile) + } + + // create article storage + nserv.Storage, err = store.NewFilesytemStorage(sconfig.Path, true) + if err != nil { + log.Fatal(err) + } + + if conf.WebHooks != nil && len(conf.WebHooks) > 0 { + // put webhooks into nntp server event hooks + nserv.Hooks = webhooks.NewWebhooks(conf.WebHooks, nserv.Storage) + } + + if conf.NNTPHooks != nil && len(conf.NNTPHooks) > 0 { + var hooks nntp.MulitHook + if nserv.Hooks != nil { + hooks = append(hooks, nserv.Hooks) + } + for _, h := range conf.NNTPHooks { + hooks = append(hooks, nntp.NewHook(h)) + } + nserv.Hooks = hooks + } + + // nntp server loop + go func() { + for { + naddr := conf.NNTP.Bind + log.Infof("Bind nntp server to %s", naddr) + nl, err := net.Listen("tcp", naddr) + if err == nil { + err = nserv.Serve(nl) + if err != nil { + nl.Close() + log.Errorf("nntpserver.serve() %s", err.Error()) + } + } else { + log.Errorf("nntp server net.Listen failed: %s", err.Error()) + } + time.Sleep(time.Second) + } + }() + + // start persisting feeds + go nserv.PersistFeeds() + + // handle signals + sigchnl := make(chan os.Signal, 1) + signal.Notify(sigchnl, syscall.SIGHUP) + for { + s := <-sigchnl + if s == syscall.SIGHUP { + // handle SIGHUP + conf, err := config.Ensure(cfg_fname) + if err == nil { + log.Infof("reloading config: %s", cfg_fname) + nserv.ReloadServer(conf.NNTP) + nserv.ReloadFeeds(conf.Feeds) + } + } + } +} diff --git a/contrib/backends/srndv2/src/nntpchan/cmd/nntpserver/main.go b/contrib/backends/srndv2/src/nntpchan/cmd/nntpserver/main.go new file mode 100644 index 0000000..9fa80e5 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/cmd/nntpserver/main.go @@ -0,0 +1,42 @@ +package main + +// simple nntp server + +import ( + log "github.com/Sirupsen/logrus" + "nntpchan/lib/config" + "nntpchan/lib/nntp" + "nntpchan/lib/store" + "net" +) + +func main() { + + log.Info("starting NNTP server...") + conf, err := 
config.Ensure("settings.json") + if err != nil { + log.Fatal(err) + } + + if conf.Log == "debug" { + log.SetLevel(log.DebugLevel) + } + + serv := &nntp.Server{ + Config: conf.NNTP, + Feeds: conf.Feeds, + } + serv.Storage, err = store.NewFilesytemStorage(conf.Store.Path, false) + if err != nil { + log.Fatal(err) + } + l, err := net.Listen("tcp", conf.NNTP.Bind) + if err != nil { + log.Fatal(err) + } + log.Info("listening on ", l.Addr()) + err = serv.Serve(l) + if err != nil { + log.Fatal(err) + } +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/admin/doc.go b/contrib/backends/srndv2/src/nntpchan/lib/admin/doc.go new file mode 100644 index 0000000..372f1f3 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/admin/doc.go @@ -0,0 +1,4 @@ +// +// server admin panel +// +package admin diff --git a/contrib/backends/srndv2/src/nntpchan/lib/admin/server.go b/contrib/backends/srndv2/src/nntpchan/lib/admin/server.go new file mode 100644 index 0000000..0f6611b --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/admin/server.go @@ -0,0 +1,16 @@ +package admin + +import ( + "net/http" +) + +type Server struct { +} + +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + +} + +func NewServer() *Server { + return &Server{} +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/api/api.go b/contrib/backends/srndv2/src/nntpchan/lib/api/api.go new file mode 100644 index 0000000..c904083 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/api/api.go @@ -0,0 +1,9 @@ +package api + +import ( + "nntpchan/lib/model" +) +// json api +type API interface { + MakePost(p model.Post) +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/api/doc.go b/contrib/backends/srndv2/src/nntpchan/lib/api/doc.go new file mode 100644 index 0000000..c2402fc --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/api/doc.go @@ -0,0 +1,2 @@ +// json api +package api diff --git a/contrib/backends/srndv2/src/nntpchan/lib/api/server.go 
b/contrib/backends/srndv2/src/nntpchan/lib/api/server.go new file mode 100644 index 0000000..6db4efa --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/api/server.go @@ -0,0 +1,20 @@ +package api + +import ( + "github.com/gorilla/mux" + "net/http" +) + +// api server +type Server struct { +} + +func (s *Server) HandlePing(w http.ResponseWriter, r *http.Request) { + +} + +// inject api routes +func (s *Server) SetupRoutes(r *mux.Router) { + // setup api pinger + r.Path("/ping").HandlerFunc(s.HandlePing) +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/cache/cache.go b/contrib/backends/srndv2/src/nntpchan/lib/cache/cache.go new file mode 100644 index 0000000..f058546 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/cache/cache.go @@ -0,0 +1,25 @@ +package cache + +import ( + "github.com/majestrate/srndv2/lib/config" + "strings" +) + +// create cache from config structure +func FromConfig(c *config.CacheConfig) (cache CacheInterface, err error) { + // set up cache + if c != nil { + // get cache backend + cacheBackend := strings.ToLower(c.Backend) + if cacheBackend == "redis" { + // redis cache + cache, err = NewRedisCache(c.Addr, c.Password) + } else { + // fall through + } + } + if cache == nil { + cache = NewNullCache() + } + return +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/cache/cache_interface.go b/contrib/backends/srndv2/src/nntpchan/lib/cache/cache_interface.go new file mode 100644 index 0000000..9caace7 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/cache/cache_interface.go @@ -0,0 +1,17 @@ +package cache + +import ( + "io" + "net/http" +) + +// recache markup to io.Writer +type RecacheHandler func(io.Writer) error + +type CacheInterface interface { + ServeCached(w http.ResponseWriter, r *http.Request, key string, handler RecacheHandler) + DeleteCache(key string) + Cache(key string, body io.Reader) + Has(key string) bool + Close() +} diff --git 
a/contrib/backends/srndv2/src/nntpchan/lib/cache/file_cache.go b/contrib/backends/srndv2/src/nntpchan/lib/cache/file_cache.go new file mode 100644 index 0000000..46a10b6 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/cache/file_cache.go @@ -0,0 +1,68 @@ +// +build !disable_File + +package cache + +import ( + log "github.com/Sirupsen/logrus" + "io" + "net/http" + "os" + "time" +) + +type FileCache struct { +} + +func (self *FileCache) Has(key string) bool { + _, err := os.Stat(key) + return !os.IsNotExist(err) +} + +func (self *FileCache) ServeCached(w http.ResponseWriter, r *http.Request, key string, handler RecacheHandler) { + _, err := os.Stat(key) + if os.IsNotExist(err) { + modtime := time.Now().UTC() + ts := modtime.Format(http.TimeFormat) + + w.Header().Set("Last-Modified", ts) + f, err := os.Create(key) + if err == nil { + defer f.Close() + mw := io.MultiWriter(f, w) + err = handler(mw) + } + return + } + + http.ServeFile(w, r, key) +} + +func (self *FileCache) DeleteCache(key string) { + err := os.Remove(key) + if err != nil { + log.Warnf("cannot remove file %s: %s", key, err.Error()) + } +} + +func (self *FileCache) Cache(key string, body io.Reader) { + f, err := os.Create(key) + if err != nil { + log.Warnf("cannot cache %s: %s", key, err.Error()) + return + } + defer f.Close() + + _, err = io.Copy(f, body) + if err != nil { + log.Warnf("cannot cache key %s: %s", key, err.Error()) + } +} + +func (self *FileCache) Close() { +} + +func NewFileCache() CacheInterface { + cache := new(FileCache) + + return cache +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/cache/null_cache.go b/contrib/backends/srndv2/src/nntpchan/lib/cache/null_cache.go new file mode 100644 index 0000000..2060430 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/cache/null_cache.go @@ -0,0 +1,33 @@ +package cache + +import ( + "io" + "io/ioutil" + "net/http" +) + +type NullCache struct { +} + +func (self *NullCache) ServeCached(w http.ResponseWriter, r 
*http.Request, key string, handler RecacheHandler) { + handler(w) +} + +func (self *NullCache) DeleteCache(key string) { +} + +func (self *NullCache) Cache(key string, body io.Reader) { + io.Copy(ioutil.Discard, body) +} + +func (self *NullCache) Close() { +} + +func (self *NullCache) Has(key string) bool { + return false +} + +func NewNullCache() CacheInterface { + cache := new(NullCache) + return cache +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/config/article.go b/contrib/backends/srndv2/src/nntpchan/lib/config/article.go new file mode 100644 index 0000000..1c8e8d1 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/config/article.go @@ -0,0 +1,73 @@ +package config + +import "regexp" + +// configration for local article policies +type ArticleConfig struct { + // explicitly allow these newsgroups (regexp) + AllowGroups []string `json:"whitelist"` + // explicitly disallow these newsgroups (regexp) + DisallowGroups []string `json:"blacklist"` + // only allow explicitly allowed groups + ForceWhitelist bool `json:"force-whitelist"` + // allow anonymous posts? + AllowAnon bool `json:"anon"` + // allow attachments? + AllowAttachments bool `json:"attachments"` + // allow anonymous attachments? + AllowAnonAttachments bool `json:"anon-attachments"` +} + +func (c *ArticleConfig) AllowGroup(group string) bool { + + for _, g := range c.DisallowGroups { + r := regexp.MustCompile(g) + if r.MatchString(group) && c.ForceWhitelist { + // disallowed + return false + } + } + + // check allowed groups first + for _, g := range c.AllowGroups { + r := regexp.MustCompile(g) + if r.MatchString(g) { + return true + } + } + + return !c.ForceWhitelist +} + +// allow an article? +func (c *ArticleConfig) Allow(msgid, group string, anon, attachment bool) bool { + + // check attachment policy + if c.AllowGroup(group) { + allow := true + // no anon ? + if anon && !c.AllowAnon { + allow = false + } + // no attachments ? 
+ if allow && attachment && !c.AllowAttachments { + allow = false + } + // no anon attachments ? + if allow && attachment && anon && !c.AllowAnonAttachments { + allow = false + } + return allow + } else { + return false + } +} + +var DefaultArticlePolicy = ArticleConfig{ + AllowGroups: []string{"ctl", "overchan.test"}, + DisallowGroups: []string{"overchan.cp"}, + ForceWhitelist: false, + AllowAnon: true, + AllowAttachments: true, + AllowAnonAttachments: false, +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/config/cache.go b/contrib/backends/srndv2/src/nntpchan/lib/config/cache.go new file mode 100644 index 0000000..b6b1312 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/config/cache.go @@ -0,0 +1,13 @@ +package config + +// caching interface configuration +type CacheConfig struct { + // backend cache driver name + Backend string `json:"backend"` + // address for cache + Addr string `json:"addr"` + // username for login + User string `json:"user"` + // password for login + Password string `json:"password"` +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/config/config.go b/contrib/backends/srndv2/src/nntpchan/lib/config/config.go new file mode 100644 index 0000000..d15a5b2 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/config/config.go @@ -0,0 +1,85 @@ +package config + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "os" +) + +// main configuration +type Config struct { + // nntp server configuration + NNTP *NNTPServerConfig `json:"nntp"` + // log level + Log string `json:"log"` + // article storage config + Store *StoreConfig `json:"storage"` + // web hooks to call + WebHooks []*WebhookConfig `json:"webhooks"` + // external scripts to call + NNTPHooks []*NNTPHookConfig `json:"nntphooks"` + // database backend configuration + Database *DatabaseConfig `json:"db"` + // list of feeds to add on runtime + Feeds []*FeedConfig `json:"feeds"` + + // unexported fields ... 
+ + // absolute filepath to configuration + fpath string +} + +// default configuration +var DefaultConfig = Config{ + Store: &DefaultStoreConfig, + NNTP: &DefaultNNTPConfig, + Database: &DefaultDatabaseConfig, + WebHooks: []*WebhookConfig{DefaultWebHookConfig}, + NNTPHooks: []*NNTPHookConfig{DefaultNNTPHookConfig}, + Feeds: DefaultFeeds, + Log: "debug", +} + +// reload configuration +func (c *Config) Reload() (err error) { + var b []byte + b, err = ioutil.ReadFile(c.fpath) + if err == nil { + err = json.Unmarshal(b, c) + } + return +} + +// ensure that a config file exists +// creates one if it does not exist +func Ensure(fname string) (cfg *Config, err error) { + _, err = os.Stat(fname) + if os.IsNotExist(err) { + err = nil + var d []byte + d, err = json.Marshal(&DefaultConfig) + if err == nil { + b := new(bytes.Buffer) + err = json.Indent(b, d, "", " ") + if err == nil { + err = ioutil.WriteFile(fname, b.Bytes(), 0600) + } + } + } + if err == nil { + cfg, err = Load(fname) + } + return +} + +// load configuration file +func Load(fname string) (cfg *Config, err error) { + cfg = new(Config) + cfg.fpath = fname + err = cfg.Reload() + if err != nil { + cfg = nil + } + return +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/config/database.go b/contrib/backends/srndv2/src/nntpchan/lib/config/database.go new file mode 100644 index 0000000..6fe92c5 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/config/database.go @@ -0,0 +1,18 @@ +package config + +type DatabaseConfig struct { + // url or address for database connector + Addr string `json:"addr"` + // password to use + Password string `json:"password"` + // username to use + Username string `json:"username"` + // type of database to use + Type string `json:"type"` +} + +var DefaultDatabaseConfig = DatabaseConfig{ + Type: "postgres", + Addr: "/var/run/postgresql", + Password: "", +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/config/doc.go 
b/contrib/backends/srndv2/src/nntpchan/lib/config/doc.go new file mode 100644 index 0000000..db5c659 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/config/doc.go @@ -0,0 +1,4 @@ +// +// package for parsing config files +// +package config diff --git a/contrib/backends/srndv2/src/nntpchan/lib/config/feed.go b/contrib/backends/srndv2/src/nntpchan/lib/config/feed.go new file mode 100644 index 0000000..6ae4bfa --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/config/feed.go @@ -0,0 +1,33 @@ +package config + +// configuration for 1 nntp feed +type FeedConfig struct { + // feed's policy, filters articles + Policy *ArticleConfig `json:"policy"` + // remote server's address + Addr string `json:"addr"` + // proxy server config + Proxy *ProxyConfig `json:"proxy"` + // nntp username to log in with + Username string `json:"username"` + // nntp password to use when logging in + Password string `json:"password"` + // do we want to use tls? + TLS bool `json:"tls"` + // the name of this feed + Name string `json:"name"` + // how often to pull articles from the server in minutes + // 0 for never + PullInterval int `json:"pull"` +} + +var DuummyFeed = FeedConfig{ + Policy: &DefaultArticlePolicy, + Addr: "nntp.dummy.tld:1119", + Proxy: &DefaultTorProxy, + Name: "dummy", +} + +var DefaultFeeds = []*FeedConfig{ + &DuummyFeed, +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/config/frontend.go b/contrib/backends/srndv2/src/nntpchan/lib/config/frontend.go new file mode 100644 index 0000000..d100656 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/config/frontend.go @@ -0,0 +1,21 @@ +package config + +type FrontendConfig struct { + // bind to address + BindAddr string `json:"bind"` + // frontend cache + Cache *CacheConfig `json:"cache"` + // frontend ssl settings + SSL *SSLSettings `json:"ssl"` + // static files directory + Static string `json:"static_dir"` + // http middleware configuration + Middleware *MiddlewareConfig `json:"middleware"` +} 
+ +// default Frontend Configuration +var DefaultFrontendConfig = FrontendConfig{ + BindAddr: "127.0.0.1:18888", + Static: "./files/static/", + Middleware: &DefaultMiddlewareConfig, +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/config/hook.go b/contrib/backends/srndv2/src/nntpchan/lib/config/hook.go new file mode 100644 index 0000000..81200ca --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/config/hook.go @@ -0,0 +1,15 @@ +package config + +// config for external callback for nntp articles +type NNTPHookConfig struct { + // name of hook + Name string `json:"name"` + // executable script path to be called with arguments: /path/to/article + Exec string `json:"exec"` +} + +// default dummy hook +var DefaultNNTPHookConfig = &NNTPHookConfig{ + Name: "dummy", + Exec: "/bin/true", +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/config/middleware.go b/contrib/backends/srndv2/src/nntpchan/lib/config/middleware.go new file mode 100644 index 0000000..413bf2a --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/config/middleware.go @@ -0,0 +1,14 @@ +package config + +// configuration for http middleware +type MiddlewareConfig struct { + // middleware type, currently just 1 is available: overchan + Type string `json:"type"` + // directory for our html templates + Templates string `json:"templates_dir"` +} + +var DefaultMiddlewareConfig = MiddlewareConfig{ + Type: "overchan", + Templates: "./files/templates/overchan/", +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/config/nntp.go b/contrib/backends/srndv2/src/nntpchan/lib/config/nntp.go new file mode 100644 index 0000000..a3d4f25 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/config/nntp.go @@ -0,0 +1,24 @@ +package config + +type NNTPServerConfig struct { + // address to bind to + Bind string `json:"bind"` + // name of the nntp server + Name string `json:"name"` + // default inbound article policy + Article *ArticleConfig `json:"policy"` + // do we allow anonymous 
NNTP sync? + AnonNNTP bool `json:"anon-nntp"` + // ssl settings for nntp + SSL *SSLSettings + // file with login credentials + LoginsFile string `json:"authfile"` +} + +var DefaultNNTPConfig = NNTPServerConfig{ + AnonNNTP: false, + Bind: "0.0.0.0:1119", + Name: "nntp.server.tld", + Article: &DefaultArticlePolicy, + LoginsFile: "", +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/config/proxy.go b/contrib/backends/srndv2/src/nntpchan/lib/config/proxy.go new file mode 100644 index 0000000..7ed7003 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/config/proxy.go @@ -0,0 +1,13 @@ +package config + +// proxy configuration +type ProxyConfig struct { + Type string `json:"type"` + Addr string `json:"addr"` +} + +// default tor proxy +var DefaultTorProxy = ProxyConfig{ + Type: "socks", + Addr: "127.0.0.1:9050", +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/config/ssl.go b/contrib/backends/srndv2/src/nntpchan/lib/config/ssl.go new file mode 100644 index 0000000..1e45ed4 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/config/ssl.go @@ -0,0 +1,11 @@ +package config + +// settings for setting up ssl +type SSLSettings struct { + // path to ssl private key + SSLKeyFile string `json:"key"` + // path to ssl certificate signed by CA + SSLCertFile string `json:"cert"` + // domain name to use for ssl + DomainName string `json:"fqdn"` +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/config/store.go b/contrib/backends/srndv2/src/nntpchan/lib/config/store.go new file mode 100644 index 0000000..31abf86 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/config/store.go @@ -0,0 +1,10 @@ +package config + +type StoreConfig struct { + // path to article directory + Path string `json:"path"` +} + +var DefaultStoreConfig = StoreConfig{ + Path: "storage", +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/config/webhooks.go b/contrib/backends/srndv2/src/nntpchan/lib/config/webhooks.go new file mode 100644 index 0000000..9c7b266 
--- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/config/webhooks.go @@ -0,0 +1,17 @@ +package config + +// configuration for a single web hook +type WebhookConfig struct { + // user provided name for this hook + Name string `json:"name"` + // callback URL for webhook + URL string `json:"url"` + // dialect to use when calling webhook + Dialect string `json:"dialect"` +} + +var DefaultWebHookConfig = &WebhookConfig{ + Name: "vichan", + Dialect: "vichan", + URL: "http://localhost/webhook.php", +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/crypto/doc.go b/contrib/backends/srndv2/src/nntpchan/lib/crypto/doc.go new file mode 100644 index 0000000..a912506 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/crypto/doc.go @@ -0,0 +1,5 @@ +// +// nntpchan crypto package +// wraps all external crypro libs +// +package crypto diff --git a/contrib/backends/srndv2/src/nntpchan/lib/crypto/hash.go b/contrib/backends/srndv2/src/nntpchan/lib/crypto/hash.go new file mode 100644 index 0000000..f0bea5b --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/crypto/hash.go @@ -0,0 +1,8 @@ +package crypto + +import ( + "github.com/dchest/blake256" +) + +// common hash function is blake2 +var Hash = blake256.New diff --git a/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl.go b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl.go new file mode 100644 index 0000000..74890ff --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl.go @@ -0,0 +1,83 @@ +package crypto + +import ( + "crypto/sha512" + "hash" + + "nntpchan/lib/crypto/nacl" +) + +type fuckyNacl struct { + k []byte + hash hash.Hash +} + +func (fucky *fuckyNacl) Write(d []byte) (int, error) { + return fucky.hash.Write(d) +} + +func (fucky *fuckyNacl) Sign() (s Signature) { + h := fucky.hash.Sum(nil) + if h == nil { + panic("fuck.hash.Sum == nil") + } + kp := nacl.LoadSignKey(fucky.k) + defer kp.Free() + sk := kp.Secret() + sig := nacl.CryptoSignFucky(h, sk) + if sig == nil 
{ + panic("fucky signer's call to nacl.CryptoSignFucky returned nil") + } + s = Signature(sig) + fucky.resetState() + return +} + +// reset inner state so we can reuse this fuckyNacl for another operation +func (fucky *fuckyNacl) resetState() { + fucky.hash = sha512.New() +} + +func (fucky *fuckyNacl) Verify(sig Signature) (valid bool) { + h := fucky.hash.Sum(nil) + if h == nil { + panic("fuck.hash.Sum == nil") + } + valid = nacl.CryptoVerifyFucky(h, sig, fucky.k) + fucky.resetState() + return +} + +func createFucky(k []byte) *fuckyNacl { + return &fuckyNacl{ + k: k, + hash: sha512.New(), + } +} + +// create a standard signer given a secret key +func CreateSigner(sk []byte) Signer { + return createFucky(sk) +} + +// create a standard verifier given a public key +func CreateVerifier(pk []byte) Verifer { + return createFucky(pk) +} + +// get the public component given the secret key +func ToPublic(sk []byte) (pk []byte) { + kp := nacl.LoadSignKey(sk) + defer kp.Free() + pk = kp.Public() + return +} + +// create a standard keypair +func GenKeypair() (pk, sk []byte) { + kp := nacl.GenSignKeypair() + defer kp.Free() + pk = kp.Public() + sk = kp.Seed() + return +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/box.go b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/box.go new file mode 100644 index 0000000..cf07a9a --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/box.go @@ -0,0 +1,95 @@ +package nacl + +// #cgo freebsd CFLAGS: -I/usr/local/include +// #cgo freebsd LDFLAGS: -L/usr/local/lib +// #cgo LDFLAGS: -lsodium +// #include +import "C" + +import ( + "errors" +) + +// encrypts a message to a user given their public key is known +// returns an encrypted box +func CryptoBox(msg, nounce, pk, sk []byte) ([]byte, error) { + msgbuff := NewBuffer(msg) + defer msgbuff.Free() + + // check sizes + if len(pk) != int(C.crypto_box_publickeybytes()) { + err := errors.New("len(pk) != crypto_box_publickey_bytes") + return nil, err + } 
+ if len(sk) != int(C.crypto_box_secretkeybytes()) { + err := errors.New("len(sk) != crypto_box_secretkey_bytes") + return nil, err + } + if len(nounce) != int(C.crypto_box_macbytes()) { + err := errors.New("len(nounce) != crypto_box_macbytes()") + return nil, err + } + + pkbuff := NewBuffer(pk) + defer pkbuff.Free() + skbuff := NewBuffer(sk) + defer skbuff.Free() + nouncebuff := NewBuffer(nounce) + defer nouncebuff.Free() + + resultbuff := malloc(msgbuff.size + nouncebuff.size) + defer resultbuff.Free() + res := C.crypto_box_easy(resultbuff.uchar(), msgbuff.uchar(), C.ulonglong(msgbuff.size), nouncebuff.uchar(), pkbuff.uchar(), skbuff.uchar()) + if res != 0 { + err := errors.New("crypto_box_easy failed") + return nil, err + } + return resultbuff.Bytes(), nil +} + +// open an encrypted box +func CryptoBoxOpen(box, nounce, sk, pk []byte) ([]byte, error) { + boxbuff := NewBuffer(box) + defer boxbuff.Free() + + // check sizes + if len(pk) != int(C.crypto_box_publickeybytes()) { + err := errors.New("len(pk) != crypto_box_publickey_bytes") + return nil, err + } + if len(sk) != int(C.crypto_box_secretkeybytes()) { + err := errors.New("len(sk) != crypto_box_secretkey_bytes") + return nil, err + } + if len(nounce) != int(C.crypto_box_macbytes()) { + err := errors.New("len(nounce) != crypto_box_macbytes()") + return nil, err + } + + pkbuff := NewBuffer(pk) + defer pkbuff.Free() + skbuff := NewBuffer(sk) + defer skbuff.Free() + nouncebuff := NewBuffer(nounce) + defer nouncebuff.Free() + resultbuff := malloc(boxbuff.size - nouncebuff.size) + defer resultbuff.Free() + + // decrypt + res := C.crypto_box_open_easy(resultbuff.uchar(), boxbuff.uchar(), C.ulonglong(boxbuff.size), nouncebuff.uchar(), pkbuff.uchar(), skbuff.uchar()) + if res != 0 { + return nil, errors.New("crypto_box_open_easy failed") + } + // return result + return resultbuff.Bytes(), nil +} + +// generate a new nounce +func NewBoxNounce() []byte { + return RandBytes(NounceLen()) +} + +// length of a nounce +func 
NounceLen() int { + return int(C.crypto_box_macbytes()) +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/buffer.go b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/buffer.go new file mode 100644 index 0000000..64b0a1c --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/buffer.go @@ -0,0 +1,86 @@ +package nacl + +// #cgo freebsd CFLAGS: -I/usr/local/include +// #cgo freebsd LDFLAGS: -L/usr/local/lib +// #cgo LDFLAGS: -lsodium +// #include +// +// unsigned char * deref_uchar(void * ptr) { return (unsigned char*) ptr; } +// +import "C" + +import ( + "encoding/hex" + "reflect" + "unsafe" +) + +// wrapper arround malloc/free +type Buffer struct { + ptr unsafe.Pointer + length C.int + size C.size_t +} + +// wrapper arround nacl.malloc +func Malloc(size int) *Buffer { + if size > 0 { + return malloc(C.size_t(size)) + } + return nil +} + +// does not check for negatives +func malloc(size C.size_t) *Buffer { + ptr := C.malloc(size) + C.sodium_memzero(ptr, size) + buffer := &Buffer{ptr: ptr, size: size, length: C.int(size)} + return buffer +} + +// create a new buffer copying from a byteslice +func NewBuffer(buff []byte) *Buffer { + buffer := Malloc(len(buff)) + if buffer == nil { + return nil + } + if copy(buffer.Data(), buff) != len(buff) { + return nil + } + return buffer +} + +func (self *Buffer) uchar() *C.uchar { + return C.deref_uchar(self.ptr) +} + +func (self *Buffer) Length() int { + return int(self.length) +} + +// get immutable byte slice +func (self *Buffer) Bytes() []byte { + buff := make([]byte, self.Length()) + copy(buff, self.Data()) + return buff +} + +// get underlying byte slice +func (self *Buffer) Data() []byte { + hdr := reflect.SliceHeader{ + Data: uintptr(self.ptr), + Len: self.Length(), + Cap: self.Length(), + } + return *(*[]byte)(unsafe.Pointer(&hdr)) +} + +func (self *Buffer) String() string { + return hex.EncodeToString(self.Data()) +} + +// zero out memory and then free +func (self *Buffer) Free() 
{ + C.sodium_memzero(self.ptr, self.size) + C.free(self.ptr) +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/key.go b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/key.go new file mode 100644 index 0000000..6c03265 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/key.go @@ -0,0 +1,178 @@ +package nacl + +// #cgo freebsd CFLAGS: -I/usr/local/include +// #cgo freebsd LDFLAGS: -L/usr/local/lib +// #cgo LDFLAGS: -lsodium +// #include +import "C" + +import ( + "encoding/hex" + "errors" + "fmt" +) + +type KeyPair struct { + pk *Buffer + sk *Buffer +} + +// free this keypair from memory +func (self *KeyPair) Free() { + self.pk.Free() + self.sk.Free() +} + +func (self *KeyPair) Secret() []byte { + return self.sk.Bytes() +} + +func (self *KeyPair) Public() []byte { + return self.pk.Bytes() +} + +func (self *KeyPair) Seed() []byte { + seed_len := C.crypto_sign_seedbytes() + return self.sk.Bytes()[:seed_len] +} + +// generate a keypair +func GenSignKeypair() *KeyPair { + sk_len := C.crypto_sign_secretkeybytes() + sk := malloc(sk_len) + pk_len := C.crypto_sign_publickeybytes() + pk := malloc(pk_len) + res := C.crypto_sign_keypair(pk.uchar(), sk.uchar()) + if res == 0 { + return &KeyPair{pk, sk} + } + pk.Free() + sk.Free() + return nil +} + +// get public key from secret key +func GetSignPubkey(sk []byte) ([]byte, error) { + sk_len := C.crypto_sign_secretkeybytes() + if C.size_t(len(sk)) != sk_len { + return nil, errors.New(fmt.Sprintf("nacl.GetSignPubkey() invalid secret key size %d != %d", len(sk), sk_len)) + } + + pk_len := C.crypto_sign_publickeybytes() + pkbuff := malloc(pk_len) + defer pkbuff.Free() + + skbuff := NewBuffer(sk) + defer skbuff.Free() + //XXX: hack + res := C.crypto_sign_seed_keypair(pkbuff.uchar(), skbuff.uchar(), skbuff.uchar()) + + if res != 0 { + return nil, errors.New(fmt.Sprintf("nacl.GetSignPubkey() failed to get public key from secret key: %d", res)) + } + + return pkbuff.Bytes(), nil +} + +// make 
keypair from seed +func LoadSignKey(seed []byte) *KeyPair { + seed_len := C.crypto_sign_seedbytes() + if C.size_t(len(seed)) != seed_len { + return nil + } + seedbuff := NewBuffer(seed) + defer seedbuff.Free() + pk_len := C.crypto_sign_publickeybytes() + sk_len := C.crypto_sign_secretkeybytes() + pkbuff := malloc(pk_len) + skbuff := malloc(sk_len) + res := C.crypto_sign_seed_keypair(pkbuff.uchar(), skbuff.uchar(), seedbuff.uchar()) + if res != 0 { + pkbuff.Free() + skbuff.Free() + return nil + } + return &KeyPair{pkbuff, skbuff} +} + +func GenBoxKeypair() *KeyPair { + sk_len := C.crypto_box_secretkeybytes() + sk := malloc(sk_len) + pk_len := C.crypto_box_publickeybytes() + pk := malloc(pk_len) + res := C.crypto_box_keypair(pk.uchar(), sk.uchar()) + if res == 0 { + return &KeyPair{pk, sk} + } + pk.Free() + sk.Free() + return nil +} + +// get public key from secret key +func GetBoxPubkey(sk []byte) []byte { + sk_len := C.crypto_box_seedbytes() + if C.size_t(len(sk)) != sk_len { + return nil + } + + pk_len := C.crypto_box_publickeybytes() + pkbuff := malloc(pk_len) + defer pkbuff.Free() + + skbuff := NewBuffer(sk) + defer skbuff.Free() + + // compute the public key + C.crypto_scalarmult_base(pkbuff.uchar(), skbuff.uchar()) + + return pkbuff.Bytes() +} + +// load keypair from secret key +func LoadBoxKey(sk []byte) *KeyPair { + pk := GetBoxPubkey(sk) + if pk == nil { + return nil + } + pkbuff := NewBuffer(pk) + skbuff := NewBuffer(sk) + return &KeyPair{pkbuff, skbuff} +} + +// make keypair from seed +func SeedBoxKey(seed []byte) *KeyPair { + seed_len := C.crypto_box_seedbytes() + if C.size_t(len(seed)) != seed_len { + return nil + } + seedbuff := NewBuffer(seed) + defer seedbuff.Free() + pk_len := C.crypto_box_publickeybytes() + sk_len := C.crypto_box_secretkeybytes() + pkbuff := malloc(pk_len) + skbuff := malloc(sk_len) + res := C.crypto_box_seed_keypair(pkbuff.uchar(), skbuff.uchar(), seedbuff.uchar()) + if res != 0 { + pkbuff.Free() + skbuff.Free() + return nil + } + 
return &KeyPair{pkbuff, skbuff} +} + +func (self *KeyPair) String() string { + return fmt.Sprintf("pk=%s sk=%s", hex.EncodeToString(self.pk.Data()), hex.EncodeToString(self.sk.Data())) +} + +func CryptoSignPublicLen() int { + return int(C.crypto_sign_publickeybytes()) +} + +func CryptoSignSecretLen() int { + return int(C.crypto_sign_secretkeybytes()) +} + +func CryptoSignSeedLen() int { + return int(C.crypto_sign_seedbytes()) +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/nacl.go b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/nacl.go new file mode 100644 index 0000000..ee50d18 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/nacl.go @@ -0,0 +1,44 @@ +package nacl + +// #cgo freebsd CFLAGS: -I/usr/local/include +// #cgo freebsd LDFLAGS: -L/usr/local/lib +// #cgo LDFLAGS: -lsodium +// #include +import "C" + +import ( + "log" +) + +// return how many bytes overhead does CryptoBox have +func CryptoBoxOverhead() int { + return int(C.crypto_box_macbytes()) +} + +// size of crypto_box public keys +func CryptoBoxPubKeySize() int { + return int(C.crypto_box_publickeybytes()) +} + +// size of crypto_box private keys +func CryptoBoxPrivKeySize() int { + return int(C.crypto_box_secretkeybytes()) +} + +// size of crypto_sign public keys +func CryptoSignPubKeySize() int { + return int(C.crypto_sign_publickeybytes()) +} + +// size of crypto_sign private keys +func CryptoSignPrivKeySize() int { + return int(C.crypto_sign_secretkeybytes()) +} + +// initialize sodium +func init() { + status := C.sodium_init() + if status == -1 { + log.Fatalf("failed to initialize libsodium status=%d", status) + } +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/rand.go b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/rand.go new file mode 100644 index 0000000..05e0884 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/rand.go @@ -0,0 +1,24 @@ +package nacl + +// #cgo freebsd CFLAGS: 
-I/usr/local/include +// #cgo freebsd LDFLAGS: -L/usr/local/lib +// #cgo LDFLAGS: -lsodium +// #include +import "C" + +func randbytes(size C.size_t) *Buffer { + + buff := malloc(size) + C.randombytes_buf(buff.ptr, size) + return buff + +} + +func RandBytes(size int) []byte { + if size > 0 { + buff := randbytes(C.size_t(size)) + defer buff.Free() + return buff.Bytes() + } + return nil +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/sign.go b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/sign.go new file mode 100644 index 0000000..f80ade6 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/sign.go @@ -0,0 +1,58 @@ +package nacl + +// #cgo freebsd CFLAGS: -I/usr/local/include +// #cgo freebsd LDFLAGS: -L/usr/local/lib +// #cgo LDFLAGS: -lsodium +// #include +import "C" + +// sign data detached with secret key sk +func CryptoSignDetached(msg, sk []byte) []byte { + msgbuff := NewBuffer(msg) + defer msgbuff.Free() + skbuff := NewBuffer(sk) + defer skbuff.Free() + if skbuff.size != C.crypto_sign_bytes() { + return nil + } + + // allocate the signature buffer + sig := malloc(C.crypto_sign_bytes()) + defer sig.Free() + // compute signature + siglen := C.ulonglong(0) + res := C.crypto_sign_detached(sig.uchar(), &siglen, msgbuff.uchar(), C.ulonglong(msgbuff.size), skbuff.uchar()) + if res == 0 && siglen == C.ulonglong(C.crypto_sign_bytes()) { + // return copy of signature buffer + return sig.Bytes() + } + // failure to sign + return nil +} + +// sign data with secret key sk +// return detached sig +// this uses crypto_sign instead pf crypto_sign_detached +func CryptoSignFucky(msg, sk []byte) []byte { + msgbuff := NewBuffer(msg) + defer msgbuff.Free() + skbuff := NewBuffer(sk) + defer skbuff.Free() + if skbuff.size != C.crypto_sign_bytes() { + return nil + } + + // allocate the signed message buffer + sig := malloc(C.crypto_sign_bytes() + msgbuff.size) + defer sig.Free() + // compute signature + siglen := C.ulonglong(0) + res := 
C.crypto_sign(sig.uchar(), &siglen, msgbuff.uchar(), C.ulonglong(msgbuff.size), skbuff.uchar()) + if res == 0 { + // return copy of signature inside the signed message + offset := int(C.crypto_sign_bytes()) + return sig.Bytes()[:offset] + } + // failure to sign + return nil +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/stream.go b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/stream.go new file mode 100644 index 0000000..af3b181 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/stream.go @@ -0,0 +1,342 @@ +package nacl + +import ( + "bytes" + "errors" + "io" + "net" + "time" +) + +// TOY encrypted authenticated stream protocol like tls + +var BadHandshake = errors.New("Bad handshake") +var ShortWrite = errors.New("short write") +var ShortRead = errors.New("short read") +var Closed = errors.New("socket closed") + +// write boxes at 512 bytes at a time +const DefaultMTU = 512 + +// wrapper arround crypto_box +// provides an authenticated encrypted stream +// this is a TOY +type CryptoStream struct { + // underlying stream to write on + stream io.ReadWriteCloser + // secret key seed + key *KeyPair + // public key of who we expect on the other end + remote_pk []byte + tx_nonce []byte + rx_nonce []byte + // box size + mtu int +} + +func (cs *CryptoStream) Close() (err error) { + if cs.key != nil { + cs.key.Free() + cs.key = nil + } + return cs.stream.Close() +} + +// implements io.Writer +func (cs *CryptoStream) Write(data []byte) (n int, err error) { + // let's split it up + for n < len(data) && err == nil { + if n+cs.mtu < len(data) { + err = cs.writeSegment(data[n : n+cs.mtu]) + n += cs.mtu + } else { + err = cs.writeSegment(data[n:]) + if err == nil { + n = len(data) + } + } + } + return +} + +func (cs *CryptoStream) public() (p []byte) { + p = cs.key.Public() + return +} + +func (cs *CryptoStream) secret() (s []byte) { + s = cs.key.Secret() + return +} + +// read 1 segment +func (cs *CryptoStream) readSegment() 
(s []byte, err error) { + var stream_read int + var seg []byte + nl := NounceLen() + msg := make([]byte, cs.mtu+nl) + stream_read, err = cs.stream.Read(msg) + seg, err = CryptoBoxOpen(msg[:stream_read], cs.rx_nonce, cs.secret(), cs.remote_pk) + if err == nil { + copy(cs.rx_nonce, seg[:nl]) + s = seg[nl:] + } + return +} + +// write 1 segment encrypted +// update nounce +func (cs *CryptoStream) writeSegment(data []byte) (err error) { + var segment []byte + nl := NounceLen() + msg := make([]byte, len(data)+nl) + // generate next nounce + nextNounce := NewBoxNounce() + copy(msg, nextNounce) + copy(msg[nl:], data) + // encrypt segment with current nounce + segment, err = CryptoBox(data, cs.tx_nonce, cs.remote_pk, cs.secret()) + var n int + n, err = cs.stream.Write(segment) + if n != len(segment) { + // short write? + err = ShortWrite + return + } + // update nounce + copy(cs.tx_nonce, nextNounce) + return +} + +// implements io.Reader +func (cs *CryptoStream) Read(data []byte) (n int, err error) { + var seg []byte + seg, err = cs.readSegment() + if err == nil { + if len(seg) <= len(data) { + copy(data, seg) + n = len(seg) + } else { + // too big? + err = ShortRead + } + } + return +} + +// version 0 protocol magic +var protocol_magic = []byte("BENIS|00") + +// verify that a handshake is signed right and is in the correct format etc +func verifyHandshake(hs, pk []byte) (valid bool) { + ml := len(protocol_magic) + // valid handshake? 
+ if bytes.Equal(hs[0:ml], protocol_magic) { + // check pk + pl := CryptoSignPublicLen() + nl := NounceLen() + if bytes.Equal(pk, hs[ml:ml+pl]) { + // check signature + msg := hs[0 : ml+pl+nl] + sig := hs[ml+pl+nl:] + valid = CryptoVerifyFucky(msg, sig, pk) + } + } + return +} + +// get claimed public key from handshake +func getPubkey(hs []byte) (pk []byte) { + ml := len(protocol_magic) + pl := CryptoSignPublicLen() + pk = hs[ml : ml+pl] + return +} + +func (cs *CryptoStream) genHandshake() (d []byte) { + // protocol magic string version 00 + // Benis Encrypted Network Information Stream + // :-DDDDD meme crypto + d = append(d, protocol_magic...) + // our public key + d = append(d, cs.public()...) + // nounce + cs.tx_nonce = NewBoxNounce() + d = append(d, cs.tx_nonce...) + // sign protocol magic string, nounce and pubkey + sig := CryptoSignFucky(d, cs.secret()) + // if sig is nil we'll just die + d = append(d, sig...) + return +} + +// extract nounce from handshake +func getNounce(hs []byte) (n []byte) { + ml := len(protocol_magic) + pl := CryptoSignPublicLen() + nl := NounceLen() + n = hs[ml+pl : ml+pl+nl] + return +} + +// initiate protocol handshake +func (cs *CryptoStream) Handshake() (err error) { + // send them our info + hs := cs.genHandshake() + var n int + n, err = cs.stream.Write(hs) + if n != len(hs) { + err = ShortWrite + return + } + // read thier info + buff := make([]byte, len(hs)) + _, err = io.ReadFull(cs.stream, buff) + + if cs.remote_pk == nil { + // inbound + pk := getPubkey(buff) + cs.remote_pk = make([]byte, len(pk)) + copy(cs.remote_pk, pk) + } + + if !verifyHandshake(buff, cs.remote_pk) { + // verification failed + err = BadHandshake + return + } + cs.rx_nonce = make([]byte, NounceLen()) + copy(cs.rx_nonce, getNounce(buff)) + return +} + +// create a client +func Client(stream io.ReadWriteCloser, local_sk, remote_pk []byte) (c *CryptoStream) { + c = &CryptoStream{ + stream: stream, + mtu: DefaultMTU, + } + c.remote_pk = make([]byte, 
len(remote_pk)) + copy(c.remote_pk, remote_pk) + c.key = LoadSignKey(local_sk) + if c.key == nil { + return nil + } + return c +} + +type CryptoConn struct { + stream *CryptoStream + conn net.Conn +} + +func (cc *CryptoConn) Close() (err error) { + err = cc.stream.Close() + return +} + +func (cc *CryptoConn) Write(d []byte) (n int, err error) { + return cc.stream.Write(d) +} + +func (cc *CryptoConn) Read(d []byte) (n int, err error) { + return cc.stream.Read(d) +} + +func (cc *CryptoConn) LocalAddr() net.Addr { + return cc.conn.LocalAddr() +} + +func (cc *CryptoConn) RemoteAddr() net.Addr { + return cc.conn.RemoteAddr() +} + +func (cc *CryptoConn) SetDeadline(t time.Time) (err error) { + return cc.conn.SetDeadline(t) +} + +func (cc *CryptoConn) SetReadDeadline(t time.Time) (err error) { + return cc.conn.SetReadDeadline(t) +} + +func (cc *CryptoConn) SetWriteDeadline(t time.Time) (err error) { + return cc.conn.SetWriteDeadline(t) +} + +type CryptoListener struct { + l net.Listener + handshake chan net.Conn + accepted chan *CryptoConn + trust func(pk []byte) bool + key *KeyPair +} + +func (cl *CryptoListener) Close() (err error) { + err = cl.l.Close() + close(cl.accepted) + close(cl.handshake) + cl.key.Free() + cl.key = nil + return +} + +func (cl *CryptoListener) acceptInbound() { + for { + c, err := cl.l.Accept() + if err == nil { + cl.handshake <- c + } else { + return + } + } +} + +func (cl *CryptoListener) runChans() { + for { + select { + case c := <-cl.handshake: + go func() { + s := &CryptoStream{ + stream: c, + mtu: DefaultMTU, + key: cl.key, + } + err := s.Handshake() + if err == nil { + // we gud handshake was okay + if cl.trust(s.remote_pk) { + // the key is trusted okay + cl.accepted <- &CryptoConn{stream: s, conn: c} + } else { + // not trusted, close connection + s.Close() + } + } + }() + } + } +} + +// accept inbound authenticated and trusted connections +func (cl *CryptoListener) Accept() (c net.Conn, err error) { + var ok bool + c, ok = 
<-cl.accepted + if !ok { + err = Closed + } + return +} + +// create a listener +func Server(l net.Listener, local_sk []byte, trust func(pk []byte) bool) (s *CryptoListener) { + s = &CryptoListener{ + l: l, + trust: trust, + handshake: make(chan net.Conn), + accepted: make(chan *CryptoConn), + } + s.key = LoadSignKey(local_sk) + go s.runChans() + go s.acceptInbound() + return +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/verfiy.go b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/verfiy.go new file mode 100644 index 0000000..9b86347 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl/verfiy.go @@ -0,0 +1,53 @@ +package nacl + +// #cgo freebsd CFLAGS: -I/usr/local/include +// #cgo freebsd LDFLAGS: -L/usr/local/lib +// #cgo LDFLAGS: -lsodium +// #include +import "C" + +// verify a fucky detached sig +func CryptoVerifyFucky(msg, sig, pk []byte) bool { + var smsg []byte + smsg = append(smsg, sig...) + smsg = append(smsg, msg...) + return CryptoVerify(smsg, pk) +} + +// verify a signed message +func CryptoVerify(smsg, pk []byte) bool { + smsg_buff := NewBuffer(smsg) + defer smsg_buff.Free() + pk_buff := NewBuffer(pk) + defer pk_buff.Free() + + if pk_buff.size != C.crypto_sign_publickeybytes() { + return false + } + mlen := C.ulonglong(0) + msg := malloc(C.size_t(len(smsg))) + defer msg.Free() + smlen := C.ulonglong(smsg_buff.size) + return C.crypto_sign_open(msg.uchar(), &mlen, smsg_buff.uchar(), smlen, pk_buff.uchar()) != -1 +} + +// verfiy a detached signature +// return true on valid otherwise false +func CryptoVerifyDetached(msg, sig, pk []byte) bool { + msg_buff := NewBuffer(msg) + defer msg_buff.Free() + sig_buff := NewBuffer(sig) + defer sig_buff.Free() + pk_buff := NewBuffer(pk) + defer pk_buff.Free() + + if pk_buff.size != C.crypto_sign_publickeybytes() { + return false + } + + // invalid sig size + if sig_buff.size != C.crypto_sign_bytes() { + return false + } + return 
C.crypto_sign_verify_detached(sig_buff.uchar(), msg_buff.uchar(), C.ulonglong(len(msg)), pk_buff.uchar()) == 0 +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl_test.go b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl_test.go new file mode 100644 index 0000000..6028654 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/crypto/nacl_test.go @@ -0,0 +1,34 @@ +package crypto + +import ( + "bytes" + "crypto/rand" + "io" + "testing" +) + +func TestNaclToPublic(t *testing.T) { + pk, sk := GenKeypair() + t_pk := ToPublic(sk) + if !bytes.Equal(pk, t_pk) { + t.Logf("%q != %q", pk, t_pk) + t.Fail() + } +} + +func TestNaclSignVerify(t *testing.T) { + var msg [1024]byte + pk, sk := GenKeypair() + io.ReadFull(rand.Reader, msg[:]) + + signer := CreateSigner(sk) + signer.Write(msg[:]) + sig := signer.Sign() + + verifier := CreateVerifier(pk) + verifier.Write(msg[:]) + if !verifier.Verify(sig) { + t.Logf("%q is invalid signature and is %dB long", sig, len(sig)) + t.Fail() + } +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/crypto/rand.go b/contrib/backends/srndv2/src/nntpchan/lib/crypto/rand.go new file mode 100644 index 0000000..2c854c5 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/crypto/rand.go @@ -0,0 +1,8 @@ +package crypto + +import ( + "nntpchan/lib/crypto/nacl" +) + +// generate random bytes +var RandBytes = nacl.RandBytes diff --git a/contrib/backends/srndv2/src/nntpchan/lib/crypto/sig.go b/contrib/backends/srndv2/src/nntpchan/lib/crypto/sig.go new file mode 100644 index 0000000..4fc3178 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/crypto/sig.go @@ -0,0 +1,25 @@ +package crypto + +import "io" + +// a detached signature +type Signature []byte + +type SigEncoder interface { + // encode a signature to an io.Writer + // return error if one occurrened while writing out signature + Encode(sig Signature, w io.Writer) error + // encode a signature to a string + EncodeString(sig Signature) string +} + +// a 
decoder of signatures +type SigDecoder interface { + // decode signature from io.Reader + // reads all data until io.EOF + // returns singaure or error if an error occured while reading + Decode(r io.Reader) (Signature, error) + // decode a signature from string + // returns signature or error if an error ocurred while decoding + DecodeString(str string) (Signature, error) +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/crypto/sign.go b/contrib/backends/srndv2/src/nntpchan/lib/crypto/sign.go new file mode 100644 index 0000000..2f00d62 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/crypto/sign.go @@ -0,0 +1,14 @@ +package crypto + +import "io" + +// +// provides generic signing interface for producing detached signatures +// call Write() to feed data to be signed, call Sign() to generate +// a detached signature +// +type Signer interface { + io.Writer + // generate detached Signature from previously fed body via Write() + Sign() Signature +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/crypto/verify.go b/contrib/backends/srndv2/src/nntpchan/lib/crypto/verify.go new file mode 100644 index 0000000..90b941b --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/crypto/verify.go @@ -0,0 +1,14 @@ +package crypto + +import "io" + +// provides generic signature +// call Write() to feed in message body +// once the entire body has been fed in via Write() call Verify() with detached +// signature to verify the detached signature against the previously fed body +type Verifer interface { + io.Writer + // verify detached signature from body previously fed via Write() + // return true if the detached signature is valid given the body + Verify(sig Signature) bool +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/database/database.go b/contrib/backends/srndv2/src/nntpchan/lib/database/database.go new file mode 100644 index 0000000..793176b --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/database/database.go @@ -0,0 +1,258 
@@ +package database + +import ( + "errors" + "nntpchan/lib/config" + "nntpchan/lib/model" + "net" + "strings" +) + +// generic database driver +type DB interface { + // finalize all transactions and close connection + // after calling this db driver can no longer be used + Close() + // ensire database is well formed + Ensure() error + // do we have a newsgroup locally? + HasNewsgroup(group string) (bool, error) + // have we seen an article with message-id before? + SeenArticle(message_id string) (bool, error) + // do we have an article locally given message-id? + HasArticle(message_id string) (bool, error) + // register a newsgroup with database + RegisterNewsgroup(group string) error + // register an article + RegisterArticle(a *model.Article) error + // get all articles in a newsgroup + // send entries down a channel + // return error if one happens while fetching + GetAllArticlesInGroup(group string, send chan model.ArticleEntry) error + // count all the articles in a newsgroup + CountAllArticlesInGroup(group string) (int64, error) + // get all articles locally known + GetAllArticles() ([]model.ArticleEntry, error) + + // check if a newsgroup is banned + NewsgroupBanned(group string) (bool, error) + + // ban newsgroup + BanNewsgroup(group string) error + // unban newsgroup + UnbanNewsgroup(group string) error + + // return true if this is root post has expired + IsExpired(root_message_id string) (bool, error) + + // get an article's MessageID given the hash of the MessageID + // return an article entry or nil when it doesn't exist + and error if it happened + GetMessageIDByHash(hash string) (model.ArticleEntry, error) + + // get root message_id, newsgroup, pageno for a post regardless if it's rootpost or not + GetInfoForMessage(msgid string) (string, string, int64, error) + + // what page is the thread with this root post on? 
+ // return newsgroup, pageno + GetPageForRootMessage(root_message_id string) (string, int64, error) + + // record that a message given a message id was posted signed by this pubkey + RegisterSigned(message_id, pubkey string) error + + // get the number of articles we have in all groups + ArticleCount() (int64, error) + + // return true if a thread with given root post with message-id has any replies + ThreadHasReplies(root_message_id string) (bool, error) + + // get the number of posts in a certain newsgroup since N seconds ago + // if N <= 0 then count all we have now + CountPostsInGroup(group string, time_frame int64) (int64, error) + + // get all replies' message-id to a thread + // if last > 0 then get that many of the last replies + // start at reply number start + GetThreadReplies(root_message_id string, start, last int) ([]string, error) + + // get a thread model given root post's message id + GetThread(root_message_id string) (model.Thread, error) + // get a thread model given root post hash + GetThreadByHash(hash string) (model.Thread, error) + + // count the number of replies to this thread + CountThreadReplies(root_message_id string) (int64, error) + + // get all attachments for a message given its message-id + GetPostAttachments(message_id string) ([]*model.Attachment, error) + + // return true if this newsgroup has posts + GroupHasPosts(newsgroup string) (bool, error) + + // get all active threads on a board + // send each thread's ArticleEntry down a channel + // return error if one happens while fetching + GetGroupThreads(newsgroup string, send chan model.ArticleEntry) error + + // get every message id for root posts that need to be expired in a newsgroup + // threadcount is the upperbound limit to how many root posts we keep + GetRootPostsForExpiration(newsgroup string, threadcount int) ([]string, error) + + // get the number of pages a board has + GetGroupPageCount(newsgroup string) (int64, error) + + // get board page number N + // fully loads 
all models + GetGroupForPage(newsgroup string, pageno, perpage int) (*model.BoardPage, error) + + // get the threads for ukko page + GetUkkoThreads(threadcount int) ([]*model.Thread, error) + + // get a post model for a single post + GetPost(messageID string) (*model.Post, error) + + // add a public key to the database + AddModPubkey(pubkey string) error + + // mark that a mod with this pubkey can act on all boards + MarkModPubkeyGlobal(pubkey string) error + + // revoke mod with this pubkey the privilege of being able to act on all boards + UnMarkModPubkeyGlobal(pubkey string) error + + // check if this mod pubkey can moderate at a global level + CheckModPubkeyGlobal(pubkey string) bool + + // check if a mod with this pubkey has permission to moderate at all + CheckModPubkey(pubkey string) (bool, error) + + // check if a mod with this pubkey can moderate on the given newsgroup + CheckModPubkeyCanModGroup(pubkey, newsgroup string) (bool, error) + + // add a pubkey to be able to mod a newsgroup + MarkModPubkeyCanModGroup(pubkey, newsgroup string) error + + // remote a pubkey to they can't mod a newsgroup + UnMarkModPubkeyCanModGroup(pubkey, newsgroup string) error + + // ban an article + BanArticle(messageID, reason string) error + + // check if an article is banned or not + ArticleBanned(messageID string) (bool, error) + + // Get ip address given the encrypted version + // return emtpy string if we don't have it + GetIPAddress(encAddr string) (string, error) + + // check if an ip is banned from our local + CheckIPBanned(addr string) (bool, error) + + // check if an encrypted ip is banned from our local + CheckEncIPBanned(encAddr string) (bool, error) + + // ban an ip address from the local + BanAddr(addr string) error + + // unban an ip address from the local + UnbanAddr(addr string) error + + // ban an encrypted ip address from the remote + BanEncAddr(encAddr string) error + + // return the encrypted version of an IPAddress + // if it's not already there insert it 
into the database + GetEncAddress(addr string) (string, error) + + // get the decryption key for an encrypted address + // return empty string if we don't have it + GetEncKey(encAddr string) (string, error) + + // delete an article from the database + // if the article is a root post then all replies are also deleted + DeleteArticle(msg_id string) error + + // forget that we tracked an article given the messgae-id + ForgetArticle(msg_id string) error + + // get threads per page for a newsgroup + GetThreadsPerPage(group string) (int, error) + + // get pages per board for a newsgroup + GetPagesPerBoard(group string) (int, error) + + // get every newsgroup we current carry + GetAllNewsgroups() ([]string, error) + + // get the numerical id of the last , first article for a given group + GetLastAndFirstForGroup(group string) (int64, int64, error) + + // get a message id give a newsgroup and the nntp id + GetMessageIDForNNTPID(group string, id int64) (string, error) + + // get nntp id for a given message-id + GetNNTPIDForMessageID(group, msgid string) (int64, error) + + // get the last N days post count in decending order + GetLastDaysPosts(n int64) ([]model.PostEntry, error) + + // get the last N days post count in decending order + GetLastDaysPostsForGroup(newsgroup string, n int64) ([]model.PostEntry, error) + + // get post history per month since beginning of time + GetMonthlyPostHistory() ([]model.PostEntry, error) + + // check if an nntp login cred is correct + CheckNNTPLogin(username, passwd string) (bool, error) + + // add an nntp login credential + AddNNTPLogin(username, passwd string) error + + // remove an nntp login credential + RemoveNNTPLogin(username string) error + + // check if an nntp login credential given a user exists + CheckNNTPUserExists(username string) (bool, error) + + // get the message ids of an article that has this header with the given value + GetMessageIDByHeader(name, value string) ([]string, error) + + // get the header for a message 
given its message-id + GetHeadersForMessage(msgid string) (model.ArticleHeader, error) + + // get all message-ids posted by posters in this cidr + GetMessageIDByCIDR(cidr *net.IPNet) ([]string, error) + + // get all message-ids posted by poster with encrypted ip + GetMessageIDByEncryptedIP(encaddr string) ([]string, error) +} + +// type for webhooks db backend +type WebhookDB interface { + + // mark article sent + MarkMessageSent(msgid, feedname string) error + + // check if an article was sent + CheckMessageSent(msgid, feedname string) (bool, error) +} + +// get new database connector from configuration +func NewDBFromConfig(c *config.DatabaseConfig) (db DB, err error) { + dbtype := strings.ToLower(c.Type) + if dbtype == "postgres" { + err = errors.New("postgres not supported") + } else { + err = errors.New("no such database driver: " + c.Type) + } + return +} + +func NewWebhooksDBFromConfig(c *config.DatabaseConfig) (db WebhookDB, err error) { + dbtype := strings.ToLower(c.Type) + if dbtype == "postgres" { + err = errors.New("postgres not supported") + } else { + err = errors.New("no such database driver: " + c.Type) + } + return +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/database/doc.go b/contrib/backends/srndv2/src/nntpchan/lib/database/doc.go new file mode 100644 index 0000000..bfd8b14 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/database/doc.go @@ -0,0 +1,4 @@ +// +// database driver +// +package database diff --git a/contrib/backends/srndv2/src/nntpchan/lib/database/postgres.go b/contrib/backends/srndv2/src/nntpchan/lib/database/postgres.go new file mode 100644 index 0000000..636bab8 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/database/postgres.go @@ -0,0 +1 @@ +package database diff --git a/contrib/backends/srndv2/src/nntpchan/lib/frontend/captcha.go b/contrib/backends/srndv2/src/nntpchan/lib/frontend/captcha.go new file mode 100644 index 0000000..6f5ede7 --- /dev/null +++ 
b/contrib/backends/srndv2/src/nntpchan/lib/frontend/captcha.go @@ -0,0 +1,123 @@ +package frontend + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/dchest/captcha" + "github.com/gorilla/mux" + "github.com/gorilla/sessions" + "github.com/majestrate/srndv2/lib/config" + "net/http" +) + +// server of captchas +// implements frontend.Middleware +type CaptchaServer struct { + h int + w int + store *sessions.CookieStore + prefix string + sessionName string +} + +// create new captcha server using existing session store +func NewCaptchaServer(w, h int, prefix string, store *sessions.CookieStore) *CaptchaServer { + return &CaptchaServer{ + h: h, + w: w, + prefix: prefix, + store: store, + sessionName: "captcha", + } +} + +func (cs *CaptchaServer) Reload(c *config.MiddlewareConfig) { + +} + +func (cs *CaptchaServer) SetupRoutes(m *mux.Router) { + m.Path("/new").HandlerFunc(cs.NewCaptcha) + m.Path("/img/{f}").Handler(captcha.Server(cs.w, cs.h)) + m.Path("/verify.json").HandlerFunc(cs.VerifyCaptcha) +} + +// return true if this session has solved the last captcha given provided solution, otherwise false +func (cs *CaptchaServer) CheckSession(w http.ResponseWriter, r *http.Request, solution string) (bool, error) { + s, err := cs.store.Get(r, cs.sessionName) + if err == nil { + id, ok := s.Values["captcha_id"] + if ok { + return captcha.VerifyString(id.(string), solution), nil + } + } + return false, err +} + +// verify a captcha +func (cs *CaptchaServer) VerifyCaptcha(w http.ResponseWriter, r *http.Request) { + dec := json.NewDecoder(r.Body) + defer r.Body.Close() + // request + req := make(map[string]string) + // response + resp := make(map[string]interface{}) + resp["solved"] = false + // decode request + err := dec.Decode(req) + if err == nil { + // decode okay + id, ok := req["id"] + if ok { + // we have id + solution, ok := req["solution"] + if ok { + // we have solution and id + resp["solved"] = captcha.VerifyString(id, solution) + } else { + // we don't 
have solution + err = errors.New("no captcha solution provided") + } + } else { + // we don't have id + err = errors.New("no captcha id provided") + } + } + if err != nil { + // error happened + resp["error"] = err.Error() + } + // send reply + w.Header().Set("Content-Type", "text/json; encoding=UTF-8") + enc := json.NewEncoder(w) + enc.Encode(resp) +} + +// generate a new captcha +func (cs *CaptchaServer) NewCaptcha(w http.ResponseWriter, r *http.Request) { + // obtain session + sess, err := cs.store.Get(r, cs.sessionName) + if err != nil { + // failed to obtain session + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + // new captcha + id := captcha.New() + // do we want to interpret as json? + use_json := r.URL.Query().Get("t") == "json" + // image url + url := fmt.Sprintf("%simg/%s.png", cs.prefix, id) + if use_json { + // send json + enc := json.NewEncoder(w) + enc.Encode(map[string]string{"id": id, "url": url}) + } else { + // set captcha id + sess.Values["captcha_id"] = id + // save session + sess.Save(r, w) + // rediect to image + http.Redirect(w, r, url, http.StatusFound) + } +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/frontend/doc.go b/contrib/backends/srndv2/src/nntpchan/lib/frontend/doc.go new file mode 100644 index 0000000..72b3f40 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/frontend/doc.go @@ -0,0 +1,5 @@ +// +// nntpchan frontend +// allows posting to nntpchan network via various implementations +// +package frontend diff --git a/contrib/backends/srndv2/src/nntpchan/lib/frontend/frontend.go b/contrib/backends/srndv2/src/nntpchan/lib/frontend/frontend.go new file mode 100644 index 0000000..73d758d --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/frontend/frontend.go @@ -0,0 +1,56 @@ +package frontend + +import ( + "github.com/majestrate/srndv2/lib/cache" + "github.com/majestrate/srndv2/lib/config" + "github.com/majestrate/srndv2/lib/database" + "github.com/majestrate/srndv2/lib/model" 
+ "github.com/majestrate/srndv2/lib/nntp" + + "net" +) + +// a frontend that displays nntp posts and allows posting +type Frontend interface { + + // run mainloop using net.Listener + Serve(l net.Listener) error + + // do we accept this inbound post? + AllowPost(p model.PostReference) bool + + // trigger a manual regen of indexes for a root post + Regen(p model.PostReference) + + // implements nntp.EventHooks + GotArticle(msgid nntp.MessageID, group nntp.Newsgroup) + + // implements nntp.EventHooks + SentArticleVia(msgid nntp.MessageID, feedname string) + + // reload config + Reload(c *config.FrontendConfig) +} + +// create a new http frontend give frontend config +func NewHTTPFrontend(c *config.FrontendConfig, db database.DB) (f Frontend, err error) { + + var markupCache cache.CacheInterface + + markupCache, err = cache.FromConfig(c.Cache) + if err != nil { + return + } + + var mid Middleware + if c.Middleware != nil { + // middleware configured + mid, err = OverchanMiddleware(c.Middleware, markupCache, db) + } + + if err == nil { + // create http frontend only if no previous errors + f, err = createHttpFrontend(c, mid, db) + } + return +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/frontend/http.go b/contrib/backends/srndv2/src/nntpchan/lib/frontend/http.go new file mode 100644 index 0000000..98c65ee --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/frontend/http.go @@ -0,0 +1,131 @@ +package frontend + +import ( + "fmt" + log "github.com/Sirupsen/logrus" + "github.com/gorilla/mux" + "github.com/majestrate/srndv2/lib/admin" + "github.com/majestrate/srndv2/lib/api" + "github.com/majestrate/srndv2/lib/cache" + "github.com/majestrate/srndv2/lib/config" + "github.com/majestrate/srndv2/lib/database" + "github.com/majestrate/srndv2/lib/model" + "github.com/majestrate/srndv2/lib/nntp" + "net" + "net/http" +) + +// http frontend server +// provides glue layer between nntp and middleware +type httpFrontend struct { + // http mux + httpmux *mux.Router 
+ // admin panel + adminPanel *admin.Server + // static files path + staticDir string + // http middleware + middleware Middleware + // api server + apiserve *api.Server + // database driver + db database.DB +} + +// reload http frontend +// reloads middleware +func (f *httpFrontend) Reload(c *config.FrontendConfig) { + if f.middleware == nil { + if c.Middleware != nil { + markupcache, err := cache.FromConfig(c.Cache) + if err == nil { + // no middleware set, create middleware + f.middleware, err = OverchanMiddleware(c.Middleware, markupcache, f.db) + if err != nil { + log.Errorf("overchan middleware reload failed: %s", err.Error()) + } + } else { + // error creating cache + log.Errorf("failed to create cache: %s", err.Error()) + } + } + } else { + // middleware exists + // do middleware reload + f.middleware.Reload(c.Middleware) + } + +} + +// serve http requests from net.Listener +func (f *httpFrontend) Serve(l net.Listener) (err error) { + // serve http + err = http.Serve(l, f.httpmux) + return +} + +// serve robots.txt page +func (f *httpFrontend) serveRobots(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "User-Agent: *\nDisallow: /\n") +} + +func (f *httpFrontend) AllowPost(p model.PostReference) bool { + // TODO: implement + return true +} + +func (f *httpFrontend) Regen(p model.PostReference) { + // TODO: implement +} + +func (f *httpFrontend) GotArticle(msgid nntp.MessageID, group nntp.Newsgroup) { + // TODO: implement +} + +func (f *httpFrontend) SentArticleVia(msgid nntp.MessageID, feedname string) { + // TODO: implement +} + +func createHttpFrontend(c *config.FrontendConfig, mid Middleware, db database.DB) (f *httpFrontend, err error) { + f = new(httpFrontend) + // set db + // db.Ensure() called elsewhere + f.db = db + + // set up mux + f.httpmux = mux.NewRouter() + + // set up admin panel + f.adminPanel = admin.NewServer() + + // set static files dir + f.staticDir = c.Static + + // set middleware + f.middleware = mid + + // set up routes + + 
if f.adminPanel != nil { + // route up admin panel + f.httpmux.PathPrefix("/admin/").Handler(f.adminPanel) + } + + if f.middleware != nil { + // route up middleware + f.middleware.SetupRoutes(f.httpmux) + } + + if f.apiserve != nil { + // route up api + f.apiserve.SetupRoutes(f.httpmux.PathPrefix("/api/").Subrouter()) + } + + // route up robots.txt + f.httpmux.Path("/robots.txt").HandlerFunc(f.serveRobots) + + // route up static files + f.httpmux.PathPrefix("/static/").Handler(http.FileServer(http.Dir(f.staticDir))) + + return +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/frontend/middleware.go b/contrib/backends/srndv2/src/nntpchan/lib/frontend/middleware.go new file mode 100644 index 0000000..84788f2 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/frontend/middleware.go @@ -0,0 +1,14 @@ +package frontend + +import ( + "github.com/gorilla/mux" + "github.com/majestrate/srndv2/lib/config" +) + +// http middleware +type Middleware interface { + // set up routes + SetupRoutes(m *mux.Router) + // reload with new configuration + Reload(c *config.MiddlewareConfig) +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/frontend/overchan.go b/contrib/backends/srndv2/src/nntpchan/lib/frontend/overchan.go new file mode 100644 index 0000000..bda3bc7 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/frontend/overchan.go @@ -0,0 +1,143 @@ +package frontend + +import ( + log "github.com/Sirupsen/logrus" + "github.com/gorilla/mux" + "github.com/gorilla/sessions" + "github.com/majestrate/srndv2/lib/cache" + "github.com/majestrate/srndv2/lib/config" + "github.com/majestrate/srndv2/lib/database" + "html/template" + "io" + "net/http" + "path/filepath" + "strconv" +) + +const nntpchan_cache_key = "NNTPCHAN_CACHE::" + +func cachekey(k string) string { + return nntpchan_cache_key + k +} + +func cachekey_for_thread(threadid string) string { + return cachekey("thread-" + threadid) +} + +func cachekey_for_board(name, page string) string { + return 
cachekey("board-" + page + "-" + name) +} + +// standard overchan imageboard middleware +type overchanMiddleware struct { + templ *template.Template + markupCache cache.CacheInterface + captcha *CaptchaServer + store *sessions.CookieStore + db database.DB +} + +func (m *overchanMiddleware) SetupRoutes(mux *mux.Router) { + // setup front page handler + mux.Path("/").HandlerFunc(m.ServeIndex) + // setup thread handler + mux.Path("/thread/{id}/").HandlerFunc(m.ServeThread) + // setup board page handler + mux.Path("/board/{name}/").HandlerFunc(m.ServeBoardPage) + // setup posting endpoint + mux.Path("/post") + // create captcha + captchaPrefix := "/captcha/" + m.captcha = NewCaptchaServer(200, 400, captchaPrefix, m.store) + // setup captcha endpoint + m.captcha.SetupRoutes(mux.PathPrefix(captchaPrefix).Subrouter()) +} + +// reload middleware +func (m *overchanMiddleware) Reload(c *config.MiddlewareConfig) { + // reload templates + templ, err := template.ParseGlob(filepath.Join(c.Templates, "*.tmpl")) + if err == nil { + log.Infof("middleware reloaded templates") + m.templ = templ + } else { + log.Errorf("middleware reload failed: %s", err.Error()) + } +} + +func (m *overchanMiddleware) ServeBoardPage(w http.ResponseWriter, r *http.Request) { + param := mux.Vars(r) + board := param["name"] + page := r.URL.Query().Get("page") + if page == "" { + page = "0" + } + pageno, err := strconv.Atoi(page) + if err == nil { + m.serveTemplate(w, r, "board.html.tmpl", cachekey_for_board(board, page), func() (interface{}, error) { + // get object for cache miss + // TODO: hardcoded page size + return m.db.GetGroupForPage(board, pageno, 10) + }) + } else { + // 404 + http.NotFound(w, r) + } +} + +// serve cached thread +func (m *overchanMiddleware) ServeThread(w http.ResponseWriter, r *http.Request) { + param := mux.Vars(r) + thread_id := param["id"] + m.serveTemplate(w, r, "thread.html.tmpl", cachekey_for_thread(thread_id), func() (interface{}, error) { + // get object for cache miss 
+ return m.db.GetThreadByHash(thread_id) + }) +} + +// serve index page +func (m *overchanMiddleware) ServeIndex(w http.ResponseWriter, r *http.Request) { + m.serveTemplate(w, r, "index.html.tmpl", "index", nil) +} + +// serve a template +func (m *overchanMiddleware) serveTemplate(w http.ResponseWriter, r *http.Request, tname, cacheKey string, getObj func() (interface{}, error)) { + t := m.templ.Lookup(tname) + if t == nil { + log.WithFields(log.Fields{ + "template": tname, + }).Warning("template not found") + http.NotFound(w, r) + } else { + m.markupCache.ServeCached(w, r, cacheKey, func(wr io.Writer) error { + if getObj == nil { + return t.Execute(wr, nil) + } else { + // get model object + obj, err := getObj() + if err != nil { + // error getting model + log.WithFields(log.Fields{ + "error": err, + "template": tname, + "cacheKey": cacheKey, + }).Warning("failed to refresh template") + return err + } + return t.Execute(wr, obj) + } + }) + } +} + +// create standard overchan middleware +func OverchanMiddleware(c *config.MiddlewareConfig, markupCache cache.CacheInterface, db database.DB) (m Middleware, err error) { + om := new(overchanMiddleware) + om.markupCache = markupCache + om.templ, err = template.ParseGlob(filepath.Join(c.Templates, "*.tmpl")) + om.db = db + if err == nil { + m = om + } + return +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/frontend/post.go b/contrib/backends/srndv2/src/nntpchan/lib/frontend/post.go new file mode 100644 index 0000000..d698e76 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/frontend/post.go @@ -0,0 +1 @@ +package frontend diff --git a/contrib/backends/srndv2/src/nntpchan/lib/frontend/webhooks.go b/contrib/backends/srndv2/src/nntpchan/lib/frontend/webhooks.go new file mode 100644 index 0000000..d698e76 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/frontend/webhooks.go @@ -0,0 +1 @@ +package frontend diff --git a/contrib/backends/srndv2/src/nntpchan/lib/model/article.go 
b/contrib/backends/srndv2/src/nntpchan/lib/model/article.go new file mode 100644 index 0000000..237f9eb --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/model/article.go @@ -0,0 +1,15 @@ +package model + +type Article struct { + Subject string + Name string + Header map[string][]string + Text string + Attachments []Attachment + MessageID string + Newsgroup string + Reference string + Path string + Posted int64 + Addr string +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/model/attachment.go b/contrib/backends/srndv2/src/nntpchan/lib/model/attachment.go new file mode 100644 index 0000000..dd43fc7 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/model/attachment.go @@ -0,0 +1,10 @@ +package model + +type Attachment struct { + Path string + Name string + Mime string + Hash string + // only filled for api + Body string +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/model/board.go b/contrib/backends/srndv2/src/nntpchan/lib/model/board.go new file mode 100644 index 0000000..ea1f829 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/model/board.go @@ -0,0 +1,4 @@ +package model + +type Board struct { +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/model/boardpage.go b/contrib/backends/srndv2/src/nntpchan/lib/model/boardpage.go new file mode 100644 index 0000000..45c3bcf --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/model/boardpage.go @@ -0,0 +1,8 @@ +package model + +type BoardPage struct { + Board string + Page int + Pages int + Threads []Thread +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/model/doc.go b/contrib/backends/srndv2/src/nntpchan/lib/model/doc.go new file mode 100644 index 0000000..8bef866 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/model/doc.go @@ -0,0 +1,2 @@ +// MVC models +package model diff --git a/contrib/backends/srndv2/src/nntpchan/lib/model/misc.go b/contrib/backends/srndv2/src/nntpchan/lib/model/misc.go new file mode 100644 index 0000000..2259e85 
--- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/model/misc.go @@ -0,0 +1,29 @@ +package model + +import ( + "time" +) + +type ArticleHeader map[string][]string + +// a ( MessageID , newsgroup ) tuple +type ArticleEntry [2]string + +func (self ArticleEntry) Newsgroup() string { + return self[1] +} + +func (self ArticleEntry) MessageID() string { + return self[0] +} + +// a ( time point, post count ) tuple +type PostEntry [2]int64 + +func (self PostEntry) Time() time.Time { + return time.Unix(self[0], 0) +} + +func (self PostEntry) Count() int64 { + return self[1] +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/model/post.go b/contrib/backends/srndv2/src/nntpchan/lib/model/post.go new file mode 100644 index 0000000..af88dda --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/model/post.go @@ -0,0 +1,29 @@ +package model + +type Post struct { + Board string + PostName string + PostSubject string + PostMessage string + message_rendered string + Message_id string + MessagePath string + Addr string + OP bool + Posted int64 + Parent string + Sage bool + Key string + Files []*Attachment + HashLong string + HashShort string + URL string + Tripcode string + BodyMarkup string + PostMarkup string + PostPrefix string + index int +} + +// ( message-id, references, newsgroup ) +type PostReference [3]string diff --git a/contrib/backends/srndv2/src/nntpchan/lib/model/thread.go b/contrib/backends/srndv2/src/nntpchan/lib/model/thread.go new file mode 100644 index 0000000..6a6b894 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/model/thread.go @@ -0,0 +1,6 @@ +package model + +type Thread struct { + Root *Post + Replies []*Post +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/network/dial.go b/contrib/backends/srndv2/src/nntpchan/lib/network/dial.go new file mode 100644 index 0000000..07b3861 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/network/dial.go @@ -0,0 +1,37 @@ +package network + +import ( + "errors" + 
"nntpchan/lib/config" + "net" + "strings" +) + +// operation timed out +var ErrTimeout = errors.New("timeout") + +// the operation was reset abruptly +var ErrReset = errors.New("reset") + +// the operation was actively refused +var ErrRefused = errors.New("refused") + +// generic dialer +// dials out to a remote address +// returns a net.Conn and nil on success +// returns nil and error if an error happens while dialing +type Dialer interface { + Dial(remote string) (net.Conn, error) +} + +// create a new dialer from configuration +func NewDialer(conf *config.ProxyConfig) (d Dialer) { + d = StdDialer + if conf != nil { + proxyType := strings.ToLower(conf.Type) + if proxyType == "socks" || proxyType == "socks4a" { + d = SocksDialer(conf.Addr) + } + } + return +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/network/doc.go b/contrib/backends/srndv2/src/nntpchan/lib/network/doc.go new file mode 100644 index 0000000..ab242ea --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/network/doc.go @@ -0,0 +1,4 @@ +// +// network utilities +// +package network diff --git a/contrib/backends/srndv2/src/nntpchan/lib/network/i2p.go b/contrib/backends/srndv2/src/nntpchan/lib/network/i2p.go new file mode 100644 index 0000000..1ae2e9d --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/network/i2p.go @@ -0,0 +1 @@ +package network diff --git a/contrib/backends/srndv2/src/nntpchan/lib/network/socks.go b/contrib/backends/srndv2/src/nntpchan/lib/network/socks.go new file mode 100644 index 0000000..44b5d4c --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/network/socks.go @@ -0,0 +1,140 @@ +package network + +import ( + "errors" + log "github.com/Sirupsen/logrus" + "io" + "net" + "strconv" + "strings" +) + +type socksDialer struct { + socksAddr string + dialer Dialer +} + +// try dialing out via socks proxy +func (sd *socksDialer) Dial(remote string) (c net.Conn, err error) { + log.WithFields(log.Fields{ + "addr": remote, + "socks": sd.socksAddr, + 
}).Debug("dailing out to socks proxy") + c, err = sd.dialer.Dial(sd.socksAddr) + if err == nil { + // dailed out to socks proxy good + remote_addr := remote + // generate request + idx := strings.LastIndex(remote_addr, ":") + if idx == -1 { + err = errors.New("invalid address: " + remote_addr) + return + } + var port uint64 + addr := remote_addr[:idx] + port, err = strconv.ParseUint(remote_addr[idx+1:], 10, 16) + if port >= 25536 { + err = errors.New("bad proxy port") + c.Close() + c = nil + return + } else if err != nil { + c.Close() + return + } + var proxy_port uint16 + proxy_port = uint16(port) + proxy_ident := "srndproxy" + req_len := len(addr) + 1 + len(proxy_ident) + 1 + 8 + + req := make([]byte, req_len) + // pack request + req[0] = '\x04' + req[1] = '\x01' + req[2] = byte(proxy_port & 0xff00 >> 8) + req[3] = byte(proxy_port & 0x00ff) + req[7] = '\x01' + idx = 8 + + proxy_ident_b := []byte(proxy_ident) + addr_b := []byte(addr) + + var bi int + for bi = range proxy_ident_b { + req[idx] = proxy_ident_b[bi] + idx += 1 + } + idx += 1 + for bi = range addr_b { + req[idx] = addr_b[bi] + idx += 1 + } + log.WithFields(log.Fields{ + "addr": remote, + "socks": sd.socksAddr, + "req": req, + }).Debug("write socks request") + n := 0 + n, err = c.Write(req) + if err == nil && n == len(req) { + // wrote request okay + resp := make([]byte, 8) + _, err = io.ReadFull(c, resp) + if err == nil { + // got reply okay + if resp[1] == '\x5a' { + // successful socks connection + log.WithFields(log.Fields{ + "addr": remote, + "socks": sd.socksAddr, + }).Debug("socks proxy connection successful") + } else { + // unsucessful socks connect + log.WithFields(log.Fields{ + "addr": remote, + "socks": sd.socksAddr, + "code": resp[1], + }).Warn("connect via socks proxy failed") + c.Close() + c = nil + } + } else { + // error reading reply + log.WithFields(log.Fields{ + "addr": remote, + "socks": sd.socksAddr, + }).Error("failed to read socks response ", err) + c.Close() + c = nil + } + } 
else { + if err == nil { + err = errors.New("short write") + } + + // error writing request + log.WithFields(log.Fields{ + "addr": remote, + "socks": sd.socksAddr, + }).Error("failed to write socks request ", err) + c.Close() + c = nil + + } + } else { + // dail fail + log.WithFields(log.Fields{ + "addr": remote, + "socks": sd.socksAddr, + }).Error("Cannot connect to socks proxy ", err) + } + return +} + +// create a socks dialer that dials out via socks proxy at address +func SocksDialer(addr string) Dialer { + return &socksDialer{ + socksAddr: addr, + dialer: StdDialer, + } +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/network/std.go b/contrib/backends/srndv2/src/nntpchan/lib/network/std.go new file mode 100644 index 0000000..134d079 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/network/std.go @@ -0,0 +1,14 @@ +package network + +import ( + "net" +) + +type stdDialer struct { +} + +func (sd *stdDialer) Dial(addr string) (c net.Conn, err error) { + return net.Dial("tcp", addr) +} + +var StdDialer = &stdDialer{} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/acceptor.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/acceptor.go new file mode 100644 index 0000000..fc74fdd --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/acceptor.go @@ -0,0 +1,69 @@ +package nntp + +import ( + "nntpchan/lib/nntp/message" +) + +const ( + // accepted article + ARTICLE_ACCEPT = iota + // reject article, don't send again + ARTICLE_REJECT + // defer article, send later + ARTICLE_DEFER + // reject + ban + ARTICLE_BAN +) + +type PolicyStatus int + +const PolicyAccept = PolicyStatus(ARTICLE_ACCEPT) +const PolicyReject = PolicyStatus(ARTICLE_REJECT) +const PolicyDefer = PolicyStatus(ARTICLE_DEFER) +const PolicyBan = PolicyStatus(ARTICLE_BAN) + +func (s PolicyStatus) String() string { + switch int(s) { + case ARTICLE_ACCEPT: + return "ACCEPTED" + case ARTICLE_REJECT: + return "REJECTED" + case ARTICLE_DEFER: + return "DEFERRED" + case 
ARTICLE_BAN: + return "BANNED" + default: + return "[invalid policy status]" + } +} + +// is this an accept code? +func (s PolicyStatus) Accept() bool { + return s == ARTICLE_ACCEPT +} + +// is this a defer code? +func (s PolicyStatus) Defer() bool { + return s == ARTICLE_DEFER +} + +// is this a ban code +func (s PolicyStatus) Ban() bool { + return s == ARTICLE_BAN +} + +// is this a reject code? +func (s PolicyStatus) Reject() bool { + return s == ARTICLE_BAN || s == ARTICLE_REJECT +} + +// type defining a policy that determines if we want to accept/reject/defer an +// incoming article +type ArticleAcceptor interface { + // check article given an article header + CheckHeader(hdr message.Header) PolicyStatus + // check article given a message id + CheckMessageID(msgid MessageID) PolicyStatus + // get max article size in bytes + MaxArticleSize() int64 +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/auth.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/auth.go new file mode 100644 index 0000000..65d0f00 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/auth.go @@ -0,0 +1,38 @@ +package nntp + +import ( + "bufio" + "fmt" + "os" + "strings" +) + +// defines server side authentication mechanism +type ServerAuth interface { + // check plaintext login + // returns nil on success otherwise error if one occurs during authentication + // returns true if authentication was successful and an error if a network io error happens + CheckLogin(username, passwd string) (bool, error) +} + +type FlatfileAuth string + +func (fname FlatfileAuth) CheckLogin(username, passwd string) (found bool, err error) { + cred := fmt.Sprintf("%s:%s", username, passwd) + var f *os.File + f, err = os.Open(string(fname)) + if err == nil { + defer f.Close() + r := bufio.NewReader(f) + for err == nil { + var line string + line, err = r.ReadString(10) + line = strings.Trim(line, "\r\n") + if line == cred { + found = true + break + } + } + } + return +} diff --git 
a/contrib/backends/srndv2/src/nntpchan/lib/nntp/client.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/client.go new file mode 100644 index 0000000..6fd5420 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/client.go @@ -0,0 +1,51 @@ +package nntp + +import ( + "errors" + "nntpchan/lib/nntp/message" +) + +var ErrArticleNotFound = errors.New("article not found") +var ErrPostRejected = errors.New("post rejected") + +// an nntp client +// obtains articles from remote nntp server +type Client interface { + // obtain article by message id + // returns an article and nil if obtained + // returns nil and an error if an error occured while obtaining the article, + // error is ErrArticleNotFound if the remote server doesn't have that article + Article(msgid MessageID) (*message.Article, error) + + // check if the remote server has an article given its message-id + // return true and nil if the server has the article + // return false and nil if the server doesn't have the article + // returns false and error if an error occured while checking + Check(msgid MessageID) (bool, error) + + // check if the remote server carries a newsgroup + // return true and nil if the server carries this newsgroup + // return false and nil if the server doesn't carry this newsgroup + // returns false and error if an error occured while checking + NewsgroupExists(group Newsgroup) (bool, error) + + // return true and nil if posting is allowed + // return false and nil if posting is not allowed + // return false and error if an error occured + PostingAllowed() (bool, error) + + // post an nntp article to remote server + // returns nil on success + // returns error if an error ocurred during post + // returns ErrPostRejected if the remote server rejected our post + Post(a *message.Article) error + + // connect to remote server + // returns nil on success + // returns error if one occurs during dial or handshake + Connect(d Dialer) error + + // send quit and disconnects from 
server + // blocks until done + Quit() +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/codes.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/codes.go new file mode 100644 index 0000000..fb0f9e6 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/codes.go @@ -0,0 +1,206 @@ +package nntp + +// 1xx codes + +// help info follows +const RPL_Help = "100" + +// capabilities info follows +const RPL_Capabilities = "101" + +// server date time follows +const RPL_Date = "111" + +// 2xx codes + +// posting is allowed +const RPL_PostingAllowed = "200" + +// posting is not allowed +const RPL_PostingNotAllowed = "201" + +// streaming mode enabled +const RPL_PostingStreaming = "203" + +// reply to QUIT command, we will close the connection +const RPL_Quit = "205" + +// reply for GROUP and LISTGROUP commands +const RPL_Group = "211" + +// info list follows +const RPL_List = "215" + +// index follows +const RPL_Index = "218" + +// article follows +const RPL_Article = "220" + +// article headers follows +const RPL_ArticleHeaders = "221" + +// article body follows +const RPL_ArticleBody = "222" + +// selected article exists +const RPL_ArticleSelectedExists = "223" + +// overview info follows +const RPL_Overview = "224" + +// list of article heards follows +const RPL_HeadersList = "225" + +// list of new articles follows +const RPL_NewArticles = "230" + +// list of newsgroups followes +const RPL_NewsgroupList = "231" + +// article was transfered okay by IHAVE command +const RPL_TransferOkay = "235" + +// article is not found by CHECK and we want it +const RPL_StreamingAccept = "238" + +// article was transfered via TAKETHIS successfully +const RPL_StreamingTransfered = "239" + +// article was transfered by POST command successfully +const RPL_PostReceived = "240" + +// AUTHINFO SIMPLE accepted +const RPL_AuthInfoAccepted = "250" + +// authentication creds have been accepted +const RPL_AuthAccepted = "281" + +// binary content follows +const RPL_Binary = 
"288" + +// line sent for posting allowed +const Line_PostingAllowed = RPL_PostingAllowed + " Posting Allowed" + +// line sent for posting not allowed +const Line_PostingNotAllowed = RPL_PostingNotAllowed + " Posting Not Allowed" + +// 3xx codes + +// article is accepted via IHAVE +const RPL_TransferAccepted = "335" + +// article was accepted via POST +const RPL_PostAccepted = "340" + +// continue with authorization +const RPL_ContinueAuthorization = "350" + +// more authentication info required +const RPL_MoreAuth = "381" + +// continue with tls handshake +const RPL_TLSContinue = "382" + +// 4xx codes + +// server says servive is not avaiable on initial connection +const RPL_NotAvaiable = "400" + +// server is in the wrong mode +const RPL_WrongMode = "401" + +// generic fault prevent action from being taken +const RPL_GenericError = "403" + +// newsgroup does not exist +const RPL_NoSuchGroup = "411" + +// no newsgroup has been selected +const RPL_NoGroupSelected = "412" + +// no tin style index available +const RPL_NoIndex = "418" + +// current article number is invalid +const RPL_NoArticleNum = "420" + +// no next article in this group (NEXT) +const RPL_NoNextArticle = "421" + +// no previous article in this group (LAST) +const RPL_NoPrevArticle = "422" + +// no article in specified range +const RPL_NoArticleRange = "423" + +// no article with that message-id +const RPL_NoArticleMsgID = "430" + +// defer article asked by CHECK comamnd +const RPL_StreamingDefer = "431" + +// article is not wanted (1st stage of IHAVE) +const RPL_TransferNotWanted = "435" + +// article was not sent defer sending (either stage of IHAVE) +const RPL_TransferDefer = "436" + +// reject transfer do not retry (2nd stage IHAVE) +const RPL_TransferReject = "437" + +// reject article and don't ask again (CHECK command) +const RPL_StreamingReject = "438" + +// article transfer via streaming failed (TAKETHIS) +const RPL_StreamingFailed = "439" + +// posting not permitted (1st stage of POST 
command) +const RPL_PostingNotPermitted = "440" + +// posting failed (2nd stage of POST command) +const RPL_PostingFailed = "441" + +// authorization required +const RPL_AuthorizeRequired = "450" + +// authorization rejected +const RPL_AuthorizeRejected = "452" + +// command unavaibale until client has authenticated +const RPL_AuthenticateRequired = "480" + +// authentication creds rejected +const RPL_AuthenticateRejected = "482" + +// command unavailable until connection is encrypted +const RPL_EncryptionRequired = "483" + +// 5xx codes + +// got an unknown command +const RPL_UnknownCommand = "500" + +// got a command with invalid syntax +const RPL_SyntaxError = "501" + +// fatal error happened and connection will close +const RPL_GenericFatal = "502" + +// feature is not supported +const RPL_FeatureNotSupported = "503" + +// message encoding is bad +const RPL_EncodingError = "504" + +// starttls can not be done +const RPL_TLSRejected = "580" + +// line sent on invalid mode +const Line_InvalidMode = RPL_SyntaxError + " Invalid Mode Selected" + +// line sent on successful streaming +const Line_StreamingAllowed = RPL_PostingStreaming + " aw yeh streamit brah" + +// send this when we handle a QUIT command +const Line_RPLQuit = RPL_Quit + " bai" diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/commands.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/commands.go new file mode 100644 index 0000000..2d344c4 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/commands.go @@ -0,0 +1,27 @@ +package nntp + +type Command string + +func (c Command) String() string { + return string(c) +} + +// command to list newsgroups +const CMD_Newsgroups = Command("NEWSGROUPS 0 0 GMT") + +// create group command for a newsgroup +func CMD_Group(g Newsgroup) Command { + return Command("GROUP " + g.String()) +} + +const CMD_XOver = Command("XOVER 0") + +func CMD_Article(msgid MessageID) Command { + return Command("ARTICLE " + msgid.String()) +} + +func 
CMD_Head(msgid MessageID) Command { + return Command("HEAD " + msgid.String()) +} + +const CMD_Capabilities = Command("CAPABILITIES") diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/common.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/common.go new file mode 100644 index 0000000..adb42d7 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/common.go @@ -0,0 +1,75 @@ +package nntp + +import ( + "nntpchan/lib/crypto" + "crypto/sha1" + "fmt" + "io" + "regexp" + "strings" + "time" +) + +var exp_valid_message_id = regexp.MustCompilePOSIX(`^<[a-zA-Z0-9$.]{2,128}@[a-zA-Z0-9\-.]{2,63}>$`) + +type MessageID string + +// return true if this message id is well formed, otherwise return false +func (msgid MessageID) Valid() bool { + return exp_valid_message_id.Copy().MatchString(msgid.String()) +} + +// get message id as string +func (msgid MessageID) String() string { + return string(msgid) +} + +// compute long form hash of message id +func (msgid MessageID) LongHash() string { + return fmt.Sprintf("%x", sha1.Sum([]byte(msgid))) +} + +// compute truncated form of message id hash +func (msgid MessageID) ShortHash() string { + return strings.ToLower(msgid.LongHash()[:18]) +} + +// compute blake2 hash of message id +func (msgid MessageID) Blake2Hash() string { + h := crypto.Hash() + io.WriteString(h, msgid.String()) + return strings.ToLower(fmt.Sprintf("%x", h.Sum(nil))) +} + +// generate a new message id given name of server +func GenMessageID(name string) MessageID { + r := crypto.RandBytes(4) + t := time.Now() + return MessageID(fmt.Sprintf("<%x$%d@%s>", r, t.Unix(), name)) +} + +var exp_valid_newsgroup = regexp.MustCompilePOSIX(`^[a-zA-Z0-9.]{1,128}$`) + +// an nntp newsgroup +type Newsgroup string + +// return true if this newsgroup is well formed otherwise false +func (g Newsgroup) Valid() bool { + return exp_valid_newsgroup.Copy().MatchString(g.String()) +} + +// get newsgroup as string +func (g Newsgroup) String() string { + return string(g) 
+} + +// (message-id, newsgroup) tuple +type ArticleEntry [2]string + +func (e ArticleEntry) MessageID() MessageID { + return MessageID(e[0]) +} + +func (e ArticleEntry) Newsgroup() Newsgroup { + return Newsgroup(e[1]) +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/common_test.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/common_test.go new file mode 100644 index 0000000..607af9b --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/common_test.go @@ -0,0 +1,44 @@ +package nntp + +import ( + "testing" +) + +func TestGenMessageID(t *testing.T) { + msgid := GenMessageID("test.tld") + t.Logf("generated id %s", msgid) + if !msgid.Valid() { + t.Logf("invalid generated message-id %s", msgid) + t.Fail() + } + msgid = GenMessageID("<><><>") + t.Logf("generated id %s", msgid) + if msgid.Valid() { + t.Logf("generated valid message-id when it should've been invalid %s", msgid) + t.Fail() + } +} + +func TestMessageIDHash(t *testing.T) { + msgid := GenMessageID("test.tld") + lh := msgid.LongHash() + sh := msgid.ShortHash() + bh := msgid.Blake2Hash() + t.Logf("long=%s short=%s blake2=%s", lh, sh, bh) +} + +func TestValidNewsgroup(t *testing.T) { + g := Newsgroup("overchan.test") + if !g.Valid() { + t.Logf("%s is invalid?", g) + t.Fail() + } +} + +func TestInvalidNewsgroup(t *testing.T) { + g := Newsgroup("asd.asd.asd.&&&") + if g.Valid() { + t.Logf("%s should be invalid", g) + t.Fail() + } +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/conn.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/conn.go new file mode 100644 index 0000000..39cd91f --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/conn.go @@ -0,0 +1,53 @@ +package nntp + +// an nntp connection +type Conn interface { + + // negotiate an nntp session on this connection + // returns nil if we negitated successfully + // returns ErrAuthRejected if the remote server rejected any authentication + // we sent or another error if one occured while negotiating + 
Negotiate(stream bool) error + + // obtain connection state + GetState() *ConnState + + // retutrn true if posting is allowed + // return false if posting is not allowed + PostingAllowed() bool + + // handle inbound non-streaming connection + // call event hooks on event + ProcessInbound(hooks EventHooks) + + // does this connection want to do nntp streaming? + WantsStreaming() bool + + // what mode are we in? + // returns mode in all caps + Mode() Mode + + // initiate nntp streaming + // after calling this the caller MUST call StreamAndQuit() + // returns a channel for message ids, true if caller sends on the channel or + // returns nil and ErrStreamingNotAllowed if streaming is not allowed on this + // connection or another error if one occurs while trying to start streaming + StartStreaming() (chan ArticleEntry, error) + + // stream articles and quit when the channel obtained by StartStreaming() is + // closed, after which this nntp connection is no longer open + StreamAndQuit() + + // is this nntp connection open? 
+ IsOpen() bool + + // send quit command and close connection + Quit() + + // download all articles in a newsgroup + // returns error if a network error occurs + DownloadGroup(g Newsgroup) error + + // get list of active newsgroups + ListNewsgroups() ([]Newsgroup, error) +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/conn_v1.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/conn_v1.go new file mode 100644 index 0000000..c6ec316 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/conn_v1.go @@ -0,0 +1,1482 @@ +package nntp + +import ( + "bufio" + "bytes" + "crypto/tls" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + log "github.com/Sirupsen/logrus" + "nntpchan/lib/config" + "nntpchan/lib/database" + "nntpchan/lib/nntp/message" + "nntpchan/lib/store" + "nntpchan/lib/util" + "io" + "mime" + "mime/multipart" + "net" + "net/textproto" + "os" + "strings" +) + +// handles 1 line of input from a connection +type lineHandlerFunc func(c *v1Conn, line string, hooks EventHooks) error + +// base nntp connection +type v1Conn struct { + // buffered connection + C *textproto.Conn + + // unexported fields ... + + // connection state (mutable) + state ConnState + // tls connection if tls is established + tlsConn *tls.Conn + // tls config for this connection, nil if we don't support tls + tlsConfig *tls.Config + // has this connection authenticated yet? 
+ authenticated bool + // the username logged in with if it has authenticated via user/pass + username string + // underlying network socket + conn net.Conn + // server's name + serverName string + // article acceptor checks if we want articles + acceptor ArticleAcceptor + // headerIO for read/write of article header + hdrio *message.HeaderIO + // article storage + storage store.Storage + // database driver + db database.DB + // event callbacks + hooks EventHooks + // inbound connection authenticator + auth ServerAuth + // command handlers + cmds map[string]lineHandlerFunc +} + +// json representation of this connection +// format is: +// { +// "state" : (connection state object), +// "authed" : bool, +// "tls" : (tls info or null if plaintext connection) +// } +func (c *v1Conn) MarshalJSON() ([]byte, error) { + j := make(map[string]interface{}) + j["state"] = c.state + j["authed"] = c.authenticated + if c.tlsConn == nil { + j["tls"] = nil + } else { + j["tls"] = c.tlsConn.ConnectionState() + } + return json.Marshal(j) +} + +// get the current state of our connection (immutable) +func (c *v1Conn) GetState() (state *ConnState) { + return &ConnState{ + FeedName: c.state.FeedName, + ConnName: c.state.ConnName, + HostName: c.state.HostName, + Mode: c.state.Mode, + Group: c.state.Group, + Article: c.state.Article, + Policy: &FeedPolicy{ + Whitelist: c.state.Policy.Whitelist, + Blacklist: c.state.Policy.Blacklist, + AllowAnonPosts: c.state.Policy.AllowAnonPosts, + AllowAnonAttachments: c.state.Policy.AllowAnonAttachments, + AllowAttachments: c.state.Policy.AllowAttachments, + UntrustedRequiresPoW: c.state.Policy.UntrustedRequiresPoW, + }, + } +} + +func (c *v1Conn) Group() string { + return c.state.Group.String() +} + +func (c *v1Conn) IsOpen() bool { + return c.state.Open +} + +func (c *v1Conn) Mode() Mode { + return c.state.Mode +} + +// is posting allowed rignt now? 
+func (c *v1Conn) PostingAllowed() bool { + return c.Authed() +} + +// process incoming commands +// call event hooks as needed +func (c *v1Conn) Process(hooks EventHooks) { + var err error + var line string + for err == nil { + line, err = c.readline() + if len(line) == 0 { + // eof (proably?) + c.Close() + return + } + + uline := strings.ToUpper(line) + parts := strings.Split(uline, " ") + handler, ok := c.cmds[parts[0]] + if ok { + // we know the command + err = handler(c, line, hooks) + } else { + // we don't know the command + err = c.printfLine("%s Unknown Command: %s", RPL_UnknownCommand, line) + } + } +} + +type v1OBConn struct { + C v1Conn + supports_stream bool + streamChnl chan ArticleEntry + conf *config.FeedConfig +} + +func (c *v1OBConn) IsOpen() bool { + return c.IsOpen() +} + +func (c *v1OBConn) Mode() Mode { + return c.Mode() +} + +func (c *v1OBConn) DownloadGroup(g Newsgroup) (err error) { + err = c.C.printfLine(CMD_Group(g).String()) + if err == nil { + var line string + line, err = c.C.readline() + if strings.HasPrefix(line, RPL_NoSuchGroup) { + // group does not exist + // don't error this is not a network io error + return + } + // send XOVER + err = c.C.printfLine(CMD_XOver.String()) + if err == nil { + line, err = c.C.readline() + if err == nil { + if !strings.HasPrefix(line, RPL_Overview) { + // bad response + // not a network io error, don't error + return + } + var msgids []MessageID + // read reply + for err == nil && line != "." 
{ + line, err = c.C.readline() + parts := strings.Split(line, "\t") + if len(parts) != 6 { + // incorrect size + continue + } + m := MessageID(parts[4]) + r := MessageID(parts[5]) + if c.C.acceptor == nil { + // no acceptor take it if store doesn't have it + if c.C.storage.HasArticle(m.String()) == store.ErrNoSuchArticle { + msgids = append(msgids, m) + } + } else { + // check if thread is banned + if c.C.acceptor.CheckMessageID(r).Ban() { + continue + } + // check if message is wanted + if c.C.acceptor.CheckMessageID(m).Accept() { + msgids = append(msgids, m) + } + } + } + var accepted []MessageID + + for _, msgid := range msgids { + + if err != nil { + return // io error + } + + if !msgid.Valid() { + // invalid message id + continue + } + // get message header + err = c.C.printfLine(CMD_Head(msgid).String()) + if err == nil { + line, err = c.C.readline() + if err == nil { + if !strings.HasPrefix(line, RPL_ArticleHeaders) { + // bad response + continue + } + // read message header + dr := c.C.C.DotReader() + var hdr message.Header + hdr, err = c.C.hdrio.ReadHeader(dr) + if err == nil { + if c.C.acceptor == nil { + accepted = append(accepted, msgid) + } else if c.C.acceptor.CheckHeader(hdr).Accept() { + accepted = append(accepted, msgid) + } + } + } + } + } + // download wanted messages + for _, msgid := range accepted { + if err != nil { + // io error + return + } + // request message + err = c.C.printfLine(CMD_Article(msgid).String()) + if err == nil { + line, err = c.C.readline() + if err == nil { + if !strings.HasPrefix(line, RPL_Article) { + // bad response + continue + } + // read article + _, err = c.C.readArticle(false, c.C.hooks) + if err == nil { + // we read it okay + } + } + } + } + } + } + } + return +} + +func (c *v1OBConn) ListNewsgroups() (groups []Newsgroup, err error) { + err = c.C.printfLine(CMD_Newsgroups.String()) + if err == nil { + var line string + line, err = c.C.readline() + if err == nil { + if !strings.HasPrefix(line, RPL_NewsgroupList) 
{ + // bad stuff + err = errors.New("invalid reply for NEWSGROUPS command: " + line) + return + } + for err == nil && line != "." { + line, err = c.C.readline() + if err == nil { + parts := strings.Split(line, " ") + if len(parts) != 4 { + // bad format + continue + } + groups = append(groups, Newsgroup(parts[0])) + } + } + } + } + return +} + +// negioate outbound connection +func (c *v1OBConn) Negotiate(stream bool) (err error) { + var line string + // discard first line + _, err = c.C.readline() + if err == nil { + // request capabilities + err = c.C.printfLine(CMD_Capabilities.String()) + dr := c.C.C.DotReader() + var b bytes.Buffer + _, err = io.Copy(&b, dr) + if err == nil { + // try login if specified + if c.conf.Username != "" && c.conf.Password != "" { + err = c.C.printfLine("AUTHINFO USER %s", c.conf.Username) + if err != nil { + return + } + line, err = c.C.readline() + if strings.HasPrefix(line, RPL_MoreAuth) { + err = c.C.printfLine("AUTHINFO PASS %s", c.conf.Password) + if err != nil { + return + } + line, err = c.C.readline() + if err != nil { + return + } + if strings.HasPrefix(line, RPL_AuthAccepted) { + log.WithFields(log.Fields{ + "name": c.conf.Name, + "user": c.conf.Username, + }).Info("authentication accepted") + } else { + // not accepted? + err = errors.New(line) + } + } else { + // bad user? 
+ err = errors.New(line) + } + } + if err == nil { + if stream { + // set mode stream + err = c.C.printfLine(ModeStream.String()) + if err == nil { + line, err = c.C.readline() + if err == nil && !strings.HasPrefix(line, RPL_PostingStreaming) { + err = errors.New("streaiming not allowed") + } + } + } + } + } + } + return +} + +func (c *v1OBConn) PostingAllowed() bool { + return c.C.PostingAllowed() +} + +func (c *v1OBConn) ProcessInbound(hooks EventHooks) { + +} + +func (c *v1OBConn) WantsStreaming() bool { + return c.supports_stream +} + +func (c *v1OBConn) StreamAndQuit() { + for { + e, ok := <-c.streamChnl + if ok { + // do CHECK + msgid := e.MessageID() + if !msgid.Valid() { + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "state": c.C.state, + "msgid": msgid, + }).Warn("Dropping stream event with invalid message-id") + continue + } + // send line + err := c.C.printfLine("%s %s", stream_CHECK, msgid) + if err == nil { + // read response + var line string + line, err = c.C.readline() + ev := StreamEvent(line) + if ev.Valid() { + cmd := ev.Command() + if cmd == RPL_StreamingAccept { + // accepted to send + + // check if we really have it in storage + err = c.C.storage.HasArticle(msgid.String()) + if err == nil { + var r io.ReadCloser + r, err = c.C.storage.OpenArticle(msgid.String()) + if err == nil { + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "state": c.C.state, + "msgid": msgid, + }).Debug("article accepted will send via TAKETHIS now") + _ = c.C.printfLine("%s %s", stream_TAKETHIS, msgid) + br := bufio.NewReader(r) + n := int64(0) + for err == nil { + var line string + line, err = br.ReadString(10) + if err == io.EOF { + err = nil + break + } + line = strings.Trim(line, "\r") + line = strings.Trim(line, "\n") + err = c.C.printfLine(line) + n += int64(len(line)) + } + r.Close() + err = c.C.printfLine(".") + if err == nil { + // successful takethis sent + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "state": c.C.state, + "msgid": msgid, + 
"bytes": n, + }).Debug("article transfer done") + // read response + line, err = c.C.readline() + ev := StreamEvent(line) + if ev.Valid() { + // valid reply + cmd := ev.Command() + if cmd == RPL_StreamingTransfered { + // successful transfer + log.WithFields(log.Fields{ + "feed": c.C.state.FeedName, + "msgid": msgid, + "bytes": n, + }).Debug("Article Transferred") + // call hooks + if c.C.hooks != nil { + c.C.hooks.SentArticleVia(msgid, c.C.state.FeedName) + } + } else { + // failed transfer + log.WithFields(log.Fields{ + "feed": c.C.state.FeedName, + "msgid": msgid, + "bytes": n, + }).Debug("Article Rejected") + } + } + } else { + log.WithFields(log.Fields{ + "feed": c.C.state.FeedName, + "msgid": msgid, + }).Errorf("failed to transfer: %s", err.Error()) + } + } + } else { + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "state": c.C.state, + "msgid": msgid, + }).Warn("article not in storage, not sending") + } + } + } else { + // invalid reply + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "state": c.C.state, + "msgid": msgid, + "line": line, + }).Error("invalid streaming response") + // close + return + } + } else { + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "state": c.C.state, + "msgid": msgid, + }).Error("streaming error during CHECK", err) + return + } + } else { + // channel closed + return + } + } +} + +func (c *v1OBConn) Quit() { + c.C.printfLine("QUIT yo") + c.C.readline() + c.C.Close() +} + +func (c *v1OBConn) StartStreaming() (chnl chan ArticleEntry, err error) { + if c.streamChnl == nil { + c.streamChnl = make(chan ArticleEntry) + + } + chnl = c.streamChnl + return +} + +func (c *v1OBConn) GetState() *ConnState { + return c.GetState() +} + +// create a new connection from an established connection +func newOutboundConn(c net.Conn, s *Server, conf *config.FeedConfig) Conn { + + sname := s.Name() + + if len(sname) == 0 { + sname = "nntp.anon.tld" + } + storage := s.Storage + if storage == nil { + storage = store.NewNullStorage() + } + 
return &v1OBConn{ + conf: conf, + C: v1Conn{ + hooks: s, + state: ConnState{ + FeedName: conf.Name, + HostName: conf.Addr, + Open: true, + }, + serverName: sname, + storage: storage, + C: textproto.NewConn(c), + conn: c, + hdrio: message.NewHeaderIO(), + }, + } +} + +type v1IBConn struct { + C v1Conn +} + +func (c *v1IBConn) DownloadGroup(g Newsgroup) error { + return nil +} + +func (c *v1IBConn) ListNewsgroups() (groups []Newsgroup, err error) { + return +} + +func (c *v1IBConn) GetState() *ConnState { + return c.C.GetState() +} + +// negotiate an inbound connection +func (c *v1IBConn) Negotiate(stream bool) (err error) { + var line string + if c.PostingAllowed() { + line = Line_PostingAllowed + } else { + line = Line_PostingNotAllowed + } + err = c.C.printfLine(line) + return +} + +func (c *v1IBConn) PostingAllowed() bool { + return c.C.PostingAllowed() +} + +func (c *v1IBConn) IsOpen() bool { + return c.C.IsOpen() +} + +func (c *v1IBConn) Quit() { + // inbound connections quit without warning + log.WithFields(log.Fields{ + "pkg": "nntp-ibconn", + "addr": c.C.conn.RemoteAddr(), + }).Info("closing inbound connection") + c.C.Close() +} + +// is this connection authenticated? +func (c *v1Conn) Authed() bool { + return c.tlsConn != nil || c.authenticated +} + +// unconditionally close connection +func (c *v1Conn) Close() { + if c.tlsConn == nil { + // tls is not on + c.C.Close() + } else { + // tls is on + // we should close tls cleanly + c.tlsConn.Close() + } + c.state.Open = false +} + +func (c *v1IBConn) WantsStreaming() bool { + return c.C.state.Mode.Is(MODE_STREAM) +} + +func (c *v1Conn) printfLine(format string, args ...interface{}) error { + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "version": 1, + "state": &c.state, + "io": "send", + }).Debugf(format, args...) + return c.C.PrintfLine(format, args...) 
+} + +func (c *v1Conn) readline() (line string, err error) { + line, err = c.C.ReadLine() + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "version": 1, + "state": &c.state, + "io": "recv", + }).Debug(line) + return +} + +// handle switching nntp modes for inbound connection +func switchModeInbound(c *v1Conn, line string, hooks EventHooks) (err error) { + cmd := ModeCommand(line) + m := c.Mode() + if cmd.Is(ModeReader) { + if m.Is(MODE_STREAM) { + // we need to stop streaming + } + var line string + if c.PostingAllowed() { + line = Line_PostingAllowed + } else { + line = Line_PostingNotAllowed + } + err = c.printfLine(line) + if err == nil { + c.state.Mode = MODE_READER + } + } else if cmd.Is(ModeStream) { + // we want to switch to streaming mode + err = c.printfLine(Line_StreamingAllowed) + if err == nil { + c.state.Mode = MODE_STREAM + } + } else { + err = c.printfLine(Line_InvalidMode) + } + return +} + +// handle quit command +func quitConnection(c *v1Conn, line string, hooks EventHooks) (err error) { + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "version": "1", + "state": &c.state, + }).Debug("quit requested") + err = c.printfLine(Line_RPLQuit) + c.Close() + return +} + +// send our capabailities +func sendCapabilities(c *v1Conn, line string, hooks EventHooks) (err error) { + var caps []string + + caps = append(caps, "MODE-READER", "IMPLEMENTATION nntpchand", "STREAMING") + if c.tlsConfig != nil { + caps = append(caps, "STARTTLS") + } + + err = c.printfLine("%s We can do things", RPL_Capabilities) + if err == nil { + for _, l := range caps { + err = c.printfLine(l) + if err != nil { + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "version": "1", + "state": &c.state, + }).Error(err) + } + } + err = c.printfLine(".") + } + return +} + +// read an article via dotreader +func (c *v1Conn) readArticle(newpost bool, hooks EventHooks) (ps PolicyStatus, err error) { + store_r, store_w := io.Pipe() + article_r, article_w := io.Pipe() + article_body_r, 
article_body_w := io.Pipe() + + accept_chnl := make(chan PolicyStatus) + store_info_chnl := make(chan ArticleEntry) + store_result_chnl := make(chan error) + + hdr_chnl := make(chan message.Header) + + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + }).Debug("start reading") + done_chnl := make(chan PolicyStatus) + go func() { + var err error + dr := c.C.DotReader() + var buff [1024]byte + var n int64 + n, err = io.CopyBuffer(article_w, dr, buff[:]) + log.WithFields(log.Fields{ + "n": n, + }).Debug("read from connection") + if err != nil && err != io.EOF { + article_w.CloseWithError(err) + } else { + article_w.Close() + } + st := <-accept_chnl + close(accept_chnl) + // get result from storage + err2, ok := <-store_result_chnl + if ok && err2 != io.EOF { + err = err2 + } + close(store_result_chnl) + done_chnl <- st + }() + + // parse message and store attachments in bg + go func(msgbody io.ReadCloser) { + defer msgbody.Close() + hdr, ok := <-hdr_chnl + if !ok { + return + } + // all text in this post + // txt := new(bytes.Buffer) + // the article itself + // a := new(model.Article) + var err error + if hdr.IsMultipart() { + var params map[string]string + _, params, err = hdr.GetMediaType() + if err == nil { + boundary, ok := params["boundary"] + if ok { + part_r := multipart.NewReader(msgbody, boundary) + for err == nil { + var part *multipart.Part + part, err = part_r.NextPart() + if err == io.EOF { + // we done + break + } else if err == nil { + // we gots a part + + // get header + part_hdr := part.Header + + // check for base64 encoding + var part_body io.Reader + if part_hdr.Get("Content-Transfer-Encoding") == "base64" { + part_body = base64.NewDecoder(base64.StdEncoding, part) + } else { + part_body = part + } + + // get content type + content_type := part_hdr.Get("Content-Type") + if len(content_type) == 0 { + // assume text/plain + content_type = "text/plain; charset=UTF8" + } + var part_type string + // extract mime type + part_type, _, err = 
mime.ParseMediaType(content_type) + if err == nil { + + if part_type == "text/plain" { + // if we are plaintext save it to the text buffer + _, err = io.Copy(util.Discard, part_body) + } else { + var fpath string + fname := part.FileName() + fpath, err = c.storage.StoreAttachment(part_body, fname) + if err == nil { + // stored attachment good + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "state": &c.state, + "version": "1", + "filename": fname, + "filepath": fpath, + }).Debug("attachment stored") + } else { + // failed to save attachment + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "state": &c.state, + "version": "1", + }).Error("failed to save attachment ", err) + } + } + } else { + // cannot read part header + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "state": &c.state, + "version": "1", + }).Error("bad attachment in multipart message ", err) + } + err = nil + part.Close() + } else if err != io.EOF { + // error reading part + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "state": &c.state, + "version": "1", + }).Error("error reading part ", err) + } + } + } + } + } else if hdr.IsSigned() { + // signed message + + // discard for now + _, err = io.Copy(util.Discard, msgbody) + } else { + // plaintext message + var n int64 + n, err = io.Copy(util.Discard, msgbody) + log.WithFields(log.Fields{ + "bytes": n, + "pkg": "nntp-conn", + }).Debug("text body copied") + } + if err != nil && err != io.EOF { + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "state": &c.state, + }).Error("error handing message body", err) + } + }(article_body_r) + + // store function + go func(r io.ReadCloser) { + e, ok := <-store_info_chnl + if !ok { + // failed to get info + // don't read anything + r.Close() + store_result_chnl <- io.EOF + return + } + msgid := e.MessageID() + if msgid.Valid() { + // valid message-id + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "msgid": msgid, + "version": "1", + "state": &c.state, + }).Debug("storing article") + + 
fpath, err := c.storage.StoreArticle(r, msgid.String(), e.Newsgroup().String()) + r.Close() + if err == nil { + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "msgid": msgid, + "version": "1", + "state": &c.state, + }).Debug("stored article okay to ", fpath) + // we got the article + if hooks != nil { + hooks.GotArticle(msgid, e.Newsgroup()) + } + store_result_chnl <- io.EOF + log.Debugf("store informed") + } else { + // error storing article + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "msgid": msgid, + "state": &c.state, + "version": "1", + }).Error("failed to store article ", err) + io.Copy(util.Discard, r) + store_result_chnl <- err + } + } else { + // invalid message-id + // discard + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "msgid": msgid, + "state": &c.state, + "version": "1", + }).Warn("store will discard message with invalid message-id") + io.Copy(util.Discard, r) + store_result_chnl <- nil + r.Close() + } + }(store_r) + + // acceptor function + go func(r io.ReadCloser, out_w, body_w io.WriteCloser) { + var w io.WriteCloser + defer r.Close() + status := PolicyAccept + hdr, err := c.hdrio.ReadHeader(r) + if err == nil { + // append path + hdr.AppendPath(c.serverName) + // get message-id + var msgid MessageID + if newpost { + // new post + // generate it + msgid = GenMessageID(c.serverName) + hdr.Set("Message-ID", msgid.String()) + } else { + // not a new post, get from header + msgid = MessageID(hdr.MessageID()) + if msgid.Valid() { + // check store fo existing article + err = c.storage.HasArticle(msgid.String()) + if err == store.ErrNoSuchArticle { + // we don't have the article + status = PolicyAccept + log.Infof("accept article %s", msgid) + } else if err == nil { + // we do have the article, reject it we don't need it again + status = PolicyReject + } else { + // some other error happened + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "state": c.state, + }).Error("failed to check store for article ", err) + } + err = nil + } 
else { + // bad article + status = PolicyBan + } + } + // check the header if we have an acceptor and the previous checks are good + if status.Accept() && c.acceptor != nil { + status = c.acceptor.CheckHeader(hdr) + } + if status.Accept() { + // we have accepted the article + // store to disk + w = out_w + } else { + // we have not accepted the article + // discard + w = util.Discard + out_w.Close() + } + store_info_chnl <- ArticleEntry{msgid.String(), hdr.Newsgroup()} + hdr_chnl <- hdr + // close the channel for headers + close(hdr_chnl) + // write header out to storage + err = c.hdrio.WriteHeader(hdr, w) + if err == nil { + mw := io.MultiWriter(body_w, w) + // we wrote header + var n int64 + if c.acceptor == nil { + // write the rest of the body + // we don't care about article size + log.WithFields(log.Fields{}).Debug("copying body") + var buff [128]byte + n, err = io.CopyBuffer(mw, r, buff[:]) + } else { + // we care about the article size + max := c.acceptor.MaxArticleSize() + var n int64 + // copy it out + n, err = io.CopyN(mw, r, max) + if err == nil { + if n < max { + // under size limit + // we gud + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "bytes": n, + "state": &c.state, + }).Debug("body fits") + } else { + // too big, discard the rest + _, err = io.Copy(util.Discard, r) + // ... and ban it + status = PolicyBan + } + } + } + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "bytes": n, + "state": &c.state, + }).Debug("body wrote") + // TODO: inform store to delete article and attachments + } else { + // error writing header + log.WithFields(log.Fields{ + "msgid": msgid, + }).Error("error writing header ", err) + } + } else { + // error reading header + // possibly a read error? 
+ status = PolicyDefer + } + // close info channel for store + close(store_info_chnl) + w.Close() + // close body pipe + body_w.Close() + // inform result + log.Debugf("status %s", status) + accept_chnl <- status + log.Debugf("informed") + }(article_r, store_w, article_body_w) + + ps = <-done_chnl + close(done_chnl) + log.Debug("read article done") + return +} + +// handle IHAVE command +func nntpRecvArticle(c *v1Conn, line string, hooks EventHooks) (err error) { + parts := strings.Split(line, " ") + if len(parts) == 2 { + msgid := MessageID(parts[1]) + if msgid.Valid() { + // valid message-id + err = c.printfLine("%s send article to be transfered", RPL_TransferAccepted) + // read in article + if err == nil { + var status PolicyStatus + status, err = c.readArticle(false, hooks) + if err == nil { + // we read in article + if status.Accept() { + // accepted + err = c.printfLine("%s transfer wuz gud", RPL_TransferOkay) + } else if status.Defer() { + // deferred + err = c.printfLine("%s transfer defer", RPL_TransferDefer) + } else if status.Reject() { + // rejected + err = c.printfLine("%s transfer rejected, don't send it again brah", RPL_TransferReject) + } + } else { + // could not transfer article + err = c.printfLine("%s transfer failed; try again later", RPL_TransferDefer) + } + } + } else { + // invalid message-id + err = c.printfLine("%s article not wanted", RPL_TransferNotWanted) + } + } else { + // invaldi syntax + err = c.printfLine("%s invalid syntax", RPL_SyntaxError) + } + return +} + +// handle POST command +func nntpPostArticle(c *v1Conn, line string, hooks EventHooks) (err error) { + if c.PostingAllowed() { + if c.Mode().Is(MODE_READER) { + err = c.printfLine("%s go ahead yo", RPL_PostAccepted) + var status PolicyStatus + status, err = c.readArticle(true, hooks) + if err == nil { + // read okay + if status.Accept() { + err = c.printfLine("%s post was recieved", RPL_PostReceived) + } else { + err = c.printfLine("%s posting failed", RPL_PostingFailed) + } 
+ } else { + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "state": &c.state, + "version": "1", + }).Error("POST failed ", err) + err = c.printfLine("%s post failed: %s", RPL_PostingFailed, err) + } + } else { + // not in reader mode + err = c.printfLine("%s not in reader mode", RPL_WrongMode) + } + } else { + err = c.printfLine("%s posting is disallowed", RPL_PostingNotPermitted) + } + return +} + +// handle streaming line +func streamingLine(c *v1Conn, line string, hooks EventHooks) (err error) { + ev := StreamEvent(line) + if c.Mode().Is(MODE_STREAM) { + if ev.Valid() { + // valid stream line + cmd := ev.Command() + msgid := ev.MessageID() + if cmd == stream_CHECK { + if c.acceptor == nil { + // no acceptor, we'll take them all + err = c.printfLine("%s %s", RPL_StreamingAccept, msgid) + } else { + status := PolicyAccept + if c.storage.HasArticle(msgid.String()) == nil { + // we have this article + status = PolicyReject + } + if status.Accept() && c.acceptor != nil { + status = c.acceptor.CheckMessageID(ev.MessageID()) + } + if status.Accept() { + // accepted + err = c.printfLine("%s %s", RPL_StreamingAccept, msgid) + } else if status.Defer() { + // deferred + err = c.printfLine("%s %s", RPL_StreamingDefer, msgid) + } else { + // rejected + err = c.printfLine("%s %s", RPL_StreamingReject, msgid) + } + } + } else if cmd == stream_TAKETHIS { + var status PolicyStatus + status, err = c.readArticle(false, hooks) + if status.Accept() { + // this article was accepted + err = c.printfLine("%s %s", RPL_StreamingTransfered, msgid) + } else { + // this article was not accepted + err = c.printfLine("%s %s", RPL_StreamingReject, msgid) + } + } + } else { + // invalid line + err = c.printfLine("%s Invalid syntax", RPL_SyntaxError) + } + } else { + if ev.MessageID().Valid() { + // not in streaming mode + err = c.printfLine("%s %s", RPL_StreamingDefer, ev.MessageID()) + } else { + // invalid message id + err = c.printfLine("%s Invalid Syntax", RPL_SyntaxError) + } + } + 
return +} + +func newsgroupList(c *v1Conn, line string, hooks EventHooks, rpl string) (err error) { + var groups []string + if c.db == nil { + // no database driver available + // let's say we carry overchan.test for now + groups = append(groups, "overchan.test") + } else { + groups, err = c.db.GetAllNewsgroups() + } + + if err == nil { + // we got newsgroups from the db + dw := c.C.DotWriter() + fmt.Fprintf(dw, "%s list of newsgroups follows\n", rpl) + for _, g := range groups { + hi := int64(1) + lo := int64(0) + if c.db != nil { + hi, lo, err = c.db.GetLastAndFirstForGroup(g) + } + if err != nil { + // log error if it occurs + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "group": g, + "state": c.state, + }).Warn("cannot get high low water marks for LIST command") + + } else { + fmt.Fprintf(dw, "%s %d %d y", g, hi, lo) + } + } + // flush dotwriter + err = dw.Close() + } else { + // db error while getting newsgroup list + err = c.printfLine("%s cannot list newsgroups %s", RPL_GenericError, err.Error()) + } + return +} + +// handle inbound STARTTLS command +func upgradeTLS(c *v1Conn, line string, hooks EventHooks) (err error) { + if c.tlsConfig == nil { + err = c.printfLine("%s TLS not supported", RPL_TLSRejected) + } else { + err = c.printfLine("%s Continue with TLS Negotiation", RPL_TLSContinue) + if err == nil { + tconn := tls.Server(c.conn, c.tlsConfig) + err = tconn.Handshake() + if err == nil { + // successful tls handshake + c.tlsConn = tconn + c.C = textproto.NewConn(c.tlsConn) + } else { + // tls failed + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "addr": c.conn.RemoteAddr(), + "state": c.state, + }).Warn("TLS Handshake failed ", err) + // fall back to plaintext + err = nil + } + } + } + return +} + +// switch to another newsgroup +func switchNewsgroup(c *v1Conn, line string, hooks EventHooks) (err error) { + parts := strings.Split(line, " ") + var has bool + var group Newsgroup + if len(parts) == 2 { + group = Newsgroup(parts[1]) + if 
group.Valid() { + // correct format + if c.db == nil { + // no database driver + has = true + } else { + has, err = c.db.HasNewsgroup(group.String()) + } + } + } + if has { + // we have it + hi := int64(1) + lo := int64(0) + if c.db != nil { + // check database for water marks + hi, lo, err = c.db.GetLastAndFirstForGroup(group.String()) + } + if err == nil { + // XXX: ensure hi > lo + err = c.printfLine("%s %d %d %d %s", RPL_Group, hi-lo, lo, hi, group.String()) + if err == nil { + // line was sent + c.state.Group = group + log.WithFields(log.Fields{ + "pkg": "nntp-conn", + "group": group, + "state": c.state, + }).Debug("switched newsgroups") + } + } else { + err = c.printfLine("%s error checking for newsgroup %s", RPL_GenericError, err.Error()) + } + } else if err != nil { + // error + err = c.printfLine("%s error checking for newsgroup %s", RPL_GenericError, err.Error()) + } else { + // incorrect format + err = c.printfLine("%s no such newsgroup", RPL_NoSuchGroup) + } + return +} + +func handleAuthInfo(c *v1Conn, line string, hooks EventHooks) (err error) { + subcmd := line[9:] + if strings.HasPrefix(strings.ToUpper(subcmd), "USER") { + c.username = subcmd[5:] + err = c.printfLine("%s password required", RPL_MoreAuth) + } else if strings.HasPrefix(strings.ToUpper(subcmd), "PASS") { + var success bool + if c.username == "" { + // out of order commands + c.printfLine("%s auth info sent out of order yo", RPL_GenericError) + return + } else if c.auth == nil { + // no auth mechanism, this will be set to true if anon nntp is enabled + success = c.authenticated + } else { + // check login + success, err = c.auth.CheckLogin(c.username, subcmd[5:]) + } + if success { + // login good + err = c.printfLine("%s login gud, proceed yo", RPL_AuthAccepted) + c.authenticated = true + } else if err == nil { + // login bad + err = c.printfLine("%s bad login", RPL_AuthenticateRejected) + } else { + // error + err = c.printfLine("%s error processing login: %s", RPL_GenericError, 
err.Error()) + } + } else { + err = c.printfLine("%s only USER/PASS accepted with AUTHINFO", RPL_SyntaxError) + } + return +} + +func handleXOVER(c *v1Conn, line string, hooks EventHooks) (err error) { + group := c.Group() + if group == "" { + err = c.printfLine("%s no group selected", RPL_NoGroupSelected) + return + } + if !Newsgroup(group).Valid() { + err = c.printfLine("%s Invalid Newsgroup format", RPL_GenericError) + return + } + err = c.printfLine("%s overview follows", RPL_Overview) + if err != nil { + return + } + chnl := make(chan string) + go func() { + c.storage.ForEachInGroup(group, chnl) + close(chnl) + }() + i := 0 + for err == nil { + m, ok := <-chnl + if !ok { + break + } + msgid := MessageID(m) + if !msgid.Valid() { + continue + } + var f *os.File + f, err = c.storage.OpenArticle(m) + if f != nil { + h, e := c.hdrio.ReadHeader(f) + f.Close() + if e == nil { + i++ + err = c.printfLine("%.6d\t%s\t%s\t%s\t%s\t%s", i, h.Get("Subject", "None"), h.Get("From", "anon "), h.Get("Date", "???"), h.MessageID(), h.Reference()) + } + } + } + if err == nil { + err = c.printfLine(".") + } + return +} + +func handleArticle(c *v1Conn, line string, hooks EventHooks) (err error) { + msgid := MessageID(line[8:]) + if msgid.Valid() && c.storage.HasArticle(msgid.String()) == nil { + // valid id and we have it + var r io.ReadCloser + var buff [1024]byte + r, err = c.storage.OpenArticle(msgid.String()) + if err == nil { + err = c.printfLine("%s %s", RPL_Article, msgid) + for err == nil { + _, err = io.CopyBuffer(c.C.W, r, buff[:]) + } + if err == io.EOF { + err = nil + } + if err == nil { + err = c.printfLine(".") + } + r.Close() + return + } + } + // invalid id or we don't have it + err = c.printfLine("%s %s", RPL_NoArticleMsgID, msgid) + return +} + +// inbound streaming start +func (c *v1IBConn) StartStreaming() (chnl chan ArticleEntry, err error) { + if c.Mode().Is(MODE_STREAM) { + chnl = make(chan ArticleEntry) + } else { + err = ErrInvalidMode + } + return +} + +func 
(c *v1IBConn) Mode() Mode { + return c.C.Mode() +} + +func (c *v1IBConn) ProcessInbound(hooks EventHooks) { + c.C.Process(hooks) +} + +// inbound streaming handling +func (c *v1IBConn) StreamAndQuit() { +} + +func newInboundConn(s *Server, c net.Conn) Conn { + sname := s.Name() + storage := s.Storage + if storage == nil { + storage = store.NewNullStorage() + } + anon := false + if s.Config != nil { + anon = s.Config.AnonNNTP + } + return &v1IBConn{ + C: v1Conn{ + state: ConnState{ + FeedName: "inbound-feed", + HostName: c.RemoteAddr().String(), + Open: true, + }, + auth: s.Auth, + authenticated: anon, + serverName: sname, + storage: storage, + acceptor: s.Acceptor, + hdrio: message.NewHeaderIO(), + C: textproto.NewConn(c), + conn: c, + cmds: map[string]lineHandlerFunc{ + "STARTTLS": upgradeTLS, + "IHAVE": nntpRecvArticle, + "POST": nntpPostArticle, + "MODE": switchModeInbound, + "QUIT": quitConnection, + "CAPABILITIES": sendCapabilities, + "CHECK": streamingLine, + "TAKETHIS": streamingLine, + "LIST": func(c *v1Conn, line string, h EventHooks) error { + return newsgroupList(c, line, h, RPL_List) + }, + "NEWSGROUPS": func(c *v1Conn, line string, h EventHooks) error { + return newsgroupList(c, line, h, RPL_NewsgroupList) + }, + "GROUP": switchNewsgroup, + "AUTHINFO": handleAuthInfo, + "XOVER": handleXOVER, + "ARTICLE": handleArticle, + }, + }, + } +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/dial.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/dial.go new file mode 100644 index 0000000..50dec23 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/dial.go @@ -0,0 +1,15 @@ +package nntp + +import ( + "crypto/tls" + "nntpchan/lib/network" +) + +// establishes an outbound nntp connection to a remote server +type Dialer interface { + // dial out with a dialer + // if cfg is not nil, try to establish a tls connection with STARTTLS + // returns a new nntp connection and nil on successful handshake and login + // returns nil and an error if 
an error happened + Dial(d network.Dialer, cfg *tls.Config) (*Conn, error) +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/doc.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/doc.go new file mode 100644 index 0000000..99bfdcf --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/doc.go @@ -0,0 +1,4 @@ +// +// nntp client/server +// +package nntp diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/event_test.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/event_test.go new file mode 100644 index 0000000..9e1d318 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/event_test.go @@ -0,0 +1,60 @@ +package nntp + +import ( + "testing" +) + +func TestTAKETHISParse(t *testing.T) { + msgid := GenMessageID("test.tld") + ev := stream_cmd_TAKETHIS(msgid) + t.Logf("event: %s", ev) + if ev.MessageID() != msgid { + t.Logf("%s != %s, event was %s", msgid, ev.MessageID(), ev) + t.Fail() + } + if ev.Command() != "TAKETHIS" { + t.Logf("%s != TAKETHIS, event was %s", ev.Command(), ev) + t.Fail() + } + if !ev.Valid() { + t.Logf("%s is invalid stream event", ev) + t.Fail() + } +} + +func TestCHECKParse(t *testing.T) { + msgid := GenMessageID("test.tld") + ev := stream_cmd_CHECK(msgid) + t.Logf("event: %s", ev) + if ev.MessageID() != msgid { + t.Logf("%s != %s, event was %s", msgid, ev.MessageID(), ev) + t.Fail() + } + if ev.Command() != "CHECK" { + t.Logf("%s != CHECK, event was %s", ev.Command(), ev) + t.Fail() + } + if !ev.Valid() { + t.Logf("%s is invalid stream event", ev) + t.Fail() + } +} + +func TestInvalidStremEvent(t *testing.T) { + str := "asd" + ev := StreamEvent(str) + t.Logf("invalid str=%s ev=%s", str, ev) + if ev.Valid() { + t.Logf("invalid CHECK command is valid? %s", ev) + t.Fail() + } + + str = "asd asd" + ev = StreamEvent(str) + t.Logf("invalid str=%s ev=%s", str, ev) + + if ev.Valid() { + t.Logf("invalid CHECK command is valid? 
%s", ev) + t.Fail() + } +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/filter.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/filter.go new file mode 100644 index 0000000..0c2ba75 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/filter.go @@ -0,0 +1,19 @@ +package nntp + +import ( + "nntpchan/lib/nntp/message" + "io" +) + +// defines interface for filtering an nntp article +// filters can (and does) modify the article it operates on +type ArticleFilter interface { + // filter the article header + // returns the modified Header and an error if one occurs + FilterHeader(hdr message.Header) (message.Header, error) + + // reads the article's body and write the filtered version to an io.Writer + // returns the number of bytes written to the io.Writer, true if the body was + // modifed (or false if body is unchanged) and an error if one occurs + FilterAndWriteBody(body io.Reader, wr io.Writer) (int64, bool, error) +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/hook.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/hook.go new file mode 100644 index 0000000..857009e --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/hook.go @@ -0,0 +1,30 @@ +package nntp + +import ( + log "github.com/Sirupsen/logrus" + "nntpchan/lib/config" + "os/exec" +) + +type Hook struct { + cfg *config.NNTPHookConfig +} + +func NewHook(cfg *config.NNTPHookConfig) *Hook { + return &Hook{ + cfg: cfg, + } +} + +func (h *Hook) GotArticle(msgid MessageID, group Newsgroup) { + c := exec.Command(h.cfg.Exec, group.String(), msgid.String()) + log.Infof("calling hook %s", h.cfg.Name) + err := c.Run() + if err != nil { + log.Errorf("error in nntp hook %s: %s", h.cfg.Name, err.Error()) + } +} + +func (*Hook) SentArticleVia(msgid MessageID, feedname string) { + +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/hooks.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/hooks.go new file mode 100644 index 0000000..d1c18d4 --- /dev/null +++ 
b/contrib/backends/srndv2/src/nntpchan/lib/nntp/hooks.go @@ -0,0 +1,9 @@ +package nntp + +// callback hooks fired on certain events +type EventHooks interface { + // called when we have obtained an article given its message-id + GotArticle(msgid MessageID, group Newsgroup) + // called when we have sent an article to a single remote feed + SentArticleVia(msgid MessageID, feedname string) +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/message/article.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/message/article.go new file mode 100644 index 0000000..c22a702 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/message/article.go @@ -0,0 +1,11 @@ +package message + +// an nntp article +type Article struct { + + // the article's mime header + Header Header + + // unexported fields ... + +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/message/attachment.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/message/attachment.go new file mode 100644 index 0000000..c98ab5b --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/message/attachment.go @@ -0,0 +1,16 @@ +package message + +import ( + "io" +) + +// attachment in an nntp article +type Attachment struct { + // mimetype + Mime string + // the filename + FileName string + // the fully decoded attachment body + // must close when done + Body io.ReadCloser +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/message/doc.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/message/doc.go new file mode 100644 index 0000000..347d382 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/message/doc.go @@ -0,0 +1,2 @@ +// package for parsing, packing, signing, verifying nntp articles +package message diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/message/header.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/message/header.go new file mode 100644 index 0000000..7511fc1 --- /dev/null +++ 
b/contrib/backends/srndv2/src/nntpchan/lib/nntp/message/header.go @@ -0,0 +1,174 @@ +package message + +import ( + "io" + "mime" + "strings" +) + +// an nntp message header +type Header map[string][]string + +// get message-id header +func (self Header) MessageID() (v string) { + for _, hdr := range []string{"MessageID", "Message-ID", "Message-Id", "message-id"} { + v = self.Get(hdr, "") + if v != "" { + break + } + } + return +} + +func (self Header) Reference() (ref string) { + return self.Get("Reference", self.MessageID()) +} + +// extract media type from content-type header +func (self Header) GetMediaType() (mediatype string, params map[string]string, err error) { + return mime.ParseMediaType(self.Get("Content-Type", "text/plain")) +} + +// is this header for a multipart message? +func (self Header) IsMultipart() bool { + return strings.HasPrefix(self.Get("Content-Type", "text/plain"), "multipart/mixed") +} + +func (self Header) IsSigned() bool { + return self.Get("X-Pubkey-Ed25519", "") != "" +} + +func (self Header) Newsgroup() string { + return self.Get("Newsgroups", "overchan.discard") +} + +// do we have a key in this header? +func (self Header) Has(key string) bool { + _, ok := self[key] + return ok +} + +// set key value +func (self Header) Set(key, val string) { + self[key] = []string{val} +} + +func (self Header) AppendPath(name string) { + p := self.Get("Path", name) + if p != name { + p = name + "!" 
+ p + } + self.Set("Path", p) +} + +// append value to key +func (self Header) Add(key, val string) { + if self.Has(key) { + self[key] = append(self[key], val) + } else { + self.Set(key, val) + } +} + +// get via key or return fallback value +func (self Header) Get(key, fallback string) string { + val, ok := self[key] + if ok { + str := "" + for _, k := range val { + str += k + ", " + } + return str[:len(str)-2] + } else { + return fallback + } +} + +// interface for types that can read an nntp header +type HeaderReader interface { + // blocking read an nntp header from an io.Reader + // return the read header and nil on success + // return nil and an error if an error occurred while reading + ReadHeader(r io.Reader) (Header, error) +} + +// interface for types that can write an nntp header +type HeaderWriter interface { + // blocking write an nntp header to an io.Writer + // returns an error if one occurs otherwise nil + WriteHeader(hdr Header, w io.Writer) error +} + +// implements HeaderReader and HeaderWriter +type HeaderIO struct { + delim byte +} + +// read header +func (s *HeaderIO) ReadHeader(r io.Reader) (hdr Header, err error) { + hdr = make(Header) + var k, v string + var buf [1]byte + for err == nil { + // read key + for err == nil { + _, err = r.Read(buf[:]) + if err != nil { + return + } + if buf[0] == 58 { // colin + // consume space + _, err = r.Read(buf[:]) + for err == nil { + _, err = r.Read(buf[:]) + if buf[0] == s.delim { + // got delimiter + hdr.Add(k, v) + k = "" + v = "" + break + } else { + v += string(buf[:]) + } + } + break + } else if buf[0] == s.delim { + // done + return + } else { + k += string(buf[:]) + } + } + } + return +} + +// write header +func (s *HeaderIO) WriteHeader(hdr Header, wr io.Writer) (err error) { + for k, vs := range hdr { + for _, v := range vs { + var line []byte + // key + line = append(line, []byte(k)...) + // ": " + line = append(line, 58, 32) + // value + line = append(line, []byte(v)...) 
+ // delimiter + line = append(line, s.delim) + // write line + _, err = wr.Write(line) + if err != nil { + return + } + } + } + _, err = wr.Write([]byte{s.delim}) + return +} + +func NewHeaderIO() *HeaderIO { + return &HeaderIO{ + delim: 10, + } +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/mode.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/mode.go new file mode 100644 index 0000000..11b30f5 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/mode.go @@ -0,0 +1,69 @@ +package nntp + +import ( + "errors" + "strings" +) + +var ErrInvalidMode = errors.New("invalid mode set") + +// a mode set by an nntp client +type Mode string + +// reader mode +const MODE_READER = Mode("reader") + +// streaming mode +const MODE_STREAM = Mode("stream") + +// mode is not set +const MODE_UNSET = Mode("") + +// get as string +func (m Mode) String() string { + return strings.ToUpper(string(m)) +} + +// is this a valid mode of operation? +func (m Mode) Valid() bool { + return m.Is(MODE_READER) || m.Is(MODE_STREAM) +} + +// is this mode equal to another mode +func (m Mode) Is(other Mode) bool { + return m.String() == other.String() +} + +// a switch mode command +type ModeCommand string + +// get as string +func (m ModeCommand) String() string { + return strings.ToUpper(string(m)) +} + +// is this mode command well formed? +// does not check the actual mode sent. 
+func (m ModeCommand) Valid() bool { + s := m.String() + return strings.Count(s, " ") == 1 && strings.HasPrefix(s, "MODE ") +} + +// get the mode selected in this mode command +func (m ModeCommand) Mode() Mode { + return Mode(strings.Split(m.String(), " ")[1]) +} + +// check if this mode command is equal to an existing one +func (m ModeCommand) Is(cmd ModeCommand) bool { + return m.String() == cmd.String() +} + +// reader mode command +const ModeReader = ModeCommand("mode reader") + +// streaming mode command +const ModeStream = ModeCommand("mode stream") + +// line prefix for mode +const LinePrefix_Mode = "MODE " diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/multi.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/multi.go new file mode 100644 index 0000000..e0480bc --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/multi.go @@ -0,0 +1,16 @@ +package nntp + +// multiplexed event hook +type MulitHook []EventHooks + +func (m MulitHook) GotArticle(msgid MessageID, group Newsgroup) { + for _, h := range m { + h.GotArticle(msgid, group) + } +} + +func (m MulitHook) SentArticleVia(msgid MessageID, feedname string) { + for _, h := range m { + h.SentArticleVia(msgid, feedname) + } +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/policy.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/policy.go new file mode 100644 index 0000000..8ef0c5a --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/policy.go @@ -0,0 +1,29 @@ +package nntp + +// +// a policy that governs whether we federate an article via a feed +// +type FeedPolicy struct { + // list of whitelist regexps for newsgorups + Whitelist []string `json:"whitelist"` + // list of blacklist regexps for newsgroups + Blacklist []string `json:"blacklist"` + // are anon posts of any kind allowed? + AllowAnonPosts bool `json:"anon"` + // are anon posts with attachments allowed? + AllowAnonAttachments bool `json:"anon_attachments"` + // are any attachments allowed? 
+ AllowAttachments bool `json:"attachments"` + // do we require Proof Of Work for untrusted connections? + UntrustedRequiresPoW bool `json:"pow"` +} + +// default feed policy to be used if not configured explicitly +var DefaultFeedPolicy = &FeedPolicy{ + Whitelist: []string{"ctl", "overchan.test"}, + Blacklist: []string{`!^overchan\.`}, + AllowAnonPosts: true, + AllowAnonAttachments: false, + UntrustedRequiresPoW: true, + AllowAttachments: true, +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/server.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/server.go new file mode 100644 index 0000000..e592507 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/server.go @@ -0,0 +1,329 @@ +package nntp + +import ( + log "github.com/Sirupsen/logrus" + "nntpchan/lib/config" + "nntpchan/lib/database" + "nntpchan/lib/network" + "nntpchan/lib/store" + "net" + "time" +) + +// nntp outfeed state +type nntpFeed struct { + conn Conn + send chan ArticleEntry + conf *config.FeedConfig +} + +// an nntp server +type Server struct { + // user callback + Hooks EventHooks + // filters to apply + Filters []ArticleFilter + // database driver + DB database.DB + // global article acceptor + Acceptor ArticleAcceptor + // article storage + Storage store.Storage + // nntp config + Config *config.NNTPServerConfig + // outfeeds to connect to + Feeds []*config.FeedConfig + // inbound authentiaction mechanism + Auth ServerAuth + // send to outbound feed channel + send chan ArticleEntry + // register inbound feed channel + regis chan *nntpFeed + // deregister inbound feed channel + deregis chan *nntpFeed +} + +func NewServer() *Server { + return &Server{ + // XXX: buffered? 
+ send: make(chan ArticleEntry), + regis: make(chan *nntpFeed), + deregis: make(chan *nntpFeed), + } +} + +// reload server configuration +func (s *Server) ReloadServer(c *config.NNTPServerConfig) { + +} + +// reload feeds +func (s *Server) ReloadFeeds(feeds []*config.FeedConfig) { + +} + +func (s *Server) GotArticle(msgid MessageID, group Newsgroup) { + log.WithFields(log.Fields{ + "pkg": "nntp-server", + "msgid": msgid, + "group": group, + }).Info("obtained article") + if s.Hooks != nil { + s.Hooks.GotArticle(msgid, group) + } + // send to outbound feeds + s.send <- ArticleEntry{msgid.String(), group.String()} +} + +func (s *Server) SentArticleVia(msgid MessageID, feedname string) { + log.WithFields(log.Fields{ + "pkg": "nntp-server", + "msgid": msgid, + "feed": feedname, + }).Info("article sent") + if s.Hooks != nil { + s.Hooks.SentArticleVia(msgid, feedname) + } +} + +func (s *Server) Name() string { + if s.Config == nil || s.Config.Name == "" { + return "nntp.anon.tld" + } + return s.Config.Name +} + +// persist 1 feed forever +func (s *Server) persist(cfg *config.FeedConfig) { + delay := time.Second + + log.WithFields(log.Fields{ + "name": cfg.Name, + }).Debug("Persist Feed") + for { + dialer := network.NewDialer(cfg.Proxy) + c, err := dialer.Dial(cfg.Addr) + if err == nil { + // successful connect + delay = time.Second + conn := newOutboundConn(c, s, cfg) + err = conn.Negotiate(true) + if err == nil { + // negotiation good + log.WithFields(log.Fields{ + "name": cfg.Name, + }).Debug("Negotitation good") + // start streaming + var chnl chan ArticleEntry + chnl, err = conn.StartStreaming() + if err == nil { + // register new connection + f := &nntpFeed{ + conn: conn, + send: chnl, + conf: cfg, + } + s.regis <- f + // start streaming + conn.StreamAndQuit() + // deregister + s.deregis <- f + continue + } + } else { + log.WithFields(log.Fields{ + "name": cfg.Name, + }).Info("outbound nntp connection failed to negotiate ", err) + } + conn.Quit() + } else { + // 
failed dial, do exponential backoff up to 1 hour + if delay <= time.Hour { + delay *= 2 + } + log.WithFields(log.Fields{ + "name": cfg.Name, + }).Info("feed backoff for ", delay) + time.Sleep(delay) + } + } +} + +// download all new posts from a remote server +func (s *Server) downloadPosts(cfg *config.FeedConfig) error { + dialer := network.NewDialer(cfg.Proxy) + c, err := dialer.Dial(cfg.Addr) + if err != nil { + return err + } + conn := newOutboundConn(c, s, cfg) + err = conn.Negotiate(false) + if err != nil { + conn.Quit() + return err + } + groups, err := conn.ListNewsgroups() + if err != nil { + conn.Quit() + return err + } + for _, g := range groups { + if cfg.Policy != nil && cfg.Policy.AllowGroup(g.String()) { + log.WithFields(log.Fields{ + "group": g, + "pkg": "nntp-server", + }).Debug("downloading group") + err = conn.DownloadGroup(g) + if err != nil { + conn.Quit() + return err + } + } + } + conn.Quit() + return nil +} + +func (s *Server) periodicDownload(cfg *config.FeedConfig) { + for cfg.PullInterval > 0 { + err := s.downloadPosts(cfg) + if err != nil { + // report error + log.WithFields(log.Fields{ + "feed": cfg.Name, + "pkg": "nntp-server", + "error": err, + }).Error("periodic download failed") + } + time.Sleep(time.Minute * time.Duration(cfg.PullInterval)) + } +} + +// persist all outbound feeds +func (s *Server) PersistFeeds() { + for _, f := range s.Feeds { + go s.persist(f) + go s.periodicDownload(f) + } + + feeds := make(map[string]*nntpFeed) + + for { + select { + case e, ok := <-s.send: + if !ok { + break + } + msgid := e.MessageID().String() + group := e.Newsgroup().String() + // TODO: determine anon + anon := false + // TODO: determine attachments + attachments := false + + for _, f := range feeds { + if f.conf.Policy != nil && !f.conf.Policy.Allow(msgid, group, anon, attachments) { + // not allowed in this feed + continue + } + log.WithFields(log.Fields{ + "name": f.conf.Name, + "msgid": msgid, + "group": group, + }).Debug("sending 
article") + f.send <- e + } + break + case f, ok := <-s.regis: + if ok { + log.WithFields(log.Fields{ + "name": f.conf.Name, + }).Debug("register feed") + feeds[f.conf.Name] = f + } + break + case f, ok := <-s.deregis: + if ok { + log.WithFields(log.Fields{ + "name": f.conf.Name, + }).Debug("deregister feed") + delete(feeds, f.conf.Name) + } + break + } + } +} + +// serve connections from listener +func (s *Server) Serve(l net.Listener) (err error) { + log.WithFields(log.Fields{ + "pkg": "nntp-server", + "addr": l.Addr(), + }).Debug("Serving") + for err == nil { + var c net.Conn + c, err = l.Accept() + if err == nil { + // we got a new connection + go s.handleInboundConnection(c) + } else { + log.WithFields(log.Fields{ + "pkg": "nntp-server", + }).Error("failed to accept inbound connection", err) + } + } + return +} + +// get the article policy for a connection given its state +func (s *Server) getPolicyFor(state *ConnState) ArticleAcceptor { + return s.Acceptor +} + +// recv inbound streaming messages +func (s *Server) recvInboundStream(chnl chan ArticleEntry) { + for { + e, ok := <-chnl + if ok { + s.GotArticle(e.MessageID(), e.Newsgroup()) + } else { + return + } + } +} + +// process an inbound connection +func (s *Server) handleInboundConnection(c net.Conn) { + log.WithFields(log.Fields{ + "pkg": "nntp-server", + "addr": c.RemoteAddr(), + }).Debug("handling inbound connection") + var nc Conn + nc = newInboundConn(s, c) + err := nc.Negotiate(true) + if err == nil { + // do they want to stream? 
+ if nc.WantsStreaming() { + // yeeeeeh let's stream + var chnl chan ArticleEntry + chnl, err = nc.StartStreaming() + // for inbound we will recv messages + go s.recvInboundStream(chnl) + nc.StreamAndQuit() + log.WithFields(log.Fields{ + "pkg": "nntp-server", + "addr": c.RemoteAddr(), + }).Info("streaming finished") + return + } else { + // handle non streaming commands + nc.ProcessInbound(s) + } + } else { + log.WithFields(log.Fields{ + "pkg": "nntp-server", + "addr": c.RemoteAddr(), + }).Warn("failed to negotiate with inbound connection", err) + c.Close() + } +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/state.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/state.go new file mode 100644 index 0000000..babb030 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/state.go @@ -0,0 +1,21 @@ +package nntp + +// state of an nntp connection +type ConnState struct { + // name of parent feed + FeedName string `json:"feedname"` + // name of the connection + ConnName string `json:"connname"` + // hostname of remote connection + HostName string `json:"hostname"` + // current nntp mode + Mode Mode `json:"mode"` + // current selected nntp newsgroup + Group Newsgroup `json:"newsgroup"` + // current selected nntp article + Article string `json:"article"` + // parent feed's policy + Policy *FeedPolicy `json:"feedpolicy"` + // is this connection open? 
+ Open bool `json:"open"` +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/nntp/streaming.go b/contrib/backends/srndv2/src/nntpchan/lib/nntp/streaming.go new file mode 100644 index 0000000..8d267ef --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/nntp/streaming.go @@ -0,0 +1,65 @@ +package nntp + +import ( + "fmt" + "strings" +) + +// an nntp stream event +// these are pipelined between nntp servers +type StreamEvent string + +func (ev StreamEvent) MessageID() MessageID { + parts := strings.Split(string(ev), " ") + if len(parts) > 1 { + return MessageID(parts[1]) + } + return "" +} + +func (ev StreamEvent) String() string { + return string(ev) +} + +func (ev StreamEvent) Command() string { + return strings.Split(ev.String(), " ")[0] +} + +func (ev StreamEvent) Valid() bool { + return strings.Count(ev.String(), " ") == 1 && ev.MessageID().Valid() +} + +var stream_TAKETHIS = "TAKETHIS" +var stream_CHECK = "CHECK" + +func createStreamEvent(cmd string, msgid MessageID) StreamEvent { + if msgid.Valid() { + return StreamEvent(fmt.Sprintf("%s %s", cmd, msgid)) + } else { + return "" + } +} + +func stream_rpl_Accept(msgid MessageID) StreamEvent { + return createStreamEvent(RPL_StreamingAccept, msgid) +} + +func stream_rpl_Reject(msgid MessageID) StreamEvent { + return createStreamEvent(RPL_StreamingReject, msgid) +} + +func stream_rpl_Defer(msgid MessageID) StreamEvent { + return createStreamEvent(RPL_StreamingDefer, msgid) +} + +func stream_rpl_Failed(msgid MessageID) StreamEvent { + return createStreamEvent(RPL_StreamingFailed, msgid) +} + +func stream_cmd_TAKETHIS(msgid MessageID) StreamEvent { + return createStreamEvent(stream_TAKETHIS, msgid) +} + +func stream_cmd_CHECK(msgid MessageID) StreamEvent { + return createStreamEvent(stream_CHECK, msgid) +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/srnd/doc.go b/contrib/backends/srndv2/src/nntpchan/lib/srnd/doc.go new file mode 100644 index 0000000..bbfbf9e --- /dev/null +++ 
b/contrib/backends/srndv2/src/nntpchan/lib/srnd/doc.go @@ -0,0 +1,5 @@ +// +// main package for srndv2 +// called from main +// +package srnd diff --git a/contrib/backends/srndv2/src/nntpchan/lib/store/doc.go b/contrib/backends/srndv2/src/nntpchan/lib/store/doc.go new file mode 100644 index 0000000..fcce618 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/store/doc.go @@ -0,0 +1,4 @@ +// +// nntp article storage +// +package store diff --git a/contrib/backends/srndv2/src/nntpchan/lib/store/fs.go b/contrib/backends/srndv2/src/nntpchan/lib/store/fs.go new file mode 100644 index 0000000..b9d7d2f --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/store/fs.go @@ -0,0 +1,309 @@ +package store + +import ( + "encoding/base32" + "fmt" + log "github.com/Sirupsen/logrus" + "nntpchan/lib/crypto" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" +) + +// filesystem storage of nntp articles and attachments +type FilesystemStorage struct { + root string + discardAttachments bool +} + +func (fs FilesystemStorage) String() string { + return fs.root +} + +// ensure the filesystem storage exists and is well formed and read/writable +func (fs FilesystemStorage) Ensure() (err error) { + _, err = os.Stat(fs.String()) + if os.IsNotExist(err) { + // directory does not exist, create it + err = os.Mkdir(fs.String(), 0755) + if err != nil { + log.WithFields(log.Fields{ + "pkg": "fs-store", + "filepath": fs.String(), + }).Error("failed to ensure directory", err) + // failed to create initial directory + return + } + } + + // ensure subdirectories + for _, subdir := range []string{"att", "thm", "articles", "tmp"} { + fpath := filepath.Join(fs.String(), subdir) + _, err = os.Stat(fpath) + if os.IsNotExist(err) { + // make subdirectory + err = os.Mkdir(fpath, 0755) + if err != nil { + log.WithFields(log.Fields{ + "pkg": "fs-store", + "filepath": fpath, + }).Error("failed to ensure sub-directory", err) + // failed to create subdirectory + return + } + } + } + return +} 
+ +// get the temp file directory +func (fs FilesystemStorage) TempDir() string { + return filepath.Join(fs.String(), "tmp") +} + +// get the directory path for attachments +func (fs FilesystemStorage) AttachmentDir() string { + return filepath.Join(fs.String(), "att") +} + +// get the directory path for articles +func (fs FilesystemStorage) ArticleDir() string { + return filepath.Join(fs.String(), "articles") +} + +// get a temporary file we can use for read/write that deletes itself on close +func (fs FilesystemStorage) obtainTempFile() (f *os.File, err error) { + fname := fmt.Sprintf("tempfile-%x-%d", crypto.RandBytes(4), time.Now().Unix()) + log.WithFields(log.Fields{ + "pkg": "fs-store", + "filepath": fname, + }).Debug("opening temp file") + f, err = os.OpenFile(filepath.Join(fs.TempDir(), fname), os.O_RDWR|os.O_CREATE, 0400) + return +} + +// store an article from a reader to disk +func (fs FilesystemStorage) StoreArticle(r io.Reader, msgid, newsgroup string) (fpath string, err error) { + err = fs.HasArticle(msgid) + if err == nil { + // discard the body as we have it stored already + _, err = io.Copy(ioutil.Discard, r) + log.WithFields(log.Fields{ + "pkg": "fs-store", + "msgid": msgid, + }).Debug("discard article") + } else if err == ErrNoSuchArticle { + log.WithFields(log.Fields{ + "pkg": "fs-store", + "msgid": msgid, + }).Debug("storing article") + // don't have an article with this message id, write it to disk + var f *os.File + fpath = filepath.Join(fs.ArticleDir(), msgid) + f, err = os.OpenFile(fpath, os.O_CREATE|os.O_WRONLY, 0644) + if err == nil { + // file opened okay, defer the close + defer f.Close() + // write to disk + log.WithFields(log.Fields{ + "pkg": "fs-store", + "msgid": msgid, + }).Debug("writing to disk") + var n int64 + n, err = io.Copy(f, r) + if err == nil { + log.WithFields(log.Fields{ + "pkg": "fs-store", + "msgid": msgid, + "written": n, + }).Debug("wrote article to disk") + // symlink + g := fs.newsgroupDir(newsgroup) + _, e := 
os.Stat(g) + if os.IsNotExist(e) { + err = os.Mkdir(g, 0700) + } + if err == nil { + err = os.Symlink(filepath.Join("..", msgid), filepath.Join(g, msgid)) + } + if err != nil { + log.WithFields(log.Fields{ + "pkg": "fs-store", + "msgid": msgid, + "group": newsgroup, + }).Debug("failed to link article") + } + } else { + log.WithFields(log.Fields{ + "pkg": "fs-store", + "msgid": msgid, + "written": n, + }).Error("write to disk failed") + } + } else { + log.WithFields(log.Fields{ + "pkg": "fs-store", + "msgid": msgid, + "filepath": fpath, + }).Error("did not open file for storage", err) + } + } + return +} + +func (fs FilesystemStorage) newsgroupDir(group string) string { + return filepath.Join(fs.ArticleDir(), group) +} + +// check if we have the artilce with this message id +func (fs FilesystemStorage) HasArticle(msgid string) (err error) { + fpath := fs.ArticleDir() + fpath = filepath.Join(fpath, msgid) + log.WithFields(log.Fields{ + "pkg": "fs-store", + "msgid": msgid, + "filepath": fpath, + }).Debug("check for article") + _, err = os.Stat(fpath) + if os.IsNotExist(err) { + err = ErrNoSuchArticle + } + return +} + +func (fs FilesystemStorage) DeleteArticle(msgid string) (err error) { + err = os.Remove(filepath.Join(fs.ArticleDir(), msgid)) + return +} + +// store attachment onto filesystem +func (fs FilesystemStorage) StoreAttachment(r io.Reader, filename string) (fpath string, err error) { + if fs.discardAttachments { + _, err = io.Copy(ioutil.Discard, r) + return + } + // open temp file for storage + var tf *os.File + tf, err = fs.obtainTempFile() + if err == nil { + // we have the temp file + + // close tempfile when done + defer func() { + n := tf.Name() + tf.Close() + os.Remove(n) + }() + + // create hasher + h := crypto.Hash() + // create multiwriter + mw := io.MultiWriter(tf, h) + + log.WithFields(log.Fields{ + "pkg": "fs-store", + "filename": filename, + }).Debug("writing to disk") + var n int64 + // write all of the reader to the multiwriter + n, err = 
io.Copy(mw, r) + + if err == nil { + // successful write + + // get file checksum + d := h.Sum(nil) + + // rename file to hash + extension from filename + fpath = base32.StdEncoding.EncodeToString(d) + filepath.Ext(filename) + fpath = filepath.Join(fs.AttachmentDir(), fpath) + + _, err = os.Stat(fpath) + // is that file there? + if os.IsNotExist(err) { + // it's not there, let's write it + var f *os.File + f, err = os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE, 0644) + if err == nil { + // file opened + defer f.Close() + // seek to beginning of tempfile + tf.Seek(0, os.SEEK_SET) + // write all of the temp file to the storage file + n, err = io.Copy(f, tf) + // if err == nil by here it's all good + l := log.WithFields(log.Fields{ + "pkg": "fs-store", + "filename": filename, + "hash": d, + "filepath": fpath, + "size": n, + }) + if err == nil { + l.Debug("wrote attachment to disk") + } else { + l.Error("failed to write attachment to disk", err) + } + } else { + log.WithFields(log.Fields{ + "pkg": "fs-store", + "filename": filename, + "hash": d, + "filepath": fpath, + }).Error("failed to open file") + } + } else { + log.WithFields(log.Fields{ + "pkg": "fs-store", + "filename": filename, + "hash": d, + "filepath": fpath, + "size": n, + }).Debug("attachment exists on disk") + } + } + } else { + log.WithFields(log.Fields{ + "pkg": "fs-store", + "filename": filename, + }).Error("cannot open temp file for attachment", err) + } + return +} + +// open article given message-id +// does not check validity +func (fs FilesystemStorage) OpenArticle(msgid string) (r *os.File, err error) { + r, err = os.Open(filepath.Join(fs.ArticleDir(), msgid)) + return +} + +func (fs FilesystemStorage) ForEachInGroup(group string, chnl chan string) { + g := fs.newsgroupDir(group) + filepath.Walk(g, func(path string, info os.FileInfo, err error) error { + if info != nil { + chnl <- info.Name() + } + return nil + }) +} + +// create a new filesystem storage directory +// ensure directory and 
subdirectories
+func NewFilesytemStorage(dirname string, unpackAttachments bool) (fs FilesystemStorage, err error) { // sic: "Filesytem" — exported name kept for existing callers
+	dirname, err = filepath.Abs(dirname)
+	if err == nil {
+		log.WithFields(log.Fields{
+			"pkg":      "fs-store",
+			"filepath": dirname,
+		}).Info("Creating New Filesystem Storage")
+		fs = FilesystemStorage{
+			root:               dirname,
+			discardAttachments: unpackAttachments, // NOTE(review): param is named "unpack" but field is "discard"; caller in cmd/nntpchan passes true — confirm polarity is intended
+		}
+		err = fs.Ensure()
+	}
+	return
+}
diff --git a/contrib/backends/srndv2/src/nntpchan/lib/store/log.go b/contrib/backends/srndv2/src/nntpchan/lib/store/log.go
new file mode 100644
index 0000000..72440ea
--- /dev/null
+++ b/contrib/backends/srndv2/src/nntpchan/lib/store/log.go
@@ -0,0 +1 @@
+package store
diff --git a/contrib/backends/srndv2/src/nntpchan/lib/store/null.go b/contrib/backends/srndv2/src/nntpchan/lib/store/null.go
new file mode 100644
index 0000000..fee6fb4
--- /dev/null
+++ b/contrib/backends/srndv2/src/nntpchan/lib/store/null.go
@@ -0,0 +1,49 @@
+package store
+
+import (
+	"nntpchan/lib/util"
+	"io"
+	"os"
+)
+
+type nullStore struct{} // Storage impl that drains input and stores nothing
+
+func (n *nullStore) discard(r io.Reader) (s string, err error) { // drain r fully, report fake path
+	_, err = io.Copy(util.Discard, r)
+	s = "/dev/null"
+	return
+}
+
+func (n *nullStore) HasArticle(msgid string) error { // nothing is ever stored
+	return ErrNoSuchArticle
+}
+
+func (n *nullStore) StoreAttachment(r io.Reader, filename string) (string, error) {
+	return n.discard(r)
+}
+
+func (n *nullStore) StoreArticle(r io.Reader, msgid, newsgroup string) (string, error) {
+	return n.discard(r)
+}
+
+func (n *nullStore) DeleteArticle(msgid string) (err error) { // no-op: nothing to delete
+	return
+}
+
+func (n *nullStore) Ensure() (err error) { // no-op: no backing directories
+	return
+}
+
+func (n *nullStore) ForEachInGroup(newsgroup string, chnl chan string) { // no-op: never sends on chnl (callers must not block on it)
+	return
+}
+
+func (n *nullStore) OpenArticle(msgid string) (r *os.File, err error) {
+	err = ErrNoSuchArticle
+	return
+}
+
+// create a storage backend that does nothing
+func NewNullStorage() Storage {
+	return &nullStore{}
+}
diff --git a/contrib/backends/srndv2/src/nntpchan/lib/store/store.go
b/contrib/backends/srndv2/src/nntpchan/lib/store/store.go
new file mode 100644
index 0000000..bf49ef5
--- /dev/null
+++ b/contrib/backends/srndv2/src/nntpchan/lib/store/store.go
@@ -0,0 +1,41 @@
+package store
+
+import (
+	"errors"
+	"io"
+	"os"
+)
+
+// ErrNoSuchArticle is returned by HasArticle/OpenArticle when the message id is unknown.
+var ErrNoSuchArticle = errors.New("no such article")
+
+// storage for nntp articles and attachments
+type Storage interface {
+	// store an attachment that we read from an io.Reader
+	// filename is used to hint to store what extension to store it as
+	// returns absolute filepath where attachment was stored and nil on success
+	// returns empty string and error if an error occurred while storing
+	StoreAttachment(r io.Reader, filename string) (string, error)
+
+	// store an article that we read from an io.Reader
+	// message id is used to hint where the article is stored as well as newsgroup
+	// returns absolute filepath to where the article was stored and nil on success
+	// returns empty string and error if an error occurred while storing
+	StoreArticle(r io.Reader, msgid, newsgroup string) (string, error)
+
+	// return nil if the article with the given message id exists in this storage
+	// return ErrNoSuchArticle if it does not exist or an error if another error occurred while checking
+	HasArticle(msgid string) error
+
+	// delete article from underlying storage
+	DeleteArticle(msgid string) error
+
+	// open article for reading
+	OpenArticle(msgid string) (*os.File, error)
+
+	// ensure the underlying storage backend is created
+	Ensure() error
+
+	// iterate over all messages in a newsgroup
+	// send results down a channel
+	ForEachInGroup(newsgroup string, cnhl chan string)
+}
diff --git a/contrib/backends/srndv2/src/nntpchan/lib/thumbnail/doc.go b/contrib/backends/srndv2/src/nntpchan/lib/thumbnail/doc.go
new file mode 100644
index 0000000..dc9aaa1
--- /dev/null
+++ b/contrib/backends/srndv2/src/nntpchan/lib/thumbnail/doc.go
@@ -0,0 +1,4 @@
+//
+// attachment thumbnailing
+//
+package thumbnail
diff --git
a/contrib/backends/srndv2/src/nntpchan/lib/thumbnail/exec.go b/contrib/backends/srndv2/src/nntpchan/lib/thumbnail/exec.go new file mode 100644 index 0000000..5b0ef9d --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/thumbnail/exec.go @@ -0,0 +1,40 @@ +package thumbnail + +import ( + "os/exec" + "regexp" +) + +// thumbnail by executing an external program +type ExecThumbnailer struct { + // path to executable + Exec string + // regular expression that checks for acceptable infiles + Accept *regexp.Regexp + // function to generate arguments to use with external program + // inf and outf are the filenames of the input and output files respectively + // if this is nil the command will be passed in 2 arguments, infile and outfile + GenArgs func(inf, outf string) []string +} + +func (exe *ExecThumbnailer) CanThumbnail(infpath string) bool { + re := exe.Accept.Copy() + return re.MatchString(infpath) +} + +func (exe *ExecThumbnailer) Generate(infpath, outfpath string) (err error) { + // do sanity check + if exe.CanThumbnail(infpath) { + var args []string + if exe.GenArgs == nil { + args = []string{infpath, outfpath} + } else { + args = exe.GenArgs(infpath, outfpath) + } + cmd := exec.Command(exe.Exec, args...) 
+ _, err = cmd.CombinedOutput() + } else { + err = ErrCannotThumbanil + } + return +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/thumbnail/multi.go b/contrib/backends/srndv2/src/nntpchan/lib/thumbnail/multi.go new file mode 100644 index 0000000..1a26e58 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/thumbnail/multi.go @@ -0,0 +1,47 @@ +package thumbnail + +import ( + "errors" +) + +var ErrNoThumbnailer = errors.New("no thumbnailer found") + +type multiThumbnailer struct { + impls []Thumbnailer +} + +// get the frist matching thumbnailer that works with the given file +// if we can't find one return nil +func (mth *multiThumbnailer) getThumbnailer(fpath string) Thumbnailer { + for _, th := range mth.impls { + if th.CanThumbnail(fpath) { + return th + } + } + return nil +} + +func (mth *multiThumbnailer) Generate(infpath, outfpath string) (err error) { + th := mth.getThumbnailer(infpath) + if th == nil { + err = ErrNoThumbnailer + } else { + err = th.Generate(infpath, outfpath) + } + return +} + +func (mth *multiThumbnailer) CanThumbnail(infpath string) bool { + for _, th := range mth.impls { + if th.CanThumbnail(infpath) { + return true + } + } + return false +} + +func MuxThumbnailers(th ...Thumbnailer) Thumbnailer { + return &multiThumbnailer{ + impls: th, + } +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/thumbnail/thumb.go b/contrib/backends/srndv2/src/nntpchan/lib/thumbnail/thumb.go new file mode 100644 index 0000000..a974053 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/thumbnail/thumb.go @@ -0,0 +1,76 @@ +package thumbnail + +import ( + "errors" + "fmt" + "regexp" + "strings" +) + +var ErrCannotThumbanil = errors.New("cannot thumbnail file") + +// a generator of thumbnails +type Thumbnailer interface { + // generate thumbnail of attachment + // + // infpath: absolute filepath to attachment + // + // outfpath: absolute filepath to thumbnail + // + // return error if the thumbnailing fails + Generate(infpath, 
outfpath string) error
+
+	// can we generate a thumbnail for this file?
+	CanThumbnail(infpath string) bool
+}
+
+// thumbnail configuration
+type Config struct {
+	// width of thumbnails
+	ThumbW int
+	// height of thumbnails
+	ThumbH int
+	// only generate jpg thumbnails
+	JpegOnly bool
+}
+
+var defaultCfg = &Config{ // used whenever a nil *Config is passed to a constructor below
+	ThumbW:   300,
+	ThumbH:   200,
+	JpegOnly: true,
+}
+
+// create an imagemagick thumbnailer
+func ImageMagickThumbnailer(convertPath string, conf *Config) Thumbnailer {
+	if conf == nil {
+		conf = defaultCfg
+	}
+	return &ExecThumbnailer{
+		Exec:   convertPath,
+		Accept: regexp.MustCompilePOSIX(`\.(png|jpg|jpeg|gif|webp)$`),
+		GenArgs: func(inf, outf string) []string {
+			if strings.HasSuffix(inf, ".gif") {
+				inf += "[0]" // gif: thumbnail only the first frame
+			}
+			if conf.JpegOnly {
+				outf += ".jpeg"
+			}
+			return []string{"-thumbnail", fmt.Sprintf("%d", conf.ThumbW), inf, outf} // NOTE(review): single-dimension geometry constrains width only (height keeps aspect); conf.ThumbH is unused here — confirm intended
+		},
+	}
+}
+
+// generate a thumbnailer that uses ffmpeg
+func FFMpegThumbnailer(ffmpegPath string, conf *Config) Thumbnailer {
+	if conf == nil {
+		conf = defaultCfg
+	}
+	return &ExecThumbnailer{
+		Exec:   ffmpegPath,
+		Accept: regexp.MustCompilePOSIX(`\.(mkv|mp4|avi|webm|ogv|mov|m4v|mpg)$`),
+		GenArgs: func(inf, outf string) []string {
+			outf += ".jpeg" // ffmpeg path always emits jpeg (first frame, scaled)
+			return []string{"-i", inf, "-vf", fmt.Sprintf("scale=%d:%d", conf.ThumbW, conf.ThumbH), "-vframes", "1", outf}
+		},
+	}
+}
diff --git a/contrib/backends/srndv2/src/nntpchan/lib/thumbnail/thumb_test.go b/contrib/backends/srndv2/src/nntpchan/lib/thumbnail/thumb_test.go
new file mode 100644
index 0000000..3796155
--- /dev/null
+++ b/contrib/backends/srndv2/src/nntpchan/lib/thumbnail/thumb_test.go
@@ -0,0 +1,56 @@
+package thumbnail
+
+import (
+	"testing"
+)
+
+func doTestThumb(t *testing.T, th Thumbnailer, allowed, disallowed []string) {
+	for _, f := range allowed {
+		if !th.CanThumbnail(f) {
+			t.Logf("cannot thumbnail expected file: %s", f)
+			t.Fail()
+		}
+	}
+
+	for _, f := range disallowed {
+		if th.CanThumbnail(f) {
+			t.Logf("can thumbnail wrong file: %s", f)
+ t.Fail() + } + } + +} + +var _image = []string{"asd.gif", "asd.jpeg", "asd.jpg", "asd.png", "asd.webp"} +var _video = []string{"asd.mkv", "asd.mov", "asd.mp4", "asd.m4v", "asd.ogv", "asd.avi", "asd.mpg", "asd.webm"} +var _sound = []string{"asd.flac", "asd.mp3", "asd.mp2", "asd.wav", "asd.ogg", "asd.opus", "asd.m4a"} +var _misc = []string{"asd.txt", "asd.swf"} +var _garbage = []string{"asd", "asd.asd", "asd.asd.asd.asd", "asd.benis"} + +func TestCanThumbnailImage(t *testing.T) { + th := ImageMagickThumbnailer("", nil) + var allowed []string + var disallowed []string + + allowed = append(allowed, _image...) + disallowed = append(disallowed, _video...) + disallowed = append(disallowed, _sound...) + disallowed = append(disallowed, _misc...) + disallowed = append(disallowed, _garbage...) + + doTestThumb(t, th, allowed, disallowed) +} + +func TestCanThumbnailVideo(t *testing.T) { + th := FFMpegThumbnailer("", nil) + var allowed []string + var disallowed []string + + allowed = append(allowed, _video...) + disallowed = append(disallowed, _image...) + disallowed = append(disallowed, _sound...) + disallowed = append(disallowed, _misc...) + disallowed = append(disallowed, _garbage...) 
+ + doTestThumb(t, th, allowed, disallowed) +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/util/cache.go b/contrib/backends/srndv2/src/nntpchan/lib/util/cache.go new file mode 100644 index 0000000..0f1b3e4 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/util/cache.go @@ -0,0 +1,87 @@ +package util + +import ( + "fmt" + "path/filepath" + "regexp" + "strconv" +) + +func GetThreadHashHTML(file string) (thread string) { + exp := regexp.MustCompilePOSIX(`thread-([0-9a-f]+)\.html`) + matches := exp.FindStringSubmatch(file) + if len(matches) != 2 { + return "" + } + thread = matches[1] + return +} + +func GetGroupAndPageHTML(file string) (board string, page int) { + exp := regexp.MustCompilePOSIX(`(.*)-([0-9]+)\.html`) + matches := exp.FindStringSubmatch(file) + if len(matches) != 3 { + return "", -1 + } + var err error + board = matches[1] + tmp := matches[2] + page, err = strconv.Atoi(tmp) + if err != nil { + page = -1 + } + return +} + +func GetGroupForCatalogHTML(file string) (group string) { + exp := regexp.MustCompilePOSIX(`catalog-(.+)\.html`) + matches := exp.FindStringSubmatch(file) + if len(matches) != 2 { + return "" + } + group = matches[1] + return +} + +func GetFilenameForBoardPage(webroot_dir, boardname string, pageno int, json bool) string { + var ext string + if json { + ext = "json" + } else { + ext = "html" + } + fname := fmt.Sprintf("%s-%d.%s", boardname, pageno, ext) + return filepath.Join(webroot_dir, fname) +} + +func GetFilenameForThread(webroot_dir, root_post_id string, json bool) string { + var ext string + if json { + ext = "json" + } else { + ext = "html" + } + fname := fmt.Sprintf("thread-%s.%s", HashMessageID(root_post_id), ext) + return filepath.Join(webroot_dir, fname) +} + +func GetFilenameForCatalog(webroot_dir, boardname string) string { + fname := fmt.Sprintf("catalog-%s.html", boardname) + return filepath.Join(webroot_dir, fname) +} + +func GetFilenameForIndex(webroot_dir string) string { + return 
filepath.Join(webroot_dir, "index.html") +} + +func GetFilenameForBoards(webroot_dir string) string { + return filepath.Join(webroot_dir, "boards.html") +} + +func GetFilenameForHistory(webroot_dir string) string { + return filepath.Join(webroot_dir, "history.html") +} + +func GetFilenameForUkko(webroot_dir string) string { + return filepath.Join(webroot_dir, "ukko.html") +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/util/discard.go b/contrib/backends/srndv2/src/nntpchan/lib/util/discard.go new file mode 100644 index 0000000..4b954ff --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/util/discard.go @@ -0,0 +1,14 @@ +package util + +type ioDiscard struct{} + +func (discard *ioDiscard) Write(d []byte) (n int, err error) { + n = len(d) + return +} + +func (discard *ioDiscard) Close() (err error) { + return +} + +var Discard = new(ioDiscard) diff --git a/contrib/backends/srndv2/src/nntpchan/lib/util/hex.go b/contrib/backends/srndv2/src/nntpchan/lib/util/hex.go new file mode 100644 index 0000000..5d4e588 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/util/hex.go @@ -0,0 +1,11 @@ +package util + +import ( + "crypto/sha1" + "fmt" +) + +// message id hash +func HashMessageID(msgid string) string { + return fmt.Sprintf("%x", sha1.Sum([]byte(msgid))) +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/util/ip.go b/contrib/backends/srndv2/src/nntpchan/lib/util/ip.go new file mode 100644 index 0000000..e287094 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/util/ip.go @@ -0,0 +1,91 @@ +package util + +import ( + "encoding/base64" + "fmt" + "nntpchan/lib/crypto/nacl" + "log" + "net" +) + +// given an address +// generate a new encryption key for it +// return the encryption key and the encrypted address +func NewAddrEnc(addr string) (string, string) { + key_bytes := nacl.RandBytes(encAddrBytes()) + key := base64.StdEncoding.EncodeToString(key_bytes) + return key, EncAddr(addr, key) +} + +// xor address with a one time pad 
+// if the address isn't long enough it's padded with spaces
+func EncAddr(addr, key string) string {
+	key_bytes, err := base64.StdEncoding.DecodeString(key)
+
+	if err != nil {
+		log.Println("encAddr() key base64 decode", err)
+		return ""
+	}
+
+	if len(addr) > len(key_bytes) { // pad can't cover the address: refuse rather than truncate
+		log.Println("encAddr() len(addr) > len(key_bytes)")
+		return ""
+	}
+
+	// pad with spaces
+	for len(addr) < len(key_bytes) {
+		addr += " "
+	}
+
+	addr_bytes := []byte(addr)
+	res_bytes := make([]byte, len(addr_bytes))
+	for idx, b := range key_bytes {
+		res_bytes[idx] = addr_bytes[idx] ^ b
+	}
+
+	return base64.StdEncoding.EncodeToString(res_bytes)
+}
+
+// number of bytes to use in otp
+func encAddrBytes() int {
+	return 64
+}
+
+func IsSubnet(cidr string) (bool, *net.IPNet) { // true + parsed net when cidr is a valid CIDR string
+	_, ipnet, err := net.ParseCIDR(cidr)
+	if err == nil {
+		return true, ipnet
+	}
+	return false, nil
+}
+
+func IPNet2MinMax(inet *net.IPNet) (min, max net.IP) { // lowest/highest addresses of the subnet
+	netb := []byte(inet.IP)
+	maskb := []byte(inet.Mask)
+	maxb := make([]byte, len(netb))
+
+	for i := range maxb {
+		maxb[i] = netb[i] | (^maskb[i])
+	}
+	min = net.IP(netb)
+	max = net.IP(maxb)
+	return
+}
+
+func ZeroIPString(ip net.IP) string { // fixed-width, zero-padded rendering for string comparison/sorting
+	p := ip
+
+	if len(ip) == 0 {
+		return ""
+	}
+
+	if p4 := p.To4(); len(p4) == net.IPv4len {
+		return fmt.Sprintf("%03d.%03d.%03d.%03d", p4[0], p4[1], p4[2], p4[3])
+	}
+	if len(p) == net.IPv6len {
+		//>IPv6
+		//ishygddt
+		return fmt.Sprintf("[%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x]", p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15])
+	}
+	return "?"
+} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/util/nntp_login.go b/contrib/backends/srndv2/src/nntpchan/lib/util/nntp_login.go new file mode 100644 index 0000000..1176810 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/util/nntp_login.go @@ -0,0 +1,29 @@ +package util + +import ( + "crypto/sha512" + "encoding/base64" + "encoding/hex" + "nntpchan/lib/crypto/nacl" +) + +// generate a login salt for nntp users +func GenLoginCredSalt() (salt string) { + salt = randStr(128) + return +} + +// do nntp login credential hash given password and salt +func NntpLoginCredHash(passwd, salt string) (str string) { + var b []byte + b = append(b, []byte(passwd)...) + b = append(b, []byte(salt)...) + h := sha512.Sum512(b) + str = base64.StdEncoding.EncodeToString(h[:]) + return +} + +// make a random string +func randStr(length int) string { + return hex.EncodeToString(nacl.RandBytes(length))[length:] +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/util/post.go b/contrib/backends/srndv2/src/nntpchan/lib/util/post.go new file mode 100644 index 0000000..2decff1 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/util/post.go @@ -0,0 +1,8 @@ +package util + +import "strings" + +func IsSage(str string) bool { + str = strings.ToLower(str) + return str == "sage" || strings.HasPrefix(str, "sage ") +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/util/time.go b/contrib/backends/srndv2/src/nntpchan/lib/util/time.go new file mode 100644 index 0000000..75a7ee4 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/util/time.go @@ -0,0 +1,8 @@ +package util + +import "time" + +// time for right now as int64 +func TimeNow() int64 { + return time.Now().UTC().Unix() +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/webhooks/doc.go b/contrib/backends/srndv2/src/nntpchan/lib/webhooks/doc.go new file mode 100644 index 0000000..5124e0b --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/webhooks/doc.go @@ -0,0 +1,4 @@ +// +// 
nntpchan web hooks +// +package webhooks diff --git a/contrib/backends/srndv2/src/nntpchan/lib/webhooks/http.go b/contrib/backends/srndv2/src/nntpchan/lib/webhooks/http.go new file mode 100644 index 0000000..df10db9 --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/webhooks/http.go @@ -0,0 +1,154 @@ +package webhooks + +import ( + "encoding/json" + "fmt" + log "github.com/Sirupsen/logrus" + "nntpchan/lib/config" + "nntpchan/lib/nntp" + "nntpchan/lib/nntp/message" + "nntpchan/lib/store" + "io" + "mime" + "mime/multipart" + "net/http" + "net/textproto" + "net/url" + "regexp" + "strings" +) + +// web hook implementation +type httpWebhook struct { + conf *config.WebhookConfig + storage store.Storage + hdr *message.HeaderIO +} + +func (h *httpWebhook) SentArticleVia(msgid nntp.MessageID, name string) { + // web hooks don't care about feed state +} + +// we got a new article +func (h *httpWebhook) GotArticle(msgid nntp.MessageID, group nntp.Newsgroup) { + h.sendArticle(msgid, group) +} + +func (h *httpWebhook) sendArticle(msgid nntp.MessageID, group nntp.Newsgroup) { + f, err := h.storage.OpenArticle(msgid.String()) + if err == nil { + u, _ := url.Parse(h.conf.URL) + var r *http.Response + var ctype string + if h.conf.Dialect == "vichan" { + c := textproto.NewConn(f) + var hdr textproto.MIMEHeader + hdr, err = c.ReadMIMEHeader() + if err == nil { + var body io.Reader + ctype = hdr.Get("Content-Type") + if ctype == "" || strings.HasPrefix(ctype, "text/plain") { + ctype = "text/plain" + } + ctype = strings.Replace(strings.ToLower(ctype), "multipart/mixed", "multipart/form-data", 1) + q := u.Query() + for k, vs := range hdr { + for _, v := range vs { + q.Add(k, v) + } + } + q.Set("Content-Type", ctype) + u.RawQuery = q.Encode() + + if strings.HasPrefix(ctype, "multipart") { + pr, pw := io.Pipe() + log.Debug("using pipe") + go func(in io.Reader, out io.WriteCloser) { + _, params, _ := mime.ParseMediaType(ctype) + if params == nil { + // send as whatever lol + 
io.Copy(out, in) + } else { + boundary, _ := params["boundary"] + mpr := multipart.NewReader(in, boundary) + mpw := multipart.NewWriter(out) + mpw.SetBoundary(boundary) + for { + part, err := mpr.NextPart() + if err == io.EOF { + err = nil + break + } else if err == nil { + // get part header + h := part.Header + // rewrite header part for php + cd := h.Get("Content-Disposition") + r := regexp.MustCompile(`filename="(.*)"`) + // YOLO + parts := r.FindStringSubmatch(cd) + if len(parts) > 1 { + fname := parts[1] + h.Set("Content-Disposition", fmt.Sprintf(`filename="%s"; name="attachment[]"`, fname)) + } + // make write part + wp, err := mpw.CreatePart(h) + if err == nil { + // write part out + io.Copy(wp, part) + } else { + log.Errorf("error writng webhook part: %s", err.Error()) + } + } + part.Close() + } + mpw.Close() + } + out.Close() + }(c.R, pw) + body = pr + } else { + body = f + } + r, err = http.Post(u.String(), ctype, body) + } + } else { + var sz int64 + sz, err = f.Seek(0, 2) + if err != nil { + return + } + f.Seek(0, 0) + // regular webhook + ctype = "text/plain; charset=UTF-8" + cl := new(http.Client) + r, err = cl.Do(&http.Request{ + ContentLength: sz, + URL: u, + Method: "POST", + Body: f, + }) + } + if err == nil && r != nil { + dec := json.NewDecoder(r.Body) + result := make(map[string]interface{}) + err = dec.Decode(&result) + if err == nil || err == io.EOF { + msg, ok := result["error"] + if ok { + log.Warnf("hook gave error: %s", msg) + } else { + log.Debugf("hook response: %s", result) + } + } else { + log.Warnf("hook response does not look like json: %s", err) + } + r.Body.Close() + log.Infof("hook called for %s", msgid) + } + } else { + f.Close() + } + if err != nil { + log.Errorf("error calling web hook %s: %s", h.conf.Name, err.Error()) + } +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/webhooks/multi.go b/contrib/backends/srndv2/src/nntpchan/lib/webhooks/multi.go new file mode 100644 index 0000000..1cf0ce3 --- /dev/null +++ 
b/contrib/backends/srndv2/src/nntpchan/lib/webhooks/multi.go @@ -0,0 +1,23 @@ +package webhooks + +import ( + "nntpchan/lib/nntp" +) + +// webhook multiplexer +type multiWebhook struct { + hooks []Webhook +} + +// got an article +func (m *multiWebhook) GotArticle(msgid nntp.MessageID, group nntp.Newsgroup) { + for _, h := range m.hooks { + h.GotArticle(msgid, group) + } +} + +func (m *multiWebhook) SentArticleVia(msgid nntp.MessageID, feedname string) { + for _, h := range m.hooks { + h.SentArticleVia(msgid, feedname) + } +} diff --git a/contrib/backends/srndv2/src/nntpchan/lib/webhooks/webhooks.go b/contrib/backends/srndv2/src/nntpchan/lib/webhooks/webhooks.go new file mode 100644 index 0000000..cd6826b --- /dev/null +++ b/contrib/backends/srndv2/src/nntpchan/lib/webhooks/webhooks.go @@ -0,0 +1,30 @@ +package webhooks + +import ( + "nntpchan/lib/config" + "nntpchan/lib/nntp" + "nntpchan/lib/nntp/message" + "nntpchan/lib/store" +) + +type Webhook interface { + // implements nntp.EventHooks + nntp.EventHooks +} + +// create webhook multiplexing multiple web hooks +func NewWebhooks(conf []*config.WebhookConfig, st store.Storage) Webhook { + h := message.NewHeaderIO() + var hooks []Webhook + for _, c := range conf { + hooks = append(hooks, &httpWebhook{ + conf: c, + storage: st, + hdr: h, + }) + } + + return &multiWebhook{ + hooks: hooks, + } +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/.gitignore b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/.gitignore new file mode 100644 index 0000000..66be63a --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/.gitignore @@ -0,0 +1 @@ +logrus diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/.travis.yml b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/.travis.yml new file mode 100644 index 0000000..ec7dd78 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/.travis.yml @@ -0,0 +1,9 
@@ +language: go +go: + - 1.6 + - 1.7 + - tip +install: + - go get -t $(go list ./... | grep -v /examples/) +script: + - GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v $(go list ./... | grep -v /examples/) diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/CHANGELOG.md new file mode 100644 index 0000000..f2c2bc2 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,66 @@ +# 0.10.0 + +* feature: Add a test hook (#180) +* feature: `ParseLevel` is now case-insensitive (#326) +* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* 
formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/LICENSE b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 0000000..f090cb4 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/README.md b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/README.md new file mode 100644 index 0000000..f9cfb0a --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/README.md @@ -0,0 +1,426 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not +yet stable (pre 1.0). Logrus itself is completely stable and has been used in +many large deployments. The core API is unlikely to change much but please +version control your Logrus to make sure you aren't fetching latest `master` on +every build.** + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not 
+attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +exit status 1 +``` + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/Sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/Sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stderr instead of stdout, could also be a file. + log.SetOutput(os.Stderr) + + // Only log the warning severity or above. 
+ log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "github.com/Sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stderr + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging though logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. 
We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/Sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" + logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). + +| Hook | Description | +| ----- | ----------- | +| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | +| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. 
Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | +| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | +| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | +| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | +| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | +| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | +| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | +| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | +| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | +| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | +| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | +| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) | +| 
[InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | +| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | +| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | +| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | +| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | +| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | +| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | +| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| +| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| +| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| +| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | +| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | +| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | +| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | +| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) | + + +#### Level logging + +Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. 
+ +```go +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/Sirupsen/logrus" +) + +init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. 
+ +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true` +* `logrus.JSONFormatter`. Logs fields as JSON. + +Third party logging formatters: + +* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. +* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. 
+ ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + +#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper arround Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | + +#### Testing + +Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: + +* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook +* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): + +```go +logger, hook := NewNullLogger() +logger.Error("Hello error") + +assert.Equal(1, len(hook.Entries)) +assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) +assert.Equal("Hello error", hook.LastEntry().Message) + +hook.Reset() +assert.Nil(hook.LastEntry()) +``` + +#### Fatal handlers + +Logrus can register one or more functions that will be called when any `fatal` +level message is logged. The registered handlers will be executed before +logrus performs a `os.Exit(1)`. 
This behavior may be helpful if callers need +to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted. + +``` +... +handler := func() { +  // gracefully shutdown something... +} +logrus.RegisterExitHandler(handler) +... +``` + +#### Thread safety + +By default Logger is protected by mutex for concurrent writes; this mutex is invoked when calling hooks and writing logs. +If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. + +Situations when locking is not needed include: + +* You have no hooks registered, or hooks calling is already thread-safe. + +* Writing to logger.Out is already thread-safe, for example: + +  1) logger.Out is protected by locks. + +  2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing) + +  (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/alt_exit.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/alt_exit.go new file mode 100644 index 0000000..b4c9e84 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/alt_exit.go @@ -0,0 +1,64 @@ +package logrus + +// The following code was sourced and modified from the +// https://bitbucket.org/tebeka/atexit package governed by the following license: +// +// Copyright (c) 2012 Miki Tebeka .
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import ( + "fmt" + "os" +) + +var handlers = []func(){} + +func runHandler(handler func()) { + defer func() { + if err := recover(); err != nil { + fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) + } + }() + + handler() +} + +func runHandlers() { + for _, handler := range handlers { + runHandler(handler) + } +} + +// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) +func Exit(code int) { + runHandlers() + os.Exit(code) +} + +// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke +// all handlers. The handlers will also be invoked when any Fatal log entry is +// made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. 
+func RegisterExitHandler(handler func()) { + handlers = append(handlers, handler) +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/alt_exit_test.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/alt_exit_test.go new file mode 100644 index 0000000..022b778 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/alt_exit_test.go @@ -0,0 +1,74 @@ +package logrus + +import ( + "io/ioutil" + "os/exec" + "testing" + "time" +) + +func TestRegister(t *testing.T) { + current := len(handlers) + RegisterExitHandler(func() {}) + if len(handlers) != current+1 { + t.Fatalf("can't add handler") + } +} + +func TestHandler(t *testing.T) { + gofile := "/tmp/testprog.go" + if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil { + t.Fatalf("can't create go file") + } + + outfile := "/tmp/testprog.out" + arg := time.Now().UTC().String() + err := exec.Command("go", "run", gofile, outfile, arg).Run() + if err == nil { + t.Fatalf("completed normally, should have failed") + } + + data, err := ioutil.ReadFile(outfile) + if err != nil { + t.Fatalf("can't read output file %s", outfile) + } + + if string(data) != arg { + t.Fatalf("bad data") + } +} + +var testprog = []byte(` +// Test program for atexit, gets output file and data as arguments and writes +// data to output file in atexit handler. 
+package main + +import ( + "github.com/Sirupsen/logrus" + "flag" + "fmt" + "io/ioutil" +) + +var outfile = "" +var data = "" + +func handler() { + ioutil.WriteFile(outfile, []byte(data), 0666) +} + +func badHandler() { + n := 0 + fmt.Println(1/n) +} + +func main() { + flag.Parse() + outfile = flag.Arg(0) + data = flag.Arg(1) + + logrus.RegisterExitHandler(handler) + logrus.RegisterExitHandler(badHandler) + logrus.Fatal("Bye bye") +} +`) diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/doc.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/doc.go new file mode 100644 index 0000000..dddd5f8 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. + + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/Sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/Sirupsen/logrus +*/ +package logrus diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/entry.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 0000000..4edbe7a --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,275 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "sync" + "time" +) + +var bufferPool *sync.Pool + +func init() { + bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. 
It contains all +// the fields passed with WithField{,s}. It's finally logged when Debug, Info, +// Warn, Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + Level Level + + // Message passed to Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), an Buffer may be set to entry + Buffer *bytes.Buffer +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, give a little extra room + Data: make(Fields, 5), + } +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. 
+func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + entry.Logger.mu.Unlock() + } + buffer = bufferPool.Get().(*bytes.Buffer) + buffer.Reset() + defer bufferPool.Put(buffer) + entry.Buffer = buffer + serialized, err := entry.Logger.Formatter.Format(&entry) + entry.Buffer = nil + if err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + entry.Logger.mu.Unlock() + } else { + entry.Logger.mu.Lock() + _, err = entry.Logger.Out.Write(serialized) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + entry.Logger.mu.Unlock() + } + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(&entry) + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) 
+} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) 
+} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } + Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } + Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) 
+ return msg[:len(msg)-1] +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/entry_test.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/entry_test.go new file mode 100644 index 0000000..99c3b41 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/entry_test.go @@ -0,0 +1,77 @@ +package logrus + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEntryWithError(t *testing.T) { + + assert := assert.New(t) + + defer func() { + ErrorKey = "error" + }() + + err := fmt.Errorf("kaboom at layer %d", 4711) + + assert.Equal(err, WithError(err).Data["error"]) + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + + assert.Equal(err, entry.WithError(err).Data["error"]) + + ErrorKey = "err" + + assert.Equal(err, entry.WithError(err).Data["err"]) + +} + +func TestEntryPanicln(t *testing.T) { + errBoom := fmt.Errorf("boom time") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicln("kaboom") +} + +func TestEntryPanicf(t *testing.T) { + errBoom := fmt.Errorf("boom again") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom true", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicf("kaboom %v", true) +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go 
b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go new file mode 100644 index 0000000..a1623ec --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go @@ -0,0 +1,50 @@ +package main + +import ( + "github.com/Sirupsen/logrus" +) + +var log = logrus.New() + +func init() { + log.Formatter = new(logrus.JSONFormatter) + log.Formatter = new(logrus.TextFormatter) // default + log.Level = logrus.DebugLevel +} + +func main() { + defer func() { + err := recover() + if err != nil { + log.WithFields(logrus.Fields{ + "omg": true, + "err": err, + "number": 100, + }).Fatal("The ice breaks!") + } + }() + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "number": 8, + }).Debug("Started observing beach") + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "temperature": -4, + }).Debug("Temperature changes") + + log.WithFields(logrus.Fields{ + "animal": "orca", + "size": 9009, + }).Panic("It's over 9000!") +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go new file mode 100644 index 0000000..3187f6d --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go @@ -0,0 +1,30 @@ +package main + +import ( + "github.com/Sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" +) + +var log = logrus.New() + +func init() { + log.Formatter = new(logrus.TextFormatter) // default + log.Hooks.Add(airbrake.NewHook(123, "xyz", "development")) +} + +func main() { + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + 
log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/exported.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 0000000..9a0120a --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,193 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.Level = level +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() + return std.Level +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. 
+func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. 
+func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) 
+} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/formatter.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/formatter.go new file mode 100644 index 0000000..b5fbe93 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/formatter.go @@ -0,0 +1,45 @@ +package logrus + +import "time" + +const DefaultTimestampFormat = time.RFC3339 + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. 
+func prefixFieldClashes(data Fields) { + if t, ok := data["time"]; ok { + data["fields.time"] = t + } + + if m, ok := data["msg"]; ok { + data["fields.msg"] = m + } + + if l, ok := data["level"]; ok { + data["fields.level"] = l + } +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go new file mode 100644 index 0000000..c6d290c --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go @@ -0,0 +1,98 @@ +package logrus + +import ( + "fmt" + "testing" + "time" +) + +// smallFields is a small size data set for benchmarking +var smallFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", +} + +// largeFields is a large size data set for benchmarking +var largeFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", + "five": "six", + "seven": "eight", + "nine": "ten", + "eleven": "twelve", + "thirteen": "fourteen", + "fifteen": "sixteen", + "seventeen": "eighteen", + "nineteen": "twenty", + "a": "b", + "c": "d", + "e": "f", + "g": "h", + "i": "j", + "k": "l", + "m": "n", + "o": "p", + "q": "r", + "s": "t", + "u": "v", + "w": "x", + "y": "z", + "this": "will", + "make": "thirty", + "entries": "yeah", +} + +var errorFields = Fields{ + "foo": fmt.Errorf("bar"), + "baz": fmt.Errorf("qux"), +} + +func BenchmarkErrorTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) +} + +func BenchmarkSmallTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) +} + +func BenchmarkLargeTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) +} + +func BenchmarkSmallColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) +} + +func BenchmarkLargeColoredTextFormatter(b *testing.B) { + 
doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) +} + +func BenchmarkSmallJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, smallFields) +} + +func BenchmarkLargeJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, largeFields) +} + +func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { + entry := &Entry{ + Time: time.Time{}, + Level: InfoLevel, + Message: "message", + Data: fields, + } + var d []byte + var err error + for i := 0; i < b.N; i++ { + d, err = formatter.Format(entry) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(d))) + } +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hook_test.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hook_test.go new file mode 100644 index 0000000..13f34cb --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hook_test.go @@ -0,0 +1,122 @@ +package logrus + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestHook struct { + Fired bool +} + +func (hook *TestHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *TestHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookFires(t *testing.T) { + hook := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + assert.Equal(t, hook.Fired, false) + + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} + +type ModifyHook struct { +} + +func (hook *ModifyHook) Fire(entry *Entry) error { + entry.Data["wow"] = "whale" + return nil +} + +func (hook *ModifyHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookCanModifyEntry(t *testing.T) { + hook := new(ModifyHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + 
log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + }) +} + +func TestCanFireMultipleHooks(t *testing.T) { + hook1 := new(ModifyHook) + hook2 := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook1) + log.Hooks.Add(hook2) + + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + assert.Equal(t, hook2.Fired, true) + }) +} + +type ErrorHook struct { + Fired bool +} + +func (hook *ErrorHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *ErrorHook) Levels() []Level { + return []Level{ + ErrorLevel, + } +} + +func TestErrorHookShouldntFireOnInfo(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, false) + }) +} + +func TestErrorHookShouldFireOnError(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Error("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 0000000..3f151cd --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. 
+type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md new file mode 100644 index 0000000..066704b --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md @@ -0,0 +1,39 @@ +# Syslog Hooks for Logrus :walrus: + +## Usage + +```go +import ( + "log/syslog" + "github.com/Sirupsen/logrus" + logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" +) + +func main() { + log := logrus.New() + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err == nil { + log.Hooks.Add(hook) + } +} +``` + +If you want to connect to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). Just assign empty string to the first two parameters of `NewSyslogHook`. It should look like the following. 
+ +```go +import ( + "log/syslog" + "github.com/Sirupsen/logrus" + logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" +) + +func main() { + log := logrus.New() + hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "") + + if err == nil { + log.Hooks.Add(hook) + } +} +``` \ No newline at end of file diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go new file mode 100644 index 0000000..a36e200 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go @@ -0,0 +1,54 @@ +// +build !windows,!nacl,!plan9 + +package logrus_syslog + +import ( + "fmt" + "github.com/Sirupsen/logrus" + "log/syslog" + "os" +) + +// SyslogHook to send logs via syslog. +type SyslogHook struct { + Writer *syslog.Writer + SyslogNetwork string + SyslogRaddr string +} + +// Creates a hook to be added to an instance of logger. 
This is called with +// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` +// `if err == nil { log.Hooks.Add(hook) }` +func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { + w, err := syslog.Dial(network, raddr, priority, tag) + return &SyslogHook{w, network, raddr}, err +} + +func (hook *SyslogHook) Fire(entry *logrus.Entry) error { + line, err := entry.String() + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) + return err + } + + switch entry.Level { + case logrus.PanicLevel: + return hook.Writer.Crit(line) + case logrus.FatalLevel: + return hook.Writer.Crit(line) + case logrus.ErrorLevel: + return hook.Writer.Err(line) + case logrus.WarnLevel: + return hook.Writer.Warning(line) + case logrus.InfoLevel: + return hook.Writer.Info(line) + case logrus.DebugLevel: + return hook.Writer.Debug(line) + default: + return nil + } +} + +func (hook *SyslogHook) Levels() []logrus.Level { + return logrus.AllLevels +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go new file mode 100644 index 0000000..42762dc --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go @@ -0,0 +1,26 @@ +package logrus_syslog + +import ( + "github.com/Sirupsen/logrus" + "log/syslog" + "testing" +) + +func TestLocalhostAddAndPrint(t *testing.T) { + log := logrus.New() + hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err != nil { + t.Errorf("Unable to connect to local syslog.") + } + + log.Hooks.Add(hook) + + for _, level := range hook.Levels() { + if len(log.Hooks[level]) != 1 { + t.Errorf("SyslogHook was not added. 
The length of log.Hooks[%v]: %v", level, len(log.Hooks[level])) + } + } + + log.Info("Congratulations!") +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks/test/test.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks/test/test.go new file mode 100644 index 0000000..0688125 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks/test/test.go @@ -0,0 +1,67 @@ +package test + +import ( + "io/ioutil" + + "github.com/Sirupsen/logrus" +) + +// test.Hook is a hook designed for dealing with logs in test scenarios. +type Hook struct { + Entries []*logrus.Entry +} + +// Installs a test hook for the global logger. +func NewGlobal() *Hook { + + hook := new(Hook) + logrus.AddHook(hook) + + return hook + +} + +// Installs a test hook for a given local logger. +func NewLocal(logger *logrus.Logger) *Hook { + + hook := new(Hook) + logger.Hooks.Add(hook) + + return hook + +} + +// Creates a discarding logger and installs the test hook. +func NewNullLogger() (*logrus.Logger, *Hook) { + + logger := logrus.New() + logger.Out = ioutil.Discard + + return logger, NewLocal(logger) + +} + +func (t *Hook) Fire(e *logrus.Entry) error { + t.Entries = append(t.Entries, e) + return nil +} + +func (t *Hook) Levels() []logrus.Level { + return logrus.AllLevels +} + +// LastEntry returns the last entry that was logged or nil. +func (t *Hook) LastEntry() (l *logrus.Entry) { + + if i := len(t.Entries) - 1; i < 0 { + return nil + } else { + return t.Entries[i] + } + +} + +// Reset removes all Entries from this test hook. 
+func (t *Hook) Reset() { + t.Entries = make([]*logrus.Entry, 0) +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go new file mode 100644 index 0000000..d69455b --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go @@ -0,0 +1,39 @@ +package test + +import ( + "testing" + + "github.com/Sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestAllHooks(t *testing.T) { + + assert := assert.New(t) + + logger, hook := NewNullLogger() + assert.Nil(hook.LastEntry()) + assert.Equal(0, len(hook.Entries)) + + logger.Error("Hello error") + assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal("Hello error", hook.LastEntry().Message) + assert.Equal(1, len(hook.Entries)) + + logger.Warn("Hello warning") + assert.Equal(logrus.WarnLevel, hook.LastEntry().Level) + assert.Equal("Hello warning", hook.LastEntry().Message) + assert.Equal(2, len(hook.Entries)) + + hook.Reset() + assert.Nil(hook.LastEntry()) + assert.Equal(0, len(hook.Entries)) + + hook = NewGlobal() + + logrus.Error("Hello error") + assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal("Hello error", hook.LastEntry().Message) + assert.Equal(1, len(hook.Entries)) + +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/json_formatter.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 0000000..2ad6dc5 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,41 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. 
+ TimestampFormat string +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/Sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + + data["time"] = entry.Time.Format(timestampFormat) + data["msg"] = entry.Message + data["level"] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/json_formatter_test.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/json_formatter_test.go new file mode 100644 index 0000000..1d70873 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/json_formatter_test.go @@ -0,0 +1,120 @@ +package logrus + +import ( + "encoding/json" + "errors" + + "testing" +) + +func TestErrorNotLost(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["error"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + 
if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["omg"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestFieldClashWithTime(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("time", "right now!")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.time"] != "right now!" { + t.Fatal("fields.time not set to original time field") + } + + if entry["time"] != "0001-01-01T00:00:00Z" { + t.Fatal("time field not set to current time, was: ", entry["time"]) + } +} + +func TestFieldClashWithMsg(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("msg", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.msg"] != "something" { + t.Fatal("fields.msg not set to original msg field") + } +} + +func TestFieldClashWithLevel(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.level"] != "something" { + t.Fatal("fields.level not set to original level field") + } +} + +func TestJSONEntryEndsWithNewline(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + if b[len(b)-1] != '\n' { + t.Fatal("Expected JSON log entry to end with a newline") + } +} 
diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/logger.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 0000000..b769f3d --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,308 @@ +package logrus + +import ( + "io" + "os" + "sync" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. `logrus.Debug` is useful in + Level Level + // Used to sync writing to the log. Locking is enabled by Default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool +} + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. 
Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + logger.entryPool.Put(entry) +} + +// Adds a field to the log entry, note that it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.Level >= DebugLevel { + entry := logger.newEntry() + entry.Debugf(format, args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.Level >= InfoLevel { + entry := logger.newEntry() + entry.Infof(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.Level >= ErrorLevel { + entry := logger.newEntry() + entry.Errorf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.Level >= FatalLevel { + entry := logger.newEntry() + entry.Fatalf(format, args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.Level >= PanicLevel { + entry := logger.newEntry() + entry.Panicf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.Level >= DebugLevel { + entry := logger.newEntry() + entry.Debug(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.Level >= InfoLevel { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Info(args...) 
+ logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.Level >= ErrorLevel { + entry := logger.newEntry() + entry.Error(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.Level >= FatalLevel { + entry := logger.newEntry() + entry.Fatal(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.Level >= PanicLevel { + entry := logger.newEntry() + entry.Panic(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.Level >= DebugLevel { + entry := logger.newEntry() + entry.Debugln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.Level >= InfoLevel { + entry := logger.newEntry() + entry.Infoln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.Level >= ErrorLevel { + entry := logger.newEntry() + entry.Errorln(args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.Level >= FatalLevel { + entry := logger.newEntry() + entry.Fatalln(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.Level >= PanicLevel { + entry := logger.newEntry() + entry.Panicln(args...) + logger.releaseEntry(entry) + } +} + +//When file is opened with appending mode, it's safe to +//write concurrently to a file (within 4k message on Linux). +//In these cases user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/logger_bench_test.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/logger_bench_test.go new file mode 100644 index 0000000..dd23a35 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/logger_bench_test.go @@ -0,0 +1,61 @@ +package logrus + +import ( + "os" + "testing" +) + +// smallFields is a small size data set for benchmarking +var loggerFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", +} + +func BenchmarkDummyLogger(b *testing.B) { + nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666) + if err != nil { + b.Fatalf("%v", err) + } + defer nullf.Close() + doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields) +} + +func BenchmarkDummyLoggerNoLock(b *testing.B) { + nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666) + if err != nil { + b.Fatalf("%v", err) + } + defer nullf.Close() + doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields) +} + +func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) { + logger := Logger{ + Out: out, + Level: InfoLevel, + Formatter: formatter, + } + entry := logger.WithFields(fields) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + 
entry.Info("aaa") + } + }) +} + +func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) { + logger := Logger{ + Out: out, + Level: InfoLevel, + Formatter: formatter, + } + logger.SetNoLock() + entry := logger.WithFields(fields) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + entry.Info("aaa") + } + }) +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/logrus.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 0000000..e596691 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,143 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint8 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, +} + +// These are the different logging levels. 
You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. 
+type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/logrus_test.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/logrus_test.go new file mode 100644 index 0000000..bfc4780 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/logrus_test.go @@ -0,0 +1,361 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "strconv" + "strings" + "sync" + "testing" + + "github.com/stretchr/testify/assert" +) + +func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = 
&buffer + logger.Formatter = new(JSONFormatter) + + log(logger) + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assertions(fields) +} + +func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { + var buffer bytes.Buffer + + logger := New() + logger.Out = &buffer + logger.Formatter = &TextFormatter{ + DisableColors: true, + } + + log(logger) + + fields := make(map[string]string) + for _, kv := range strings.Split(buffer.String(), " ") { + if !strings.Contains(kv, "=") { + continue + } + kvArr := strings.Split(kv, "=") + key := strings.TrimSpace(kvArr[0]) + val := kvArr[1] + if kvArr[1][0] == '"' { + var err error + val, err = strconv.Unquote(val) + assert.NoError(t, err) + } + fields[key] = val + } + assertions(fields) +} + +func TestPrint(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestInfo(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestWarn(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Warn("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "warning") + }) +} + +func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test test") + }) +} + +func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test 10") + }) +} + +func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + 
log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "testtest") + }) +} + +func TestWithFieldsShouldAllowAssignments(t *testing.T) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + localLog := logger.WithFields(Fields{ + "key1": "value1", + }) + + localLog.WithField("key2", "value2").Info("test") + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assert.Equal(t, "value2", fields["key2"]) + assert.Equal(t, "value1", fields["key1"]) + + buffer = bytes.Buffer{} + fields = Fields{} + localLog.Info("test") + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + _, ok := fields["key2"] + assert.Equal(t, false, ok) + assert.Equal(t, "value1", fields["key1"]) +} + +func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + }) +} + +func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["fields.msg"], "hello") + }) +} + +func 
TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("time", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["fields.time"], "hello") + }) +} + +func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("level", 1).Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["level"], "info") + assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only + }) +} + +func TestDefaultFieldsAreNotPrefixed(t *testing.T) { + LogAndAssertText(t, func(log *Logger) { + ll := log.WithField("herp", "derp") + ll.Info("hello") + ll.Info("bye") + }, func(fields map[string]string) { + for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { + if _, ok := fields[fieldName]; ok { + t.Fatalf("should not have prefixed %q: %v", fieldName, fields) + } + } + }) +} + +func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { + + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + llog := logger.WithField("context", "eating raw fish") + + llog.Info("looks delicious") + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded first message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "looks delicious") + assert.Equal(t, fields["context"], "eating raw fish") + + buffer.Reset() + + llog.Warn("omg it is!") + + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded second message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "omg it is!") + assert.Equal(t, fields["context"], "eating raw fish") + assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") + +} + +func TestConvertLevelToString(t 
*testing.T) { + assert.Equal(t, "debug", DebugLevel.String()) + assert.Equal(t, "info", InfoLevel.String()) + assert.Equal(t, "warning", WarnLevel.String()) + assert.Equal(t, "error", ErrorLevel.String()) + assert.Equal(t, "fatal", FatalLevel.String()) + assert.Equal(t, "panic", PanicLevel.String()) +} + +func TestParseLevel(t *testing.T) { + l, err := ParseLevel("panic") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = ParseLevel("PANIC") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = ParseLevel("fatal") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("FATAL") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("error") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("ERROR") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("warn") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("WARN") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("warning") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("WARNING") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("info") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("INFO") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("debug") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("DEBUG") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("invalid") + assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) +} + +func TestGetSetLevelRace(t *testing.T) { + wg := sync.WaitGroup{} + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + if i%2 == 0 { + SetLevel(InfoLevel) + } else { + GetLevel() + } + }(i) + + } + wg.Wait() +} + +func TestLoggingRace(t *testing.T) { + logger := New() + + var wg sync.WaitGroup + 
wg.Add(100) + + for i := 0; i < 100; i++ { + go func() { + logger.Info("info") + wg.Done() + }() + } + wg.Wait() +} + +// Compile test +func TestLogrusInterface(t *testing.T) { + var buffer bytes.Buffer + fn := func(l FieldLogger) { + b := l.WithField("key", "value") + b.Debug("Test") + } + // test logger + logger := New() + logger.Out = &buffer + fn(logger) + + // test Entry + e := logger.WithField("another", "value") + fn(e) +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_appengine.go new file mode 100644 index 0000000..1960169 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_appengine.go @@ -0,0 +1,8 @@ +// +build appengine + +package logrus + +// IsTerminal returns true if stderr's file descriptor is a terminal. +func IsTerminal() bool { + return true +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_bsd.go new file mode 100644 index 0000000..5f6be4d --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_bsd.go @@ -0,0 +1,10 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_linux.go new file mode 100644 index 0000000..308160c --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_linux.go @@ -0,0 +1,14 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !appengine + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 0000000..329038f --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,22 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package logrus + +import ( + "syscall" + "unsafe" +) + +// IsTerminal returns true if stderr's file descriptor is a terminal. +func IsTerminal() bool { + fd := syscall.Stderr + var termios Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_solaris.go new file mode 100644 index 0000000..a3c6f6e --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_solaris.go @@ -0,0 +1,15 @@ +// +build solaris,!appengine + +package logrus + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. 
+func IsTerminal() bool { + _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) + return err == nil +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_windows.go new file mode 100644 index 0000000..3727e8a --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/terminal_windows.go @@ -0,0 +1,27 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,!appengine + +package logrus + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +// IsTerminal returns true if stderr's file descriptor is a terminal. +func IsTerminal() bool { + fd := syscall.Stderr + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/text_formatter.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 0000000..9114b3c --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,168 @@ +package logrus + +import ( + "bytes" + "fmt" + "runtime" + "sort" + "strings" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 34 + gray = 37 +) + +var ( + baseTimestamp time.Time + isTerminal bool +) + +func init() { + baseTimestamp = time.Now() + isTerminal = IsTerminal() +} + +func miniTS() int { + return int(time.Since(baseTimestamp) / time.Second) +} + +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. 
+ ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool +} + +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + var b *bytes.Buffer + var keys []string = make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + prefixFieldClashes(entry.Data) + + isColorTerminal := isTerminal && (runtime.GOOS != "windows") + isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys, timestampFormat) + } else { + if !f.DisableTimestamp { + f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) + } + f.appendKeyValue(b, "level", entry.Level.String()) + if entry.Message != "" { + f.appendKeyValue(b, "msg", entry.Message) + } + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, 
PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func needsQuoting(text string) bool { + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) + b.WriteByte(' ') +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + switch value := value.(type) { + case string: + if !needsQuoting(value) { + b.WriteString(value) + } else { + fmt.Fprintf(b, "%q", value) + } + case error: + errmsg := value.Error() + if !needsQuoting(errmsg) { + b.WriteString(errmsg) + } else { + fmt.Fprintf(b, "%q", errmsg) + } + default: + fmt.Fprint(b, value) + } +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/text_formatter_test.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/text_formatter_test.go new file mode 100644 index 0000000..e25a44f --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/text_formatter_test.go @@ -0,0 +1,61 @@ +package logrus + +import ( + "bytes" + "errors" + "testing" + "time" +) + +func TestQuoting(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + checkQuoting := func(q bool, value interface{}) { + b, _ := tf.Format(WithField("test", value)) + idx := bytes.Index(b, ([]byte)("test=")) + cont := 
bytes.Contains(b[idx+5:], []byte{'"'}) + if cont != q { + if q { + t.Errorf("quoting expected for: %#v", value) + } else { + t.Errorf("quoting not expected for: %#v", value) + } + } + } + + checkQuoting(false, "abcd") + checkQuoting(false, "v1.0") + checkQuoting(false, "1234567890") + checkQuoting(true, "/foobar") + checkQuoting(true, "x y") + checkQuoting(true, "x,y") + checkQuoting(false, errors.New("invalid")) + checkQuoting(true, errors.New("invalid argument")) +} + +func TestTimestampFormat(t *testing.T) { + checkTimeStr := func(format string) { + customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} + customStr, _ := customFormatter.Format(WithField("test", "test")) + timeStart := bytes.Index(customStr, ([]byte)("time=")) + timeEnd := bytes.Index(customStr, ([]byte)("level=")) + timeStr := customStr[timeStart+5 : timeEnd-1] + if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' { + timeStr = timeStr[1 : len(timeStr)-1] + } + if format == "" { + format = time.RFC3339 + } + _, e := time.Parse(format, (string)(timeStr)) + if e != nil { + t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) + } + } + + checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") + checkTimeStr("Mon Jan _2 15:04:05 2006") + checkTimeStr("") +} + +// TODO add tests for sorting etc., this requires a parser for the text +// formatter output. 
diff --git a/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/writer.go b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/writer.go new file mode 100644 index 0000000..f74d2aa --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/Sirupsen/logrus/writer.go @@ -0,0 +1,53 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + switch level { + case DebugLevel: + printFunc = logger.Debug + case InfoLevel: + printFunc = logger.Info + case WarnLevel: + printFunc = logger.Warn + case ErrorLevel: + printFunc = logger.Error + case FatalLevel: + printFunc = logger.Fatal + case PanicLevel: + printFunc = logger.Panic + default: + printFunc = logger.Print + } + + go logger.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); err != nil { + logger.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/contrib/backends/srndv2/src/vendor/github.com/dchest/blake256/README.markdown b/contrib/backends/srndv2/src/vendor/github.com/dchest/blake256/README.markdown new file mode 100644 index 0000000..98426c2 --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/dchest/blake256/README.markdown @@ -0,0 +1,56 @@ +Package blake256 +===================== + + import "github.com/dchest/blake256" + +Package blake256 implements BLAKE-256 and BLAKE-224 hash functions (SHA-3 +candidate). + +Public domain. 
+ + +Constants +--------- + +``` go +const BlockSize = 64 +``` +The block size of the hash algorithm in bytes. + +``` go +const Size = 32 +``` +The size of BLAKE-256 hash in bytes. + +``` go +const Size224 = 28 +``` +The size of BLAKE-224 hash in bytes. + + +Functions +--------- + +### func New + + func New() hash.Hash + +New returns a new hash.Hash computing the BLAKE-256 checksum. + +### func New224 + + func New224() hash.Hash + +New224 returns a new hash.Hash computing the BLAKE-224 checksum. + +### func New224Salt + + func New224Salt(salt []byte) hash.Hash + +New224Salt is like New224 but initializes salt with the given 16-byte slice. + +### func NewSalt + + func NewSalt(salt []byte) hash.Hash + +NewSalt is like New but initializes salt with the given 16-byte slice. diff --git a/contrib/backends/srndv2/src/vendor/github.com/dchest/blake256/blake256.go b/contrib/backends/srndv2/src/vendor/github.com/dchest/blake256/blake256.go new file mode 100644 index 0000000..148ec9e --- /dev/null +++ b/contrib/backends/srndv2/src/vendor/github.com/dchest/blake256/blake256.go @@ -0,0 +1,194 @@ +// Written in 2011-2012 by Dmitry Chestnykh. +// +// To the extent possible under law, the author have dedicated all copyright +// and related and neighboring rights to this software to the public domain +// worldwide. This software is distributed without any warranty. +// http://creativecommons.org/publicdomain/zero/1.0/ + +// Package blake256 implements BLAKE-256 and BLAKE-224 hash functions (SHA-3 +// candidate). +package blake256 + +import "hash" + +// The block size of the hash algorithm in bytes. +const BlockSize = 64 + +// The size of BLAKE-256 hash in bytes. +const Size = 32 + +// The size of BLAKE-224 hash in bytes. 
+const Size224 = 28 + +type digest struct { + hashSize int // hash output size in bits (224 or 256) + h [8]uint32 // current chain value + s [4]uint32 // salt (zero by default) + t uint64 // message bits counter + nullt bool // special case for finalization: skip counter + x [BlockSize]byte // buffer for data not yet compressed + nx int // number of bytes in buffer +} + +var ( + // Initialization values. + iv256 = [8]uint32{ + 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, + 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19} + + iv224 = [8]uint32{ + 0xC1059ED8, 0x367CD507, 0x3070DD17, 0xF70E5939, + 0xFFC00B31, 0x68581511, 0x64F98FA7, 0xBEFA4FA4} + + pad = [64]byte{0x80} +) + +// Reset resets the state of digest. It leaves salt intact. +func (d *digest) Reset() { + if d.hashSize == 224 { + d.h = iv224 + } else { + d.h = iv256 + } + d.t = 0 + d.nx = 0 + d.nullt = false +} + +func (d *digest) Size() int { return d.hashSize >> 3 } + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + if d.nx > 0 { + n := len(p) + if n > BlockSize-d.nx { + n = BlockSize - d.nx + } + d.nx += copy(d.x[d.nx:], p) + if d.nx == BlockSize { + block(d, d.x[:]) + d.nx = 0 + } + p = p[n:] + } + if len(p) >= BlockSize { + n := len(p) &^ (BlockSize - 1) + block(d, p[:n]) + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +// Sum returns the calculated checksum. +func (d0 *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + d := *d0 + + nx := uint64(d.nx) + l := d.t + nx<<3 + len := make([]byte, 8) + len[0] = byte(l >> 56) + len[1] = byte(l >> 48) + len[2] = byte(l >> 40) + len[3] = byte(l >> 32) + len[4] = byte(l >> 24) + len[5] = byte(l >> 16) + len[6] = byte(l >> 8) + len[7] = byte(l) + + if nx == 55 { + // One padding byte. 
+ d.t -= 8 + if d.hashSize == 224 { + d.Write([]byte{0x80}) + } else { + d.Write([]byte{0x81}) + } + } else { + if nx < 55 { + // Enough space to fill the block. + if nx == 0 { + d.nullt = true + } + d.t -= 440 - nx<<3 + d.Write(pad[0 : 55-nx]) + } else { + // Need 2 compressions. + d.t -= 512 - nx<<3 + d.Write(pad[0 : 64-nx]) + d.t -= 440 + d.Write(pad[1:56]) + d.nullt = true + } + if d.hashSize == 224 { + d.Write([]byte{0x00}) + } else { + d.Write([]byte{0x01}) + } + d.t -= 8 + } + d.t -= 64 + d.Write(len) + + out := make([]byte, d.Size()) + j := 0 + for _, s := range d.h[:d.hashSize>>5] { + out[j+0] = byte(s >> 24) + out[j+1] = byte(s >> 16) + out[j+2] = byte(s >> 8) + out[j+3] = byte(s >> 0) + j += 4 + } + return append(in, out...) +} + +func (d *digest) setSalt(s []byte) { + if len(s) != 16 { + panic("salt length must be 16 bytes") + } + d.s[0] = uint32(s[0])<<24 | uint32(s[1])<<16 | uint32(s[2])<<8 | uint32(s[3]) + d.s[1] = uint32(s[4])<<24 | uint32(s[5])<<16 | uint32(s[6])<<8 | uint32(s[7]) + d.s[2] = uint32(s[8])<<24 | uint32(s[9])<<16 | uint32(s[10])<<8 | uint32(s[11]) + d.s[3] = uint32(s[12])<<24 | uint32(s[13])<<16 | uint32(s[14])<<8 | uint32(s[15]) +} + +// New returns a new hash.Hash computing the BLAKE-256 checksum. +func New() hash.Hash { + return &digest{ + hashSize: 256, + h: iv256, + } +} + +// NewSalt is like New but initializes salt with the given 16-byte slice. +func NewSalt(salt []byte) hash.Hash { + d := &digest{ + hashSize: 256, + h: iv256, + } + d.setSalt(salt) + return d +} + +// New224 returns a new hash.Hash computing the BLAKE-224 checksum. +func New224() hash.Hash { + return &digest{ + hashSize: 224, + h: iv224, + } +} + +// New224Salt is like New224 but initializes salt with the given 16-byte slice. 
// New224Salt is like New224 but initializes the digest with the given salt.
// NOTE(review): the salt is installed via d.setSalt — presumably it rejects
// salts of the wrong length (NewSalt panics on an 8-byte salt in the tests
// below); confirm for the 224-bit variant.
func New224Salt(salt []byte) hash.Hash {
	d := &digest{
		hashSize: 224,
		h:        iv224,
	}
	d.setSalt(salt)
	return d
}
diff --git a/contrib/backends/srndv2/src/vendor/github.com/dchest/blake256/blake256_test.go b/contrib/backends/srndv2/src/vendor/github.com/dchest/blake256/blake256_test.go
new file mode 100644
index 0000000..1908133
--- /dev/null
+++ b/contrib/backends/srndv2/src/vendor/github.com/dchest/blake256/blake256_test.go
@@ -0,0 +1,188 @@
// Written in 2011-2012 by Dmitry Chestnykh.
//
// To the extent possible under law, the author have dedicated all copyright
// and related and neighboring rights to this software to the public domain
// worldwide. This software is distributed without any warranty.
// http://creativecommons.org/publicdomain/zero/1.0/

package blake256

import (
	"bytes"
	"fmt"
	"hash"
	"testing"
)

// Test256C checks BLAKE-256 over zero-filled input the same way the
// reference C program does: a 1-byte write, an incremental continuation
// to 72 bytes, and a Reset followed by a single 72-byte write.
func Test256C(t *testing.T) {
	// Test as in C program.
	// hashes[0] is the digest of one zero byte; hashes[1] of 72 zero bytes.
	var hashes = [][]byte{
		{
			0x0C, 0xE8, 0xD4, 0xEF, 0x4D, 0xD7, 0xCD, 0x8D,
			0x62, 0xDF, 0xDE, 0xD9, 0xD4, 0xED, 0xB0, 0xA7,
			0x74, 0xAE, 0x6A, 0x41, 0x92, 0x9A, 0x74, 0xDA,
			0x23, 0x10, 0x9E, 0x8F, 0x11, 0x13, 0x9C, 0x87,
		},
		{
			0xD4, 0x19, 0xBA, 0xD3, 0x2D, 0x50, 0x4F, 0xB7,
			0xD4, 0x4D, 0x46, 0x0C, 0x42, 0xC5, 0x59, 0x3F,
			0xE5, 0x44, 0xFA, 0x4C, 0x13, 0x5D, 0xEC, 0x31,
			0xE2, 0x1B, 0xD9, 0xAB, 0xDC, 0xC2, 0x2D, 0x41,
		},
	}
	data := make([]byte, 72)

	h := New()
	h.Write(data[:1])
	sum := h.Sum(nil)
	if !bytes.Equal(hashes[0], sum) {
		t.Errorf("0: expected %X, got %X", hashes[0], sum)
	}

	// Try to continue hashing.
	h.Write(data[1:])
	sum = h.Sum(nil)
	if !bytes.Equal(hashes[1], sum) {
		t.Errorf("1(1): expected %X, got %X", hashes[1], sum)
	}

	// Try with reset.
	h.Reset()
	h.Write(data)
	sum = h.Sum(nil)
	if !bytes.Equal(hashes[1], sum) {
		t.Errorf("1(2): expected %X, got %X", hashes[1], sum)
	}
}

// blakeVector pairs an input string with its expected hex-encoded digest.
type blakeVector struct {
	out, in string
}

// vectors256 are known-answer vectors for unsalted BLAKE-256.
var vectors256 = []blakeVector{
	{"7576698ee9cad30173080678e5965916adbb11cb5245d386bf1ffda1cb26c9d7",
		"The quick brown fox jumps over the lazy dog"},
	{"07663e00cf96fbc136cf7b1ee099c95346ba3920893d18cc8851f22ee2e36aa6",
		"BLAKE"},
	{"716f6e863f744b9ac22c97ec7b76ea5f5908bc5b2f67c61510bfc4751384ea7a",
		""},
	{"18a393b4e62b1887a2edf79a5c5a5464daf5bbb976f4007bea16a73e4c1e198e",
		"'BLAKE wins SHA-3! Hooray!!!' (I have time machine)"},
	{"fd7282ecc105ef201bb94663fc413db1b7696414682090015f17e309b835f1c2",
		"Go"},
	{"1e75db2a709081f853c2229b65fd1558540aa5e7bd17b04b9a4b31989effa711",
		"HELP! I'm trapped in hash!"},
	{"4181475cb0c22d58ae847e368e91b4669ea2d84bcd55dbf01fe24bae6571dd08",
		`Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. Sed sit amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur ante hendrerit. Donec et mollis dolor. Praesent et diam eget libero egestas mattis sit amet vitae augue. Nam tincidunt congue enim, ut porta lorem lacinia consectetur. Donec ut libero sed arcu vehicula ultricies a non tortor. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean ut gravida lorem. Ut turpis felis, pulvinar a semper sed, adipiscing id dolor. Pellentesque auctor nisi id magna consequat sagittis. Curabitur dapibus enim sit amet elit pharetra tincidunt feugiat nisl imperdiet. Ut convallis libero in urna ultrices accumsan. Donec sed odio eros. Donec viverra mi quis quam pulvinar at malesuada arcu rhoncus. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. In rutrum accumsan ultricies. Mauris vitae nisi at sem facilisis semper ac in est.`,
	},
	{"af95fffc7768821b1e08866a2f9f66916762bfc9d71c4acb5fd515f31fd6785a", // test with one padding byte
		"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. Sed sit amet ipsum mauris. Maecenas congu",
	},
}

// vectors224 are known-answer vectors for unsalted BLAKE-224.
var vectors224 = []blakeVector{
	{"c8e92d7088ef87c1530aee2ad44dc720cc10589cc2ec58f95a15e51b",
		"The quick brown fox jumps over the lazy dog"},
	{"cfb6848add73e1cb47994c4765df33b8f973702705a30a71fe4747a3",
		"BLAKE"},
	{"7dc5313b1c04512a174bd6503b89607aecbee0903d40a8a569c94eed",
		""},
	{"dde9e442003c24495db607b17e07ec1f67396cc1907642a09a96594e",
		"Go"},
	{"9f655b0a92d4155754fa35e055ce7c5e18eb56347081ea1e5158e751",
		"Buffalo buffalo Buffalo buffalo buffalo buffalo Buffalo buffalo"},
}

// testVectors runs every vector through a fresh hash from hashfunc and
// compares the hex-encoded digest with the expected value.
func testVectors(t *testing.T, hashfunc func() hash.Hash, vectors []blakeVector) {
	for i, v := range vectors {
		h := hashfunc()
		h.Write([]byte(v.in))
		res := fmt.Sprintf("%x", h.Sum(nil))
		if res != v.out {
			t.Errorf("%d: expected %q, got %q", i, v.out, res)
		}
	}
}

func Test256(t *testing.T) {
	testVectors(t, New, vectors256)
}

func Test224(t *testing.T) {
	testVectors(t, New224, vectors224)
}

// vectors256salt are known-answer vectors for salted BLAKE-256; every
// salt here is exactly 16 bytes.
var vectors256salt = []struct{ out, in, salt string }{
	{"561d6d0cfa3d31d5eedaf2d575f3942539b03522befc2a1196ba0e51af8992a8",
		"",
		"1234567890123456"},
	{"88cc11889bbbee42095337fe2153c591971f94fbf8fe540d3c7e9f1700ab2d0c",
		"It's so salty out there!",
		"SALTsaltSaltSALT"},
}

// TestSalt verifies the salted vectors and then that NewSalt panics
// when given a salt of the wrong (8-byte) length.
func TestSalt(t *testing.T) {
	for i, v := range vectors256salt {
		h := NewSalt([]byte(v.salt))
		h.Write([]byte(v.in))
		res := fmt.Sprintf("%x", h.Sum(nil))
		if res != v.out {
			t.Errorf("%d: expected %q, got %q", i, v.out, res)
		}
	}

	// Check that passing bad salt length panics.
	defer func() {
		if err := recover(); err == nil {
			t.Errorf("expected panic for bad salt length")
		}
	}()
	NewSalt([]byte{1, 2, 3, 4, 5, 6, 7, 8})
}

// TestTwoWrites checks that hashing is independent of how the input is
// split across Write calls (here: 1 byte + 64 bytes vs. 65 bytes).
func TestTwoWrites(t *testing.T) {
	b := make([]byte, 65)
	for i := range b {
		b[i] = byte(i)
	}
	h1 := New()
	h1.Write(b[:1])
	h1.Write(b[1:])
	sum1 := h1.Sum(nil)

	h2 := New()
	h2.Write(b)
	sum2 := h2.Sum(nil)

	if !bytes.Equal(sum1, sum2) {
		t.Errorf("Result of two writes differs from a single write with the same bytes")
	}
}

// Shared state for the benchmarks below.
var bench = New()
var buf = make([]byte, 8<<10)

func BenchmarkHash1K(b *testing.B) {
	b.SetBytes(1024)
	for i := 0; i < b.N; i++ {
		bench.Write(buf[:1024])
	}
}

func BenchmarkHash8K(b *testing.B) {
	b.SetBytes(int64(len(buf)))
	for i := 0; i < b.N; i++ {
		bench.Write(buf)
	}
}

// BenchmarkFull64 measures a complete Reset/Write/Sum cycle over one
// 64-byte block, reusing tmp to avoid per-iteration allocation.
func BenchmarkFull64(b *testing.B) {
	b.SetBytes(64)
	tmp := make([]byte, 32)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bench.Reset()
		bench.Write(buf[:64])
		bench.Sum(tmp[0:0])
	}
}
diff --git a/contrib/backends/srndv2/src/vendor/github.com/dchest/blake256/blake256block.go b/contrib/backends/srndv2/src/vendor/github.com/dchest/blake256/blake256block.go
new file mode 100644
index 0000000..49daf69
--- /dev/null
+++ b/contrib/backends/srndv2/src/vendor/github.com/dchest/blake256/blake256block.go
@@ -0,0 +1,1681 @@
// Written in 2011-2012 by Dmitry Chestnykh.
//
// To the extent possible under law, the author have dedicated all copyright
// and related and neighboring rights to this software to the public domain
// worldwide. This software is distributed without any warranty.
// http://creativecommons.org/publicdomain/zero/1.0/

// BLAKE-256 block step.
// In its own file so that a faster assembly or C version
// can be substituted easily.
+ +package blake256 + +const ( + cst0 = 0x243F6A88 + cst1 = 0x85A308D3 + cst2 = 0x13198A2E + cst3 = 0x03707344 + cst4 = 0xA4093822 + cst5 = 0x299F31D0 + cst6 = 0x082EFA98 + cst7 = 0xEC4E6C89 + cst8 = 0x452821E6 + cst9 = 0x38D01377 + cst10 = 0xBE5466CF + cst11 = 0x34E90C6C + cst12 = 0xC0AC29B7 + cst13 = 0xC97C50DD + cst14 = 0x3F84D5B5 + cst15 = 0xB5470917 +) + +func block(d *digest, p []uint8) { + h0, h1, h2, h3, h4, h5, h6, h7 := d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] + s0, s1, s2, s3 := d.s[0], d.s[1], d.s[2], d.s[3] + + for len(p) >= BlockSize { + v0, v1, v2, v3, v4, v5, v6, v7 := h0, h1, h2, h3, h4, h5, h6, h7 + v8 := cst0 ^ s0 + v9 := cst1 ^ s1 + v10 := cst2 ^ s2 + v11 := cst3 ^ s3 + v12 := uint32(cst4) + v13 := uint32(cst5) + v14 := uint32(cst6) + v15 := uint32(cst7) + d.t += 512 + if !d.nullt { + v12 ^= uint32(d.t) + v13 ^= uint32(d.t) + v14 ^= uint32(d.t >> 32) + v15 ^= uint32(d.t >> 32) + } + var m [16]uint32 + + m[0] = uint32(p[0])<<24 | uint32(p[1])<<16 | uint32(p[2])<<8 | uint32(p[3]) + m[1] = uint32(p[4])<<24 | uint32(p[5])<<16 | uint32(p[6])<<8 | uint32(p[7]) + m[2] = uint32(p[8])<<24 | uint32(p[9])<<16 | uint32(p[10])<<8 | uint32(p[11]) + m[3] = uint32(p[12])<<24 | uint32(p[13])<<16 | uint32(p[14])<<8 | uint32(p[15]) + m[4] = uint32(p[16])<<24 | uint32(p[17])<<16 | uint32(p[18])<<8 | uint32(p[19]) + m[5] = uint32(p[20])<<24 | uint32(p[21])<<16 | uint32(p[22])<<8 | uint32(p[23]) + m[6] = uint32(p[24])<<24 | uint32(p[25])<<16 | uint32(p[26])<<8 | uint32(p[27]) + m[7] = uint32(p[28])<<24 | uint32(p[29])<<16 | uint32(p[30])<<8 | uint32(p[31]) + m[8] = uint32(p[32])<<24 | uint32(p[33])<<16 | uint32(p[34])<<8 | uint32(p[35]) + m[9] = uint32(p[36])<<24 | uint32(p[37])<<16 | uint32(p[38])<<8 | uint32(p[39]) + m[10] = uint32(p[40])<<24 | uint32(p[41])<<16 | uint32(p[42])<<8 | uint32(p[43]) + m[11] = uint32(p[44])<<24 | uint32(p[45])<<16 | uint32(p[46])<<8 | uint32(p[47]) + m[12] = uint32(p[48])<<24 | uint32(p[49])<<16 | uint32(p[50])<<8 
| uint32(p[51]) + m[13] = uint32(p[52])<<24 | uint32(p[53])<<16 | uint32(p[54])<<8 | uint32(p[55]) + m[14] = uint32(p[56])<<24 | uint32(p[57])<<16 | uint32(p[58])<<8 | uint32(p[59]) + m[15] = uint32(p[60])<<24 | uint32(p[61])<<16 | uint32(p[62])<<8 | uint32(p[63]) + + // Round 1. + v0 += m[0] ^ cst1 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-12) | v4>>12 + v1 += m[2] ^ cst3 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-12) | v5>>12 + v2 += m[4] ^ cst5 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-12) | v6>>12 + v3 += m[6] ^ cst7 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-12) | v7>>12 + v2 += m[5] ^ cst4 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-8) | v14>>8 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-7) | v6>>7 + v3 += m[7] ^ cst6 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-8) | v15>>8 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-7) | v7>>7 + v1 += m[3] ^ cst2 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-8) | v13>>8 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-7) | v5>>7 + v0 += m[1] ^ cst0 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-8) | v12>>8 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-7) | v4>>7 + v0 += m[8] ^ cst9 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-12) | v5>>12 + v1 += m[10] ^ cst11 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-12) | v6>>12 + v2 += m[12] ^ cst13 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-12) | v7>>12 + v3 += m[14] ^ cst15 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-12) | v4>>12 + v2 += m[13] ^ cst12 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-8) | v13>>8 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-7) | v7>>7 + v3 += m[15] ^ cst14 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-8) | v14>>8 + v9 += v14 + v4 ^= 
v9 + v4 = v4<<(32-7) | v4>>7 + v1 += m[11] ^ cst10 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-8) | v12>>8 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-7) | v6>>7 + v0 += m[9] ^ cst8 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-8) | v15>>8 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-7) | v5>>7 + + // Round 2. + v0 += m[14] ^ cst10 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-12) | v4>>12 + v1 += m[4] ^ cst8 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-12) | v5>>12 + v2 += m[9] ^ cst15 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-12) | v6>>12 + v3 += m[13] ^ cst6 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-12) | v7>>12 + v2 += m[15] ^ cst9 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-8) | v14>>8 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-7) | v6>>7 + v3 += m[6] ^ cst13 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-8) | v15>>8 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-7) | v7>>7 + v1 += m[8] ^ cst4 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-8) | v13>>8 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-7) | v5>>7 + v0 += m[10] ^ cst14 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-8) | v12>>8 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-7) | v4>>7 + v0 += m[1] ^ cst12 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-12) | v5>>12 + v1 += m[0] ^ cst2 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-12) | v6>>12 + v2 += m[11] ^ cst7 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-12) | v7>>12 + v3 += m[5] ^ cst3 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-12) | v4>>12 + v2 += m[7] ^ cst11 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-8) | v13>>8 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-7) | v7>>7 + v3 += m[3] ^ cst5 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-8) | v14>>8 + v9 
+= v14 + v4 ^= v9 + v4 = v4<<(32-7) | v4>>7 + v1 += m[2] ^ cst0 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-8) | v12>>8 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-7) | v6>>7 + v0 += m[12] ^ cst1 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-8) | v15>>8 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-7) | v5>>7 + + // Round 3. + v0 += m[11] ^ cst8 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-12) | v4>>12 + v1 += m[12] ^ cst0 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-12) | v5>>12 + v2 += m[5] ^ cst2 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-12) | v6>>12 + v3 += m[15] ^ cst13 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-12) | v7>>12 + v2 += m[2] ^ cst5 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-8) | v14>>8 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-7) | v6>>7 + v3 += m[13] ^ cst15 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-8) | v15>>8 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-7) | v7>>7 + v1 += m[0] ^ cst12 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-8) | v13>>8 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-7) | v5>>7 + v0 += m[8] ^ cst11 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-8) | v12>>8 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-7) | v4>>7 + v0 += m[10] ^ cst14 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-12) | v5>>12 + v1 += m[3] ^ cst6 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-12) | v6>>12 + v2 += m[7] ^ cst1 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-12) | v7>>12 + v3 += m[9] ^ cst4 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-12) | v4>>12 + v2 += m[1] ^ cst7 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-8) | v13>>8 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-7) | v7>>7 + v3 += m[4] ^ cst9 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-8) | 
v14>>8 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-7) | v4>>7 + v1 += m[6] ^ cst3 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-8) | v12>>8 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-7) | v6>>7 + v0 += m[14] ^ cst10 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-8) | v15>>8 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-7) | v5>>7 + + // Round 4. + v0 += m[7] ^ cst9 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-12) | v4>>12 + v1 += m[3] ^ cst1 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-12) | v5>>12 + v2 += m[13] ^ cst12 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-12) | v6>>12 + v3 += m[11] ^ cst14 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-12) | v7>>12 + v2 += m[12] ^ cst13 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-8) | v14>>8 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-7) | v6>>7 + v3 += m[14] ^ cst11 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-8) | v15>>8 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-7) | v7>>7 + v1 += m[1] ^ cst3 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-8) | v13>>8 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-7) | v5>>7 + v0 += m[9] ^ cst7 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-8) | v12>>8 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-7) | v4>>7 + v0 += m[2] ^ cst6 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-12) | v5>>12 + v1 += m[5] ^ cst10 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-12) | v6>>12 + v2 += m[4] ^ cst0 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-12) | v7>>12 + v3 += m[15] ^ cst8 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-12) | v4>>12 + v2 += m[0] ^ cst4 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-8) | v13>>8 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-7) | v7>>7 + v3 += m[8] ^ cst15 + v3 += v4 + v14 ^= v3 + v14 = 
v14<<(32-8) | v14>>8 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-7) | v4>>7 + v1 += m[10] ^ cst5 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-8) | v12>>8 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-7) | v6>>7 + v0 += m[6] ^ cst2 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-8) | v15>>8 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-7) | v5>>7 + + // Round 5. + v0 += m[9] ^ cst0 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-12) | v4>>12 + v1 += m[5] ^ cst7 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-12) | v5>>12 + v2 += m[2] ^ cst4 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-12) | v6>>12 + v3 += m[10] ^ cst15 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-12) | v7>>12 + v2 += m[4] ^ cst2 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-8) | v14>>8 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-7) | v6>>7 + v3 += m[15] ^ cst10 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-8) | v15>>8 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-7) | v7>>7 + v1 += m[7] ^ cst5 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-8) | v13>>8 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-7) | v5>>7 + v0 += m[0] ^ cst9 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-8) | v12>>8 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-7) | v4>>7 + v0 += m[14] ^ cst1 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-12) | v5>>12 + v1 += m[11] ^ cst12 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-12) | v6>>12 + v2 += m[6] ^ cst8 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-12) | v7>>12 + v3 += m[3] ^ cst13 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-12) | v4>>12 + v2 += m[8] ^ cst6 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-8) | v13>>8 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-7) | v7>>7 + v3 += m[13] ^ cst3 + v3 += v4 + v14 ^= 
v3 + v14 = v14<<(32-8) | v14>>8 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-7) | v4>>7 + v1 += m[12] ^ cst11 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-8) | v12>>8 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-7) | v6>>7 + v0 += m[1] ^ cst14 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-8) | v15>>8 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-7) | v5>>7 + + // Round 6. + v0 += m[2] ^ cst12 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-12) | v4>>12 + v1 += m[6] ^ cst10 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-12) | v5>>12 + v2 += m[0] ^ cst11 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-12) | v6>>12 + v3 += m[8] ^ cst3 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-12) | v7>>12 + v2 += m[11] ^ cst0 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-8) | v14>>8 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-7) | v6>>7 + v3 += m[3] ^ cst8 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-8) | v15>>8 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-7) | v7>>7 + v1 += m[10] ^ cst6 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-8) | v13>>8 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-7) | v5>>7 + v0 += m[12] ^ cst2 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-8) | v12>>8 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-7) | v4>>7 + v0 += m[4] ^ cst13 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-12) | v5>>12 + v1 += m[7] ^ cst5 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-12) | v6>>12 + v2 += m[15] ^ cst14 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-12) | v7>>12 + v3 += m[1] ^ cst9 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-12) | v4>>12 + v2 += m[14] ^ cst15 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-8) | v13>>8 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-7) | v7>>7 + v3 += m[9] ^ cst1 + v3 
+= v4 + v14 ^= v3 + v14 = v14<<(32-8) | v14>>8 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-7) | v4>>7 + v1 += m[5] ^ cst7 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-8) | v12>>8 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-7) | v6>>7 + v0 += m[13] ^ cst4 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-8) | v15>>8 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-7) | v5>>7 + + // Round 7. + v0 += m[12] ^ cst5 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-12) | v4>>12 + v1 += m[1] ^ cst15 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-12) | v5>>12 + v2 += m[14] ^ cst13 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-12) | v6>>12 + v3 += m[4] ^ cst10 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-12) | v7>>12 + v2 += m[13] ^ cst14 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-8) | v14>>8 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-7) | v6>>7 + v3 += m[10] ^ cst4 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-8) | v15>>8 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-7) | v7>>7 + v1 += m[15] ^ cst1 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-8) | v13>>8 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-7) | v5>>7 + v0 += m[5] ^ cst12 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-8) | v12>>8 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-7) | v4>>7 + v0 += m[0] ^ cst7 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-12) | v5>>12 + v1 += m[6] ^ cst3 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-12) | v6>>12 + v2 += m[9] ^ cst2 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-12) | v7>>12 + v3 += m[8] ^ cst11 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-12) | v4>>12 + v2 += m[2] ^ cst9 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-8) | v13>>8 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-7) | v7>>7 + v3 += 
m[11] ^ cst8 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-8) | v14>>8 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-7) | v4>>7 + v1 += m[3] ^ cst6 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-8) | v12>>8 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-7) | v6>>7 + v0 += m[7] ^ cst0 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-8) | v15>>8 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-7) | v5>>7 + + // Round 8. + v0 += m[13] ^ cst11 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-12) | v4>>12 + v1 += m[7] ^ cst14 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-12) | v5>>12 + v2 += m[12] ^ cst1 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-12) | v6>>12 + v3 += m[3] ^ cst9 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-12) | v7>>12 + v2 += m[1] ^ cst12 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-8) | v14>>8 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-7) | v6>>7 + v3 += m[9] ^ cst3 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-8) | v15>>8 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-7) | v7>>7 + v1 += m[14] ^ cst7 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-8) | v13>>8 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-7) | v5>>7 + v0 += m[11] ^ cst13 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-8) | v12>>8 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-7) | v4>>7 + v0 += m[5] ^ cst0 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-12) | v5>>12 + v1 += m[15] ^ cst4 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-12) | v6>>12 + v2 += m[8] ^ cst6 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-12) | v7>>12 + v3 += m[2] ^ cst10 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-12) | v4>>12 + v2 += m[6] ^ cst8 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-8) | v13>>8 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-7) | 
v7>>7 + v3 += m[10] ^ cst2 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-8) | v14>>8 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-7) | v4>>7 + v1 += m[4] ^ cst15 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-8) | v12>>8 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-7) | v6>>7 + v0 += m[0] ^ cst5 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-8) | v15>>8 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-7) | v5>>7 + + // Round 9. + v0 += m[6] ^ cst15 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-12) | v4>>12 + v1 += m[14] ^ cst9 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-12) | v5>>12 + v2 += m[11] ^ cst3 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-12) | v6>>12 + v3 += m[0] ^ cst8 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-12) | v7>>12 + v2 += m[3] ^ cst11 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-8) | v14>>8 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-7) | v6>>7 + v3 += m[8] ^ cst0 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-8) | v15>>8 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-7) | v7>>7 + v1 += m[9] ^ cst14 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-8) | v13>>8 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-7) | v5>>7 + v0 += m[15] ^ cst6 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-8) | v12>>8 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-7) | v4>>7 + v0 += m[12] ^ cst2 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-12) | v5>>12 + v1 += m[13] ^ cst7 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-12) | v6>>12 + v2 += m[1] ^ cst4 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-12) | v7>>12 + v3 += m[10] ^ cst5 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-12) | v4>>12 + v2 += m[4] ^ cst1 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-8) | v13>>8 + v8 += v13 + v7 ^= v8 + v7 = 
v7<<(32-7) | v7>>7 + v3 += m[5] ^ cst10 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-8) | v14>>8 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-7) | v4>>7 + v1 += m[7] ^ cst13 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-8) | v12>>8 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-7) | v6>>7 + v0 += m[2] ^ cst12 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-8) | v15>>8 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-7) | v5>>7 + + // Round 10. + v0 += m[10] ^ cst2 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-12) | v4>>12 + v1 += m[8] ^ cst4 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-12) | v5>>12 + v2 += m[7] ^ cst6 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-12) | v6>>12 + v3 += m[1] ^ cst5 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-12) | v7>>12 + v2 += m[6] ^ cst7 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-8) | v14>>8 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-7) | v6>>7 + v3 += m[5] ^ cst1 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-8) | v15>>8 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-7) | v7>>7 + v1 += m[4] ^ cst8 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-8) | v13>>8 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-7) | v5>>7 + v0 += m[2] ^ cst10 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-8) | v12>>8 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-7) | v4>>7 + v0 += m[15] ^ cst11 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-12) | v5>>12 + v1 += m[9] ^ cst14 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-12) | v6>>12 + v2 += m[3] ^ cst12 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-12) | v7>>12 + v3 += m[13] ^ cst0 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-12) | v4>>12 + v2 += m[12] ^ cst3 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-8) | v13>>8 + v8 += v13 + v7 
^= v8 + v7 = v7<<(32-7) | v7>>7 + v3 += m[0] ^ cst13 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-8) | v14>>8 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-7) | v4>>7 + v1 += m[14] ^ cst9 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-8) | v12>>8 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-7) | v6>>7 + v0 += m[11] ^ cst15 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-8) | v15>>8 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-7) | v5>>7 + + // Round 11. + v0 += m[0] ^ cst1 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-12) | v4>>12 + v1 += m[2] ^ cst3 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-12) | v5>>12 + v2 += m[4] ^ cst5 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-12) | v6>>12 + v3 += m[6] ^ cst7 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-12) | v7>>12 + v2 += m[5] ^ cst4 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-8) | v14>>8 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-7) | v6>>7 + v3 += m[7] ^ cst6 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-8) | v15>>8 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-7) | v7>>7 + v1 += m[3] ^ cst2 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-8) | v13>>8 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-7) | v5>>7 + v0 += m[1] ^ cst0 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-8) | v12>>8 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-7) | v4>>7 + v0 += m[8] ^ cst9 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-12) | v5>>12 + v1 += m[10] ^ cst11 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-12) | v6>>12 + v2 += m[12] ^ cst13 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-12) | v7>>12 + v3 += m[14] ^ cst15 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-12) | v4>>12 + v2 += m[13] ^ cst12 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-8) | v13>>8 + 
v8 += v13 + v7 ^= v8 + v7 = v7<<(32-7) | v7>>7 + v3 += m[15] ^ cst14 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-8) | v14>>8 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-7) | v4>>7 + v1 += m[11] ^ cst10 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-8) | v12>>8 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-7) | v6>>7 + v0 += m[9] ^ cst8 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-8) | v15>>8 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-7) | v5>>7 + + // Round 12. + v0 += m[14] ^ cst10 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-12) | v4>>12 + v1 += m[4] ^ cst8 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-12) | v5>>12 + v2 += m[9] ^ cst15 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-12) | v6>>12 + v3 += m[13] ^ cst6 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-12) | v7>>12 + v2 += m[15] ^ cst9 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-8) | v14>>8 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-7) | v6>>7 + v3 += m[6] ^ cst13 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-8) | v15>>8 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-7) | v7>>7 + v1 += m[8] ^ cst4 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-8) | v13>>8 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-7) | v5>>7 + v0 += m[10] ^ cst14 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-8) | v12>>8 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-7) | v4>>7 + v0 += m[1] ^ cst12 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-12) | v5>>12 + v1 += m[0] ^ cst2 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-12) | v6>>12 + v2 += m[11] ^ cst7 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-12) | v7>>12 + v3 += m[5] ^ cst3 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-12) | v4>>12 + v2 += m[7] ^ cst11 + v2 += v7 + v13 ^= v2 + v13 = 
v13<<(32-8) | v13>>8 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-7) | v7>>7 + v3 += m[3] ^ cst5 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-8) | v14>>8 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-7) | v4>>7 + v1 += m[2] ^ cst0 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-8) | v12>>8 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-7) | v6>>7 + v0 += m[12] ^ cst1 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-8) | v15>>8 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-7) | v5>>7 + + // Round 13. + v0 += m[11] ^ cst8 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-12) | v4>>12 + v1 += m[12] ^ cst0 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-12) | v5>>12 + v2 += m[5] ^ cst2 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-12) | v6>>12 + v3 += m[15] ^ cst13 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-12) | v7>>12 + v2 += m[2] ^ cst5 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-8) | v14>>8 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-7) | v6>>7 + v3 += m[13] ^ cst15 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-8) | v15>>8 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-7) | v7>>7 + v1 += m[0] ^ cst12 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-8) | v13>>8 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-7) | v5>>7 + v0 += m[8] ^ cst11 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-8) | v12>>8 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-7) | v4>>7 + v0 += m[10] ^ cst14 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-12) | v5>>12 + v1 += m[3] ^ cst6 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-12) | v6>>12 + v2 += m[7] ^ cst1 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-12) | v7>>12 + v3 += m[9] ^ cst4 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-12) | v4>>12 + v2 += m[1] ^ cst7 + v2 += v7 + v13 
^= v2 + v13 = v13<<(32-8) | v13>>8 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-7) | v7>>7 + v3 += m[4] ^ cst9 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-8) | v14>>8 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-7) | v4>>7 + v1 += m[6] ^ cst3 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-8) | v12>>8 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-7) | v6>>7 + v0 += m[14] ^ cst10 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-8) | v15>>8 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-7) | v5>>7 + + // Round 14. + v0 += m[7] ^ cst9 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-12) | v4>>12 + v1 += m[3] ^ cst1 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-12) | v5>>12 + v2 += m[13] ^ cst12 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-12) | v6>>12 + v3 += m[11] ^ cst14 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-12) | v7>>12 + v2 += m[12] ^ cst13 + v2 += v6 + v14 ^= v2 + v14 = v14<<(32-8) | v14>>8 + v10 += v14 + v6 ^= v10 + v6 = v6<<(32-7) | v6>>7 + v3 += m[14] ^ cst11 + v3 += v7 + v15 ^= v3 + v15 = v15<<(32-8) | v15>>8 + v11 += v15 + v7 ^= v11 + v7 = v7<<(32-7) | v7>>7 + v1 += m[1] ^ cst3 + v1 += v5 + v13 ^= v1 + v13 = v13<<(32-8) | v13>>8 + v9 += v13 + v5 ^= v9 + v5 = v5<<(32-7) | v5>>7 + v0 += m[9] ^ cst7 + v0 += v4 + v12 ^= v0 + v12 = v12<<(32-8) | v12>>8 + v8 += v12 + v4 ^= v8 + v4 = v4<<(32-7) | v4>>7 + v0 += m[2] ^ cst6 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-12) | v5>>12 + v1 += m[5] ^ cst10 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-12) | v6>>12 + v2 += m[4] ^ cst0 + v2 += v7 + v13 ^= v2 + v13 = v13<<(32-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-12) | v7>>12 + v3 += m[15] ^ cst8 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-12) | v4>>12 + v2 += m[0] ^ cst4 + 
v2 += v7 + v13 ^= v2 + v13 = v13<<(32-8) | v13>>8 + v8 += v13 + v7 ^= v8 + v7 = v7<<(32-7) | v7>>7 + v3 += m[8] ^ cst15 + v3 += v4 + v14 ^= v3 + v14 = v14<<(32-8) | v14>>8 + v9 += v14 + v4 ^= v9 + v4 = v4<<(32-7) | v4>>7 + v1 += m[10] ^ cst5 + v1 += v6 + v12 ^= v1 + v12 = v12<<(32-8) | v12>>8 + v11 += v12 + v6 ^= v11 + v6 = v6<<(32-7) | v6>>7 + v0 += m[6] ^ cst2 + v0 += v5 + v15 ^= v0 + v15 = v15<<(32-8) | v15>>8 + v10 += v15 + v5 ^= v10 + v5 = v5<<(32-7) | v5>>7 + + h0 ^= v0 ^ v8 ^ s0 + h1 ^= v1 ^ v9 ^ s1 + h2 ^= v2 ^ v10 ^ s2 + h3 ^= v3 ^ v11 ^ s3 + h4 ^= v4 ^ v12 ^ s0 + h5 ^= v5 ^ v13 ^ s1 + h6 ^= v6 ^ v14 ^ s2 + h7 ^= v7 ^ v15 ^ s3 + + p = p[BlockSize:] + } + d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 +}