
Revert "Revert "move nntpchand source to nntpchan repo""

This reverts commit 1465b99df6.
Jeff Becker 2016-12-14 17:45:44 -05:00
parent 1465b99df6
commit a00a902022
156 changed files with 12200 additions and 1 deletion

.gitignore vendored

@@ -19,7 +19,7 @@ webroot
# built binaries
go
srndv2
./srndv2
# private key
*.key

contrib/backends/srndv2/.gitignore vendored Normal file

@@ -0,0 +1 @@
nntpchand


@@ -0,0 +1,10 @@
GOPATH=$(PWD)
all: nntpchand
nntpchand:
go build -o nntpchand -v nntpchan/cmd/nntpchan
clean:
go clean -v
rm -f nntpchand


@@ -0,0 +1,122 @@
package main
import (
log "github.com/Sirupsen/logrus"
"nntpchan/lib/config"
"nntpchan/lib/nntp"
"nntpchan/lib/store"
"nntpchan/lib/webhooks"
"net"
"net/http"
_ "net/http/pprof"
"os"
"os/signal"
"syscall"
"time"
)
func main() {
go func() {
err := http.ListenAndServe("127.0.0.1:7700", nil)
if err != nil {
log.Fatal(err)
}
}()
log.Info("starting up nntpchan...")
cfg_fname := "nntpchan.json"
conf, err := config.Ensure(cfg_fname)
if err != nil {
log.Fatal(err)
}
if conf.Log == "debug" {
log.SetLevel(log.DebugLevel)
}
sconfig := conf.Store
if sconfig == nil {
log.Fatal("no article storage configured")
}
nconfig := conf.NNTP
if nconfig == nil {
log.Fatal("no nntp server configured")
}
dconfig := conf.Database
if dconfig == nil {
log.Fatal("no database configured")
}
// create nntp server
nserv := nntp.NewServer()
nserv.Config = nconfig
nserv.Feeds = conf.Feeds
if nconfig.LoginsFile != "" {
nserv.Auth = nntp.FlatfileAuth(nconfig.LoginsFile)
}
// create article storage
nserv.Storage, err = store.NewFilesytemStorage(sconfig.Path, true)
if err != nil {
log.Fatal(err)
}
if conf.WebHooks != nil && len(conf.WebHooks) > 0 {
// put webhooks into nntp server event hooks
nserv.Hooks = webhooks.NewWebhooks(conf.WebHooks, nserv.Storage)
}
if conf.NNTPHooks != nil && len(conf.NNTPHooks) > 0 {
var hooks nntp.MulitHook
if nserv.Hooks != nil {
hooks = append(hooks, nserv.Hooks)
}
for _, h := range conf.NNTPHooks {
hooks = append(hooks, nntp.NewHook(h))
}
nserv.Hooks = hooks
}
// nntp server loop
go func() {
for {
naddr := conf.NNTP.Bind
log.Infof("Bind nntp server to %s", naddr)
nl, err := net.Listen("tcp", naddr)
if err == nil {
err = nserv.Serve(nl)
if err != nil {
nl.Close()
log.Errorf("nntpserver.serve() %s", err.Error())
}
} else {
log.Errorf("nntp server net.Listen failed: %s", err.Error())
}
time.Sleep(time.Second)
}
}()
// start persisting feeds
go nserv.PersistFeeds()
// handle signals
sigchnl := make(chan os.Signal, 1)
signal.Notify(sigchnl, syscall.SIGHUP)
for {
s := <-sigchnl
if s == syscall.SIGHUP {
// handle SIGHUP
conf, err := config.Ensure(cfg_fname)
if err == nil {
log.Infof("reloading config: %s", cfg_fname)
nserv.ReloadServer(conf.NNTP)
nserv.ReloadFeeds(conf.Feeds)
}
}
}
}


@@ -0,0 +1,42 @@
package main
// simple nntp server
import (
log "github.com/Sirupsen/logrus"
"nntpchan/lib/config"
"nntpchan/lib/nntp"
"nntpchan/lib/store"
"net"
)
func main() {
log.Info("starting NNTP server...")
conf, err := config.Ensure("settings.json")
if err != nil {
log.Fatal(err)
}
if conf.Log == "debug" {
log.SetLevel(log.DebugLevel)
}
serv := &nntp.Server{
Config: conf.NNTP,
Feeds: conf.Feeds,
}
serv.Storage, err = store.NewFilesytemStorage(conf.Store.Path, false)
if err != nil {
log.Fatal(err)
}
l, err := net.Listen("tcp", conf.NNTP.Bind)
if err != nil {
log.Fatal(err)
}
log.Info("listening on ", l.Addr())
err = serv.Serve(l)
if err != nil {
log.Fatal(err)
}
}


@@ -0,0 +1,4 @@
//
// server admin panel
//
package admin


@@ -0,0 +1,16 @@
package admin
import (
"net/http"
)
type Server struct {
}
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
func NewServer() *Server {
return &Server{}
}


@@ -0,0 +1,9 @@
package api
import (
"nntpchan/lib/model"
)
// json api
type API interface {
MakePost(p model.Post)
}


@@ -0,0 +1,2 @@
// json api
package api


@@ -0,0 +1,20 @@
package api
import (
"github.com/gorilla/mux"
"net/http"
)
// api server
type Server struct {
}
func (s *Server) HandlePing(w http.ResponseWriter, r *http.Request) {
}
// inject api routes
func (s *Server) SetupRoutes(r *mux.Router) {
// setup api pinger
r.Path("/ping").HandlerFunc(s.HandlePing)
}


@@ -0,0 +1,25 @@
package cache
import (
"github.com/majestrate/srndv2/lib/config"
"strings"
)
// create cache from config structure
func FromConfig(c *config.CacheConfig) (cache CacheInterface, err error) {
// set up cache
if c != nil {
// get cache backend
cacheBackend := strings.ToLower(c.Backend)
if cacheBackend == "redis" {
// redis cache
cache, err = NewRedisCache(c.Addr, c.Password)
} else {
// fall through
}
}
if cache == nil {
cache = NewNullCache()
}
return
}


@@ -0,0 +1,17 @@
package cache
import (
"io"
"net/http"
)
// recache markup to io.Writer
type RecacheHandler func(io.Writer) error
type CacheInterface interface {
ServeCached(w http.ResponseWriter, r *http.Request, key string, handler RecacheHandler)
DeleteCache(key string)
Cache(key string, body io.Reader)
Has(key string) bool
Close()
}
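As a usage sketch that is not part of this commit (the nntpchan/lib/cache import path is assumed), this is how the interface above is meant to be driven from an HTTP handler: ServeCached replays a cached body, or rebuilds it through the RecacheHandler on a miss. FromConfig with a nil config falls back to the null cache.

package main

import (
    "io"
    "log"
    "net/http"

    "nntpchan/lib/cache"
)

func main() {
    // no cache config given, so FromConfig falls back to the null cache
    c, err := cache.FromConfig(nil)
    if err != nil {
        log.Fatal(err)
    }
    http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        // replay a cached page, or rebuild it through the RecacheHandler on a miss
        c.ServeCached(w, r, "front-page", func(wr io.Writer) error {
            _, err := io.WriteString(wr, "rendered markup goes here\n")
            return err
        })
    })
    log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}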


@@ -0,0 +1,68 @@
// +build !disable_File
package cache
import (
log "github.com/Sirupsen/logrus"
"io"
"net/http"
"os"
"time"
)
type FileCache struct {
}
func (self *FileCache) Has(key string) bool {
_, err := os.Stat(key)
return !os.IsNotExist(err)
}
func (self *FileCache) ServeCached(w http.ResponseWriter, r *http.Request, key string, handler RecacheHandler) {
_, err := os.Stat(key)
if os.IsNotExist(err) {
modtime := time.Now().UTC()
ts := modtime.Format(http.TimeFormat)
w.Header().Set("Last-Modified", ts)
f, err := os.Create(key)
if err == nil {
defer f.Close()
mw := io.MultiWriter(f, w)
err = handler(mw)
}
return
}
http.ServeFile(w, r, key)
}
func (self *FileCache) DeleteCache(key string) {
err := os.Remove(key)
if err != nil {
log.Warnf("cannot remove file %s: %s", key, err.Error())
}
}
func (self *FileCache) Cache(key string, body io.Reader) {
f, err := os.Create(key)
if err != nil {
log.Warnf("cannot cache %s: %s", key, err.Error())
return
}
defer f.Close()
_, err = io.Copy(f, body)
if err != nil {
log.Warnf("cannot cache key %s: %s", key, err.Error())
}
}
func (self *FileCache) Close() {
}
func NewFileCache() CacheInterface {
cache := new(FileCache)
return cache
}


@@ -0,0 +1,33 @@
package cache
import (
"io"
"io/ioutil"
"net/http"
)
type NullCache struct {
}
func (self *NullCache) ServeCached(w http.ResponseWriter, r *http.Request, key string, handler RecacheHandler) {
handler(w)
}
func (self *NullCache) DeleteCache(key string) {
}
func (self *NullCache) Cache(key string, body io.Reader) {
io.Copy(ioutil.Discard, body)
}
func (self *NullCache) Close() {
}
func (self *NullCache) Has(key string) bool {
return false
}
func NewNullCache() CacheInterface {
cache := new(NullCache)
return cache
}


@@ -0,0 +1,73 @@
package config
import "regexp"
// configuration for local article policies
type ArticleConfig struct {
// explicitly allow these newsgroups (regexp)
AllowGroups []string `json:"whitelist"`
// explicitly disallow these newsgroups (regexp)
DisallowGroups []string `json:"blacklist"`
// only allow explicitly allowed groups
ForceWhitelist bool `json:"force-whitelist"`
// allow anonymous posts?
AllowAnon bool `json:"anon"`
// allow attachments?
AllowAttachments bool `json:"attachments"`
// allow anonymous attachments?
AllowAnonAttachments bool `json:"anon-attachments"`
}
func (c *ArticleConfig) AllowGroup(group string) bool {
for _, g := range c.DisallowGroups {
r := regexp.MustCompile(g)
if r.MatchString(group) && c.ForceWhitelist {
// disallowed
return false
}
}
// check allowed groups first
for _, g := range c.AllowGroups {
r := regexp.MustCompile(g)
if r.MatchString(group) {
return true
}
}
return !c.ForceWhitelist
}
// allow an article?
func (c *ArticleConfig) Allow(msgid, group string, anon, attachment bool) bool {
// check attachment policy
if c.AllowGroup(group) {
allow := true
// no anon ?
if anon && !c.AllowAnon {
allow = false
}
// no attachments ?
if allow && attachment && !c.AllowAttachments {
allow = false
}
// no anon attachments ?
if allow && attachment && anon && !c.AllowAnonAttachments {
allow = false
}
return allow
} else {
return false
}
}
var DefaultArticlePolicy = ArticleConfig{
AllowGroups: []string{"ctl", "overchan.test"},
DisallowGroups: []string{"overchan.cp"},
ForceWhitelist: false,
AllowAnon: true,
AllowAttachments: true,
AllowAnonAttachments: false,
}
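An illustrative check of how the default policy above treats posts; the message-ids and the import path are invented for the sketch.

package main

import (
    "fmt"

    "nntpchan/lib/config"
)

func main() {
    p := config.DefaultArticlePolicy
    // anonymous text post to a whitelisted group: allowed
    fmt.Println(p.Allow("<a@example.tld>", "overchan.test", true, false))
    // anonymous post with an attachment: rejected, AllowAnonAttachments is false
    fmt.Println(p.Allow("<b@example.tld>", "overchan.test", true, true))
}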


@@ -0,0 +1,13 @@
package config
// caching interface configuration
type CacheConfig struct {
// backend cache driver name
Backend string `json:"backend"`
// address for cache
Addr string `json:"addr"`
// username for login
User string `json:"user"`
// password for login
Password string `json:"password"`
}


@@ -0,0 +1,85 @@
package config
import (
"bytes"
"encoding/json"
"io/ioutil"
"os"
)
// main configuration
type Config struct {
// nntp server configuration
NNTP *NNTPServerConfig `json:"nntp"`
// log level
Log string `json:"log"`
// article storage config
Store *StoreConfig `json:"storage"`
// web hooks to call
WebHooks []*WebhookConfig `json:"webhooks"`
// external scripts to call
NNTPHooks []*NNTPHookConfig `json:"nntphooks"`
// database backend configuration
Database *DatabaseConfig `json:"db"`
// list of feeds to add on runtime
Feeds []*FeedConfig `json:"feeds"`
// unexported fields ...
// absolute filepath to configuration
fpath string
}
// default configuration
var DefaultConfig = Config{
Store: &DefaultStoreConfig,
NNTP: &DefaultNNTPConfig,
Database: &DefaultDatabaseConfig,
WebHooks: []*WebhookConfig{DefaultWebHookConfig},
NNTPHooks: []*NNTPHookConfig{DefaultNNTPHookConfig},
Feeds: DefaultFeeds,
Log: "debug",
}
// reload configuration
func (c *Config) Reload() (err error) {
var b []byte
b, err = ioutil.ReadFile(c.fpath)
if err == nil {
err = json.Unmarshal(b, c)
}
return
}
// ensure that a config file exists
// creates one if it does not exist
func Ensure(fname string) (cfg *Config, err error) {
_, err = os.Stat(fname)
if os.IsNotExist(err) {
err = nil
var d []byte
d, err = json.Marshal(&DefaultConfig)
if err == nil {
b := new(bytes.Buffer)
err = json.Indent(b, d, "", " ")
if err == nil {
err = ioutil.WriteFile(fname, b.Bytes(), 0600)
}
}
}
if err == nil {
cfg, err = Load(fname)
}
return
}
// load configuration file
func Load(fname string) (cfg *Config, err error) {
cfg = new(Config)
cfg.fpath = fname
err = cfg.Reload()
if err != nil {
cfg = nil
}
return
}
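A minimal sketch, assuming the nntpchan/lib/config import path, of how Ensure and Reload are meant to be used together; the filename matches the daemon's default but is otherwise arbitrary.

package main

import (
    "log"

    "nntpchan/lib/config"
)

func main() {
    // writes an indented default config on first run, then loads it
    cfg, err := config.Ensure("nntpchan.json")
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("nntp will bind on %s", cfg.NNTP.Bind)
    // pick up edits made to the file on disk, e.g. on SIGHUP
    if err := cfg.Reload(); err != nil {
        log.Printf("reload failed: %s", err)
    }
}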


@@ -0,0 +1,18 @@
package config
type DatabaseConfig struct {
// url or address for database connector
Addr string `json:"addr"`
// password to use
Password string `json:"password"`
// username to use
Username string `json:"username"`
// type of database to use
Type string `json:"type"`
}
var DefaultDatabaseConfig = DatabaseConfig{
Type: "postgres",
Addr: "/var/run/postgresql",
Password: "",
}


@@ -0,0 +1,4 @@
//
// package for parsing config files
//
package config


@@ -0,0 +1,33 @@
package config
// configuration for 1 nntp feed
type FeedConfig struct {
// feed's policy, filters articles
Policy *ArticleConfig `json:"policy"`
// remote server's address
Addr string `json:"addr"`
// proxy server config
Proxy *ProxyConfig `json:"proxy"`
// nntp username to log in with
Username string `json:"username"`
// nntp password to use when logging in
Password string `json:"password"`
// do we want to use tls?
TLS bool `json:"tls"`
// the name of this feed
Name string `json:"name"`
// how often to pull articles from the server in minutes
// 0 for never
PullInterval int `json:"pull"`
}
var DuummyFeed = FeedConfig{
Policy: &DefaultArticlePolicy,
Addr: "nntp.dummy.tld:1119",
Proxy: &DefaultTorProxy,
Name: "dummy",
}
var DefaultFeeds = []*FeedConfig{
&DuummyFeed,
}
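A hand-written feed entry for a Tor-reachable peer might look like the following sketch; the onion address is a placeholder and the import path is assumed.

package main

import (
    "fmt"

    "nntpchan/lib/config"
)

func main() {
    // an illustrative outbound feed pulled every 10 minutes through the
    // default Tor SOCKS proxy, reusing the default article policy
    peer := &config.FeedConfig{
        Name:         "example-peer",
        Addr:         "peerexample.onion:119",
        Proxy:        &config.DefaultTorProxy,
        Policy:       &config.DefaultArticlePolicy,
        PullInterval: 10,
    }
    fmt.Printf("would sync with %s via %s proxy\n", peer.Addr, peer.Proxy.Type)
}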


@@ -0,0 +1,21 @@
package config
type FrontendConfig struct {
// bind to address
BindAddr string `json:"bind"`
// frontend cache
Cache *CacheConfig `json:"cache"`
// frontend ssl settings
SSL *SSLSettings `json:"ssl"`
// static files directory
Static string `json:"static_dir"`
// http middleware configuration
Middleware *MiddlewareConfig `json:"middleware"`
}
// default Frontend Configuration
var DefaultFrontendConfig = FrontendConfig{
BindAddr: "127.0.0.1:18888",
Static: "./files/static/",
Middleware: &DefaultMiddlewareConfig,
}


@@ -0,0 +1,15 @@
package config
// config for external callback for nntp articles
type NNTPHookConfig struct {
// name of hook
Name string `json:"name"`
// executable script path to be called with arguments: /path/to/article
Exec string `json:"exec"`
}
// default dummy hook
var DefaultNNTPHookConfig = &NNTPHookConfig{
Name: "dummy",
Exec: "/bin/true",
}


@@ -0,0 +1,14 @@
package config
// configuration for http middleware
type MiddlewareConfig struct {
// middleware type, currently just 1 is available: overchan
Type string `json:"type"`
// directory for our html templates
Templates string `json:"templates_dir"`
}
var DefaultMiddlewareConfig = MiddlewareConfig{
Type: "overchan",
Templates: "./files/templates/overchan/",
}


@@ -0,0 +1,24 @@
package config
type NNTPServerConfig struct {
// address to bind to
Bind string `json:"bind"`
// name of the nntp server
Name string `json:"name"`
// default inbound article policy
Article *ArticleConfig `json:"policy"`
// do we allow anonymous NNTP sync?
AnonNNTP bool `json:"anon-nntp"`
// ssl settings for nntp
SSL *SSLSettings
// file with login credentials
LoginsFile string `json:"authfile"`
}
var DefaultNNTPConfig = NNTPServerConfig{
AnonNNTP: false,
Bind: "0.0.0.0:1119",
Name: "nntp.server.tld",
Article: &DefaultArticlePolicy,
LoginsFile: "",
}


@@ -0,0 +1,13 @@
package config
// proxy configuration
type ProxyConfig struct {
Type string `json:"type"`
Addr string `json:"addr"`
}
// default tor proxy
var DefaultTorProxy = ProxyConfig{
Type: "socks",
Addr: "127.0.0.1:9050",
}


@@ -0,0 +1,11 @@
package config
// settings for setting up ssl
type SSLSettings struct {
// path to ssl private key
SSLKeyFile string `json:"key"`
// path to ssl certificate signed by CA
SSLCertFile string `json:"cert"`
// domain name to use for ssl
DomainName string `json:"fqdn"`
}


@@ -0,0 +1,10 @@
package config
type StoreConfig struct {
// path to article directory
Path string `json:"path"`
}
var DefaultStoreConfig = StoreConfig{
Path: "storage",
}


@@ -0,0 +1,17 @@
package config
// configuration for a single web hook
type WebhookConfig struct {
// user provided name for this hook
Name string `json:"name"`
// callback URL for webhook
URL string `json:"url"`
// dialect to use when calling webhook
Dialect string `json:"dialect"`
}
var DefaultWebHookConfig = &WebhookConfig{
Name: "vichan",
Dialect: "vichan",
URL: "http://localhost/webhook.php",
}


@@ -0,0 +1,5 @@
//
// nntpchan crypto package
// wraps all external crypto libs
//
package crypto


@@ -0,0 +1,8 @@
package crypto
import (
"github.com/dchest/blake256"
)
// common hash function is blake2
var Hash = blake256.New


@@ -0,0 +1,83 @@
package crypto
import (
"crypto/sha512"
"hash"
"nntpchan/lib/crypto/nacl"
)
type fuckyNacl struct {
k []byte
hash hash.Hash
}
func (fucky *fuckyNacl) Write(d []byte) (int, error) {
return fucky.hash.Write(d)
}
func (fucky *fuckyNacl) Sign() (s Signature) {
h := fucky.hash.Sum(nil)
if h == nil {
panic("fuck.hash.Sum == nil")
}
kp := nacl.LoadSignKey(fucky.k)
defer kp.Free()
sk := kp.Secret()
sig := nacl.CryptoSignFucky(h, sk)
if sig == nil {
panic("fucky signer's call to nacl.CryptoSignFucky returned nil")
}
s = Signature(sig)
fucky.resetState()
return
}
// reset inner state so we can reuse this fuckyNacl for another operation
func (fucky *fuckyNacl) resetState() {
fucky.hash = sha512.New()
}
func (fucky *fuckyNacl) Verify(sig Signature) (valid bool) {
h := fucky.hash.Sum(nil)
if h == nil {
panic("fuck.hash.Sum == nil")
}
valid = nacl.CryptoVerifyFucky(h, sig, fucky.k)
fucky.resetState()
return
}
func createFucky(k []byte) *fuckyNacl {
return &fuckyNacl{
k: k,
hash: sha512.New(),
}
}
// create a standard signer given a secret key
func CreateSigner(sk []byte) Signer {
return createFucky(sk)
}
// create a standard verifier given a public key
func CreateVerifier(pk []byte) Verifer {
return createFucky(pk)
}
// get the public component given the secret key
func ToPublic(sk []byte) (pk []byte) {
kp := nacl.LoadSignKey(sk)
defer kp.Free()
pk = kp.Public()
return
}
// create a standard keypair
func GenKeypair() (pk, sk []byte) {
kp := nacl.GenSignKeypair()
defer kp.Free()
pk = kp.Public()
sk = kp.Seed()
return
}


@@ -0,0 +1,95 @@
package nacl
// #cgo freebsd CFLAGS: -I/usr/local/include
// #cgo freebsd LDFLAGS: -L/usr/local/lib
// #cgo LDFLAGS: -lsodium
// #include <sodium.h>
import "C"
import (
"errors"
)
// encrypts a message to a user given their public key is known
// returns an encrypted box
func CryptoBox(msg, nounce, pk, sk []byte) ([]byte, error) {
msgbuff := NewBuffer(msg)
defer msgbuff.Free()
// check sizes
if len(pk) != int(C.crypto_box_publickeybytes()) {
err := errors.New("len(pk) != crypto_box_publickey_bytes")
return nil, err
}
if len(sk) != int(C.crypto_box_secretkeybytes()) {
err := errors.New("len(sk) != crypto_box_secretkey_bytes")
return nil, err
}
if len(nounce) != int(C.crypto_box_macbytes()) {
err := errors.New("len(nounce) != crypto_box_macbytes()")
return nil, err
}
pkbuff := NewBuffer(pk)
defer pkbuff.Free()
skbuff := NewBuffer(sk)
defer skbuff.Free()
nouncebuff := NewBuffer(nounce)
defer nouncebuff.Free()
resultbuff := malloc(msgbuff.size + nouncebuff.size)
defer resultbuff.Free()
res := C.crypto_box_easy(resultbuff.uchar(), msgbuff.uchar(), C.ulonglong(msgbuff.size), nouncebuff.uchar(), pkbuff.uchar(), skbuff.uchar())
if res != 0 {
err := errors.New("crypto_box_easy failed")
return nil, err
}
return resultbuff.Bytes(), nil
}
// open an encrypted box
func CryptoBoxOpen(box, nounce, sk, pk []byte) ([]byte, error) {
boxbuff := NewBuffer(box)
defer boxbuff.Free()
// check sizes
if len(pk) != int(C.crypto_box_publickeybytes()) {
err := errors.New("len(pk) != crypto_box_publickey_bytes")
return nil, err
}
if len(sk) != int(C.crypto_box_secretkeybytes()) {
err := errors.New("len(sk) != crypto_box_secretkey_bytes")
return nil, err
}
if len(nounce) != int(C.crypto_box_macbytes()) {
err := errors.New("len(nounce) != crypto_box_macbytes()")
return nil, err
}
pkbuff := NewBuffer(pk)
defer pkbuff.Free()
skbuff := NewBuffer(sk)
defer skbuff.Free()
nouncebuff := NewBuffer(nounce)
defer nouncebuff.Free()
resultbuff := malloc(boxbuff.size - nouncebuff.size)
defer resultbuff.Free()
// decrypt
res := C.crypto_box_open_easy(resultbuff.uchar(), boxbuff.uchar(), C.ulonglong(boxbuff.size), nouncebuff.uchar(), pkbuff.uchar(), skbuff.uchar())
if res != 0 {
return nil, errors.New("crypto_box_open_easy failed")
}
// return result
return resultbuff.Bytes(), nil
}
// generate a new nounce
func NewBoxNounce() []byte {
return RandBytes(NounceLen())
}
// length of a nounce
func NounceLen() int {
return int(C.crypto_box_macbytes())
}


@@ -0,0 +1,86 @@
package nacl
// #cgo freebsd CFLAGS: -I/usr/local/include
// #cgo freebsd LDFLAGS: -L/usr/local/lib
// #cgo LDFLAGS: -lsodium
// #include <sodium.h>
//
// unsigned char * deref_uchar(void * ptr) { return (unsigned char*) ptr; }
//
import "C"
import (
"encoding/hex"
"reflect"
"unsafe"
)
// wrapper around malloc/free
type Buffer struct {
ptr unsafe.Pointer
length C.int
size C.size_t
}
// wrapper around nacl.malloc
func Malloc(size int) *Buffer {
if size > 0 {
return malloc(C.size_t(size))
}
return nil
}
// does not check for negatives
func malloc(size C.size_t) *Buffer {
ptr := C.malloc(size)
C.sodium_memzero(ptr, size)
buffer := &Buffer{ptr: ptr, size: size, length: C.int(size)}
return buffer
}
// create a new buffer copying from a byteslice
func NewBuffer(buff []byte) *Buffer {
buffer := Malloc(len(buff))
if buffer == nil {
return nil
}
if copy(buffer.Data(), buff) != len(buff) {
return nil
}
return buffer
}
func (self *Buffer) uchar() *C.uchar {
return C.deref_uchar(self.ptr)
}
func (self *Buffer) Length() int {
return int(self.length)
}
// get immutable byte slice
func (self *Buffer) Bytes() []byte {
buff := make([]byte, self.Length())
copy(buff, self.Data())
return buff
}
// get underlying byte slice
func (self *Buffer) Data() []byte {
hdr := reflect.SliceHeader{
Data: uintptr(self.ptr),
Len: self.Length(),
Cap: self.Length(),
}
return *(*[]byte)(unsafe.Pointer(&hdr))
}
func (self *Buffer) String() string {
return hex.EncodeToString(self.Data())
}
// zero out memory and then free
func (self *Buffer) Free() {
C.sodium_memzero(self.ptr, self.size)
C.free(self.ptr)
}
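A small sketch (not part of the commit, import path assumed) of the intended Buffer lifecycle: copy a secret into sodium-managed memory, read it back, then wipe and free the allocation.

package main

import (
    "fmt"

    "nntpchan/lib/crypto/nacl"
)

func main() {
    // copy a secret into sodium-managed memory
    buff := nacl.NewBuffer([]byte("super secret"))
    if buff == nil {
        panic("allocation failed")
    }
    // Bytes() hands back an independent copy, safe to keep after Free()
    out := buff.Bytes()
    // Free() zeroes the allocation with sodium_memzero before freeing it
    buff.Free()
    fmt.Printf("%d bytes copied through sodium memory\n", len(out))
}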


@@ -0,0 +1,178 @@
package nacl
// #cgo freebsd CFLAGS: -I/usr/local/include
// #cgo freebsd LDFLAGS: -L/usr/local/lib
// #cgo LDFLAGS: -lsodium
// #include <sodium.h>
import "C"
import (
"encoding/hex"
"errors"
"fmt"
)
type KeyPair struct {
pk *Buffer
sk *Buffer
}
// free this keypair from memory
func (self *KeyPair) Free() {
self.pk.Free()
self.sk.Free()
}
func (self *KeyPair) Secret() []byte {
return self.sk.Bytes()
}
func (self *KeyPair) Public() []byte {
return self.pk.Bytes()
}
func (self *KeyPair) Seed() []byte {
seed_len := C.crypto_sign_seedbytes()
return self.sk.Bytes()[:seed_len]
}
// generate a keypair
func GenSignKeypair() *KeyPair {
sk_len := C.crypto_sign_secretkeybytes()
sk := malloc(sk_len)
pk_len := C.crypto_sign_publickeybytes()
pk := malloc(pk_len)
res := C.crypto_sign_keypair(pk.uchar(), sk.uchar())
if res == 0 {
return &KeyPair{pk, sk}
}
pk.Free()
sk.Free()
return nil
}
// get public key from secret key
func GetSignPubkey(sk []byte) ([]byte, error) {
sk_len := C.crypto_sign_secretkeybytes()
if C.size_t(len(sk)) != sk_len {
return nil, errors.New(fmt.Sprintf("nacl.GetSignPubkey() invalid secret key size %d != %d", len(sk), sk_len))
}
pk_len := C.crypto_sign_publickeybytes()
pkbuff := malloc(pk_len)
defer pkbuff.Free()
skbuff := NewBuffer(sk)
defer skbuff.Free()
//XXX: hack
res := C.crypto_sign_seed_keypair(pkbuff.uchar(), skbuff.uchar(), skbuff.uchar())
if res != 0 {
return nil, errors.New(fmt.Sprintf("nacl.GetSignPubkey() failed to get public key from secret key: %d", res))
}
return pkbuff.Bytes(), nil
}
// make keypair from seed
func LoadSignKey(seed []byte) *KeyPair {
seed_len := C.crypto_sign_seedbytes()
if C.size_t(len(seed)) != seed_len {
return nil
}
seedbuff := NewBuffer(seed)
defer seedbuff.Free()
pk_len := C.crypto_sign_publickeybytes()
sk_len := C.crypto_sign_secretkeybytes()
pkbuff := malloc(pk_len)
skbuff := malloc(sk_len)
res := C.crypto_sign_seed_keypair(pkbuff.uchar(), skbuff.uchar(), seedbuff.uchar())
if res != 0 {
pkbuff.Free()
skbuff.Free()
return nil
}
return &KeyPair{pkbuff, skbuff}
}
func GenBoxKeypair() *KeyPair {
sk_len := C.crypto_box_secretkeybytes()
sk := malloc(sk_len)
pk_len := C.crypto_box_publickeybytes()
pk := malloc(pk_len)
res := C.crypto_box_keypair(pk.uchar(), sk.uchar())
if res == 0 {
return &KeyPair{pk, sk}
}
pk.Free()
sk.Free()
return nil
}
// get public key from secret key
func GetBoxPubkey(sk []byte) []byte {
sk_len := C.crypto_box_seedbytes()
if C.size_t(len(sk)) != sk_len {
return nil
}
pk_len := C.crypto_box_publickeybytes()
pkbuff := malloc(pk_len)
defer pkbuff.Free()
skbuff := NewBuffer(sk)
defer skbuff.Free()
// compute the public key
C.crypto_scalarmult_base(pkbuff.uchar(), skbuff.uchar())
return pkbuff.Bytes()
}
// load keypair from secret key
func LoadBoxKey(sk []byte) *KeyPair {
pk := GetBoxPubkey(sk)
if pk == nil {
return nil
}
pkbuff := NewBuffer(pk)
skbuff := NewBuffer(sk)
return &KeyPair{pkbuff, skbuff}
}
// make keypair from seed
func SeedBoxKey(seed []byte) *KeyPair {
seed_len := C.crypto_box_seedbytes()
if C.size_t(len(seed)) != seed_len {
return nil
}
seedbuff := NewBuffer(seed)
defer seedbuff.Free()
pk_len := C.crypto_box_publickeybytes()
sk_len := C.crypto_box_secretkeybytes()
pkbuff := malloc(pk_len)
skbuff := malloc(sk_len)
res := C.crypto_box_seed_keypair(pkbuff.uchar(), skbuff.uchar(), seedbuff.uchar())
if res != 0 {
pkbuff.Free()
skbuff.Free()
return nil
}
return &KeyPair{pkbuff, skbuff}
}
func (self *KeyPair) String() string {
return fmt.Sprintf("pk=%s sk=%s", hex.EncodeToString(self.pk.Data()), hex.EncodeToString(self.sk.Data()))
}
func CryptoSignPublicLen() int {
return int(C.crypto_sign_publickeybytes())
}
func CryptoSignSecretLen() int {
return int(C.crypto_sign_secretkeybytes())
}
func CryptoSignSeedLen() int {
return int(C.crypto_sign_seedbytes())
}
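A sketch, assuming the nntpchan/lib/crypto/nacl import path, of the relationship between a generated signing keypair, its seed, and LoadSignKey: the seed is the portable secret, and rebuilding from it yields the same public key.

package main

import (
    "bytes"
    "fmt"

    "nntpchan/lib/crypto/nacl"
)

func main() {
    kp := nacl.GenSignKeypair()
    if kp == nil {
        panic("keypair generation failed")
    }
    defer kp.Free()
    // LoadSignKey rebuilds the full keypair deterministically from the seed
    restored := nacl.LoadSignKey(kp.Seed())
    if restored == nil {
        panic("LoadSignKey failed")
    }
    defer restored.Free()
    fmt.Println("public keys match:", bytes.Equal(kp.Public(), restored.Public()))
}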


@@ -0,0 +1,44 @@
package nacl
// #cgo freebsd CFLAGS: -I/usr/local/include
// #cgo freebsd LDFLAGS: -L/usr/local/lib
// #cgo LDFLAGS: -lsodium
// #include <sodium.h>
import "C"
import (
"log"
)
// return how many bytes of overhead CryptoBox has
func CryptoBoxOverhead() int {
return int(C.crypto_box_macbytes())
}
// size of crypto_box public keys
func CryptoBoxPubKeySize() int {
return int(C.crypto_box_publickeybytes())
}
// size of crypto_box private keys
func CryptoBoxPrivKeySize() int {
return int(C.crypto_box_secretkeybytes())
}
// size of crypto_sign public keys
func CryptoSignPubKeySize() int {
return int(C.crypto_sign_publickeybytes())
}
// size of crypto_sign private keys
func CryptoSignPrivKeySize() int {
return int(C.crypto_sign_secretkeybytes())
}
// initialize sodium
func init() {
status := C.sodium_init()
if status == -1 {
log.Fatalf("failed to initialize libsodium status=%d", status)
}
}


@@ -0,0 +1,24 @@
package nacl
// #cgo freebsd CFLAGS: -I/usr/local/include
// #cgo freebsd LDFLAGS: -L/usr/local/lib
// #cgo LDFLAGS: -lsodium
// #include <sodium.h>
import "C"
func randbytes(size C.size_t) *Buffer {
buff := malloc(size)
C.randombytes_buf(buff.ptr, size)
return buff
}
func RandBytes(size int) []byte {
if size > 0 {
buff := randbytes(C.size_t(size))
defer buff.Free()
return buff.Bytes()
}
return nil
}


@@ -0,0 +1,58 @@
package nacl
// #cgo freebsd CFLAGS: -I/usr/local/include
// #cgo freebsd LDFLAGS: -L/usr/local/lib
// #cgo LDFLAGS: -lsodium
// #include <sodium.h>
import "C"
// sign data detached with secret key sk
func CryptoSignDetached(msg, sk []byte) []byte {
msgbuff := NewBuffer(msg)
defer msgbuff.Free()
skbuff := NewBuffer(sk)
defer skbuff.Free()
if skbuff.size != C.crypto_sign_secretkeybytes() {
return nil
}
// allocate the signature buffer
sig := malloc(C.crypto_sign_bytes())
defer sig.Free()
// compute signature
siglen := C.ulonglong(0)
res := C.crypto_sign_detached(sig.uchar(), &siglen, msgbuff.uchar(), C.ulonglong(msgbuff.size), skbuff.uchar())
if res == 0 && siglen == C.ulonglong(C.crypto_sign_bytes()) {
// return copy of signature buffer
return sig.Bytes()
}
// failure to sign
return nil
}
// sign data with secret key sk
// return detached sig
// this uses crypto_sign instead of crypto_sign_detached
func CryptoSignFucky(msg, sk []byte) []byte {
msgbuff := NewBuffer(msg)
defer msgbuff.Free()
skbuff := NewBuffer(sk)
defer skbuff.Free()
if skbuff.size != C.crypto_sign_secretkeybytes() {
return nil
}
// allocate the signed message buffer
sig := malloc(C.crypto_sign_bytes() + msgbuff.size)
defer sig.Free()
// compute signature
siglen := C.ulonglong(0)
res := C.crypto_sign(sig.uchar(), &siglen, msgbuff.uchar(), C.ulonglong(msgbuff.size), skbuff.uchar())
if res == 0 {
// return copy of signature inside the signed message
offset := int(C.crypto_sign_bytes())
return sig.Bytes()[:offset]
}
// failure to sign
return nil
}


@@ -0,0 +1,342 @@
package nacl
import (
"bytes"
"errors"
"io"
"net"
"time"
)
// TOY encrypted authenticated stream protocol like tls
var BadHandshake = errors.New("Bad handshake")
var ShortWrite = errors.New("short write")
var ShortRead = errors.New("short read")
var Closed = errors.New("socket closed")
// write boxes at 512 bytes at a time
const DefaultMTU = 512
// wrapper around crypto_box
// provides an authenticated encrypted stream
// this is a TOY
type CryptoStream struct {
// underlying stream to write on
stream io.ReadWriteCloser
// secret key seed
key *KeyPair
// public key of who we expect on the other end
remote_pk []byte
tx_nonce []byte
rx_nonce []byte
// box size
mtu int
}
func (cs *CryptoStream) Close() (err error) {
if cs.key != nil {
cs.key.Free()
cs.key = nil
}
return cs.stream.Close()
}
// implements io.Writer
func (cs *CryptoStream) Write(data []byte) (n int, err error) {
// let's split it up
for n < len(data) && err == nil {
if n+cs.mtu < len(data) {
err = cs.writeSegment(data[n : n+cs.mtu])
n += cs.mtu
} else {
err = cs.writeSegment(data[n:])
if err == nil {
n = len(data)
}
}
}
return
}
func (cs *CryptoStream) public() (p []byte) {
p = cs.key.Public()
return
}
func (cs *CryptoStream) secret() (s []byte) {
s = cs.key.Secret()
return
}
// read 1 segment
func (cs *CryptoStream) readSegment() (s []byte, err error) {
var stream_read int
var seg []byte
nl := NounceLen()
msg := make([]byte, cs.mtu+nl)
stream_read, err = cs.stream.Read(msg)
seg, err = CryptoBoxOpen(msg[:stream_read], cs.rx_nonce, cs.secret(), cs.remote_pk)
if err == nil {
copy(cs.rx_nonce, seg[:nl])
s = seg[nl:]
}
return
}
// write 1 segment encrypted
// update nounce
func (cs *CryptoStream) writeSegment(data []byte) (err error) {
var segment []byte
nl := NounceLen()
msg := make([]byte, len(data)+nl)
// generate next nounce
nextNounce := NewBoxNounce()
copy(msg, nextNounce)
copy(msg[nl:], data)
// encrypt the nounce-prefixed segment with the current nounce
segment, err = CryptoBox(msg, cs.tx_nonce, cs.remote_pk, cs.secret())
var n int
n, err = cs.stream.Write(segment)
if n != len(segment) {
// short write?
err = ShortWrite
return
}
// update nounce
copy(cs.tx_nonce, nextNounce)
return
}
// implements io.Reader
func (cs *CryptoStream) Read(data []byte) (n int, err error) {
var seg []byte
seg, err = cs.readSegment()
if err == nil {
if len(seg) <= len(data) {
copy(data, seg)
n = len(seg)
} else {
// too big?
err = ShortRead
}
}
return
}
// version 0 protocol magic
var protocol_magic = []byte("BENIS|00")
// verify that a handshake is signed correctly and is in the expected format
func verifyHandshake(hs, pk []byte) (valid bool) {
ml := len(protocol_magic)
// valid handshake?
if bytes.Equal(hs[0:ml], protocol_magic) {
// check pk
pl := CryptoSignPublicLen()
nl := NounceLen()
if bytes.Equal(pk, hs[ml:ml+pl]) {
// check signature
msg := hs[0 : ml+pl+nl]
sig := hs[ml+pl+nl:]
valid = CryptoVerifyFucky(msg, sig, pk)
}
}
return
}
// get claimed public key from handshake
func getPubkey(hs []byte) (pk []byte) {
ml := len(protocol_magic)
pl := CryptoSignPublicLen()
pk = hs[ml : ml+pl]
return
}
func (cs *CryptoStream) genHandshake() (d []byte) {
// protocol magic string version 00
// Benis Encrypted Network Information Stream
// :-DDDDD meme crypto
d = append(d, protocol_magic...)
// our public key
d = append(d, cs.public()...)
// nounce
cs.tx_nonce = NewBoxNounce()
d = append(d, cs.tx_nonce...)
// sign protocol magic string, nounce and pubkey
sig := CryptoSignFucky(d, cs.secret())
// if sig is nil we'll just die
d = append(d, sig...)
return
}
// extract nounce from handshake
func getNounce(hs []byte) (n []byte) {
ml := len(protocol_magic)
pl := CryptoSignPublicLen()
nl := NounceLen()
n = hs[ml+pl : ml+pl+nl]
return
}
// initiate protocol handshake
func (cs *CryptoStream) Handshake() (err error) {
// send them our info
hs := cs.genHandshake()
var n int
n, err = cs.stream.Write(hs)
if n != len(hs) {
err = ShortWrite
return
}
// read their info
buff := make([]byte, len(hs))
_, err = io.ReadFull(cs.stream, buff)
if cs.remote_pk == nil {
// inbound
pk := getPubkey(buff)
cs.remote_pk = make([]byte, len(pk))
copy(cs.remote_pk, pk)
}
if !verifyHandshake(buff, cs.remote_pk) {
// verification failed
err = BadHandshake
return
}
cs.rx_nonce = make([]byte, NounceLen())
copy(cs.rx_nonce, getNounce(buff))
return
}
// create a client
func Client(stream io.ReadWriteCloser, local_sk, remote_pk []byte) (c *CryptoStream) {
c = &CryptoStream{
stream: stream,
mtu: DefaultMTU,
}
c.remote_pk = make([]byte, len(remote_pk))
copy(c.remote_pk, remote_pk)
c.key = LoadSignKey(local_sk)
if c.key == nil {
return nil
}
return c
}
type CryptoConn struct {
stream *CryptoStream
conn net.Conn
}
func (cc *CryptoConn) Close() (err error) {
err = cc.stream.Close()
return
}
func (cc *CryptoConn) Write(d []byte) (n int, err error) {
return cc.stream.Write(d)
}
func (cc *CryptoConn) Read(d []byte) (n int, err error) {
return cc.stream.Read(d)
}
func (cc *CryptoConn) LocalAddr() net.Addr {
return cc.conn.LocalAddr()
}
func (cc *CryptoConn) RemoteAddr() net.Addr {
return cc.conn.RemoteAddr()
}
func (cc *CryptoConn) SetDeadline(t time.Time) (err error) {
return cc.conn.SetDeadline(t)
}
func (cc *CryptoConn) SetReadDeadline(t time.Time) (err error) {
return cc.conn.SetReadDeadline(t)
}
func (cc *CryptoConn) SetWriteDeadline(t time.Time) (err error) {
return cc.conn.SetWriteDeadline(t)
}
type CryptoListener struct {
l net.Listener
handshake chan net.Conn
accepted chan *CryptoConn
trust func(pk []byte) bool
key *KeyPair
}
func (cl *CryptoListener) Close() (err error) {
err = cl.l.Close()
close(cl.accepted)
close(cl.handshake)
cl.key.Free()
cl.key = nil
return
}
func (cl *CryptoListener) acceptInbound() {
for {
c, err := cl.l.Accept()
if err == nil {
cl.handshake <- c
} else {
return
}
}
}
func (cl *CryptoListener) runChans() {
for {
select {
case c := <-cl.handshake:
go func() {
s := &CryptoStream{
stream: c,
mtu: DefaultMTU,
key: cl.key,
}
err := s.Handshake()
if err == nil {
// we gud handshake was okay
if cl.trust(s.remote_pk) {
// the key is trusted okay
cl.accepted <- &CryptoConn{stream: s, conn: c}
} else {
// not trusted, close connection
s.Close()
}
}
}()
}
}
}
// accept inbound authenticated and trusted connections
func (cl *CryptoListener) Accept() (c net.Conn, err error) {
var ok bool
c, ok = <-cl.accepted
if !ok {
err = Closed
}
return
}
// create a listener
func Server(l net.Listener, local_sk []byte, trust func(pk []byte) bool) (s *CryptoListener) {
s = &CryptoListener{
l: l,
trust: trust,
handshake: make(chan net.Conn),
accepted: make(chan *CryptoConn),
}
s.key = LoadSignKey(local_sk)
go s.runChans()
go s.acceptInbound()
return
}
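A handshake-only sketch of the toy protocol above (import paths assumed): the client pins the server's public key, the server's trust callback decides which authenticated keys to accept. The segment read/write path stays a toy and is deliberately not exercised here.

package main

import (
    "fmt"
    "net"

    "nntpchan/lib/crypto"
    "nntpchan/lib/crypto/nacl"
)

func main() {
    // two ed25519 seeds; crypto.GenKeypair returns (public key, seed)
    serverPK, serverSeed := crypto.GenKeypair()
    _, clientSeed := crypto.GenKeypair()

    l, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        panic(err)
    }
    // accept any authenticated key for this demo
    srv := nacl.Server(l, serverSeed, func(pk []byte) bool { return true })

    go func() {
        c, err := net.Dial("tcp", l.Addr().String())
        if err != nil {
            panic(err)
        }
        // client pins the server's public key and runs the handshake
        cs := nacl.Client(c, clientSeed, serverPK)
        if err := cs.Handshake(); err != nil {
            panic(err)
        }
    }()

    // Accept only returns connections whose handshake verified and whose
    // key passed the trust callback
    conn, err := srv.Accept()
    if err != nil {
        panic(err)
    }
    fmt.Println("authenticated peer at", conn.RemoteAddr())
}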


@@ -0,0 +1,53 @@
package nacl
// #cgo freebsd CFLAGS: -I/usr/local/include
// #cgo freebsd LDFLAGS: -L/usr/local/lib
// #cgo LDFLAGS: -lsodium
// #include <sodium.h>
import "C"
// verify a fucky detached sig
func CryptoVerifyFucky(msg, sig, pk []byte) bool {
var smsg []byte
smsg = append(smsg, sig...)
smsg = append(smsg, msg...)
return CryptoVerify(smsg, pk)
}
// verify a signed message
func CryptoVerify(smsg, pk []byte) bool {
smsg_buff := NewBuffer(smsg)
defer smsg_buff.Free()
pk_buff := NewBuffer(pk)
defer pk_buff.Free()
if pk_buff.size != C.crypto_sign_publickeybytes() {
return false
}
mlen := C.ulonglong(0)
msg := malloc(C.size_t(len(smsg)))
defer msg.Free()
smlen := C.ulonglong(smsg_buff.size)
return C.crypto_sign_open(msg.uchar(), &mlen, smsg_buff.uchar(), smlen, pk_buff.uchar()) != -1
}
// verify a detached signature
// return true on valid otherwise false
func CryptoVerifyDetached(msg, sig, pk []byte) bool {
msg_buff := NewBuffer(msg)
defer msg_buff.Free()
sig_buff := NewBuffer(sig)
defer sig_buff.Free()
pk_buff := NewBuffer(pk)
defer pk_buff.Free()
if pk_buff.size != C.crypto_sign_publickeybytes() {
return false
}
// invalid sig size
if sig_buff.size != C.crypto_sign_bytes() {
return false
}
return C.crypto_sign_verify_detached(sig_buff.uchar(), msg_buff.uchar(), C.ulonglong(len(msg)), pk_buff.uchar()) == 0
}
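A round-trip sketch of the "fucky" sign/verify pair from this package (import path assumed): the signature prefix returned by CryptoSignFucky verifies against the same message and public key.

package main

import (
    "fmt"

    "nntpchan/lib/crypto/nacl"
)

func main() {
    kp := nacl.GenSignKeypair()
    if kp == nil {
        panic("keypair generation failed")
    }
    defer kp.Free()
    msg := []byte("test article body")
    // CryptoSignFucky returns just the signature prefix of the crypto_sign output
    sig := nacl.CryptoSignFucky(msg, kp.Secret())
    fmt.Println("valid:", nacl.CryptoVerifyFucky(msg, sig, kp.Public()))
}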


@@ -0,0 +1,34 @@
package crypto
import (
"bytes"
"crypto/rand"
"io"
"testing"
)
func TestNaclToPublic(t *testing.T) {
pk, sk := GenKeypair()
t_pk := ToPublic(sk)
if !bytes.Equal(pk, t_pk) {
t.Logf("%q != %q", pk, t_pk)
t.Fail()
}
}
func TestNaclSignVerify(t *testing.T) {
var msg [1024]byte
pk, sk := GenKeypair()
io.ReadFull(rand.Reader, msg[:])
signer := CreateSigner(sk)
signer.Write(msg[:])
sig := signer.Sign()
verifier := CreateVerifier(pk)
verifier.Write(msg[:])
if !verifier.Verify(sig) {
t.Logf("%q is invalid signature and is %dB long", sig, len(sig))
t.Fail()
}
}


@@ -0,0 +1,8 @@
package crypto
import (
"nntpchan/lib/crypto/nacl"
)
// generate random bytes
var RandBytes = nacl.RandBytes


@@ -0,0 +1,25 @@
package crypto
import "io"
// a detached signature
type Signature []byte
type SigEncoder interface {
// encode a signature to an io.Writer
// return error if one occurred while writing out the signature
Encode(sig Signature, w io.Writer) error
// encode a signature to a string
EncodeString(sig Signature) string
}
// a decoder of signatures
type SigDecoder interface {
// decode signature from io.Reader
// reads all data until io.EOF
// returns the signature or an error if one occurred while reading
Decode(r io.Reader) (Signature, error)
// decode a signature from string
// returns the signature or an error if one occurred while decoding
DecodeString(str string) (Signature, error)
}
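No encoder ships in this commit; as an assumption-labelled sketch, a hex-based implementation of both interfaces could look like this.

package main

import (
    "encoding/hex"
    "fmt"
    "io"
    "io/ioutil"

    "nntpchan/lib/crypto"
)

// hexSig is a hypothetical encoder/decoder satisfying the interfaces above
// by hex-encoding detached signatures; it is not part of this commit.
type hexSig struct{}

func (hexSig) Encode(sig crypto.Signature, w io.Writer) error {
    _, err := io.WriteString(w, hex.EncodeToString(sig))
    return err
}

func (hexSig) EncodeString(sig crypto.Signature) string {
    return hex.EncodeToString(sig)
}

func (hexSig) Decode(r io.Reader) (crypto.Signature, error) {
    d, err := ioutil.ReadAll(r)
    if err != nil {
        return nil, err
    }
    return hexSig{}.DecodeString(string(d))
}

func (hexSig) DecodeString(str string) (crypto.Signature, error) {
    b, err := hex.DecodeString(str)
    return crypto.Signature(b), err
}

var _ crypto.SigEncoder = hexSig{}
var _ crypto.SigDecoder = hexSig{}

func main() {
    fmt.Println(hexSig{}.EncodeString(crypto.Signature{0xde, 0xad, 0xbe, 0xef}))
}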


@@ -0,0 +1,14 @@
package crypto
import "io"
//
// provides generic signing interface for producing detached signatures
// call Write() to feed data to be signed, call Sign() to generate
// a detached signature
//
type Signer interface {
io.Writer
// generate detached Signature from previously fed body via Write()
Sign() Signature
}


@@ -0,0 +1,14 @@
package crypto
import "io"
// provides a generic signature verifier
// call Write() to feed in message body
// once the entire body has been fed in via Write() call Verify() with detached
// signature to verify the detached signature against the previously fed body
type Verifer interface {
io.Writer
// verify detached signature from body previously fed via Write()
// return true if the detached signature is valid given the body
Verify(sig Signature) bool
}


@@ -0,0 +1,258 @@
package database
import (
"errors"
"nntpchan/lib/config"
"nntpchan/lib/model"
"net"
"strings"
)
// generic database driver
type DB interface {
// finalize all transactions and close connection
// after calling this db driver can no longer be used
Close()
// ensure database is well formed
Ensure() error
// do we have a newsgroup locally?
HasNewsgroup(group string) (bool, error)
// have we seen an article with message-id before?
SeenArticle(message_id string) (bool, error)
// do we have an article locally given message-id?
HasArticle(message_id string) (bool, error)
// register a newsgroup with database
RegisterNewsgroup(group string) error
// register an article
RegisterArticle(a *model.Article) error
// get all articles in a newsgroup
// send entries down a channel
// return error if one happens while fetching
GetAllArticlesInGroup(group string, send chan model.ArticleEntry) error
// count all the articles in a newsgroup
CountAllArticlesInGroup(group string) (int64, error)
// get all articles locally known
GetAllArticles() ([]model.ArticleEntry, error)
// check if a newsgroup is banned
NewsgroupBanned(group string) (bool, error)
// ban newsgroup
BanNewsgroup(group string) error
// unban newsgroup
UnbanNewsgroup(group string) error
// return true if this root post has expired
IsExpired(root_message_id string) (bool, error)
// get an article's MessageID given the hash of the MessageID
// return an article entry or nil when it doesn't exist + and error if it happened
GetMessageIDByHash(hash string) (model.ArticleEntry, error)
// get root message_id, newsgroup, pageno for a post regardless if it's rootpost or not
GetInfoForMessage(msgid string) (string, string, int64, error)
// what page is the thread with this root post on?
// return newsgroup, pageno
GetPageForRootMessage(root_message_id string) (string, int64, error)
// record that a message given a message id was posted signed by this pubkey
RegisterSigned(message_id, pubkey string) error
// get the number of articles we have in all groups
ArticleCount() (int64, error)
// return true if the thread with the given root post message-id has any replies
ThreadHasReplies(root_message_id string) (bool, error)
// get the number of posts in a certain newsgroup since N seconds ago
// if N <= 0 then count all we have now
CountPostsInGroup(group string, time_frame int64) (int64, error)
// get all replies' message-id to a thread
// if last > 0 then get that many of the last replies
// start at reply number start
GetThreadReplies(root_message_id string, start, last int) ([]string, error)
// get a thread model given root post's message id
GetThread(root_message_id string) (model.Thread, error)
// get a thread model given root post hash
GetThreadByHash(hash string) (model.Thread, error)
// count the number of replies to this thread
CountThreadReplies(root_message_id string) (int64, error)
// get all attachments for a message given its message-id
GetPostAttachments(message_id string) ([]*model.Attachment, error)
// return true if this newsgroup has posts
GroupHasPosts(newsgroup string) (bool, error)
// get all active threads on a board
// send each thread's ArticleEntry down a channel
// return error if one happens while fetching
GetGroupThreads(newsgroup string, send chan model.ArticleEntry) error
// get every message id for root posts that need to be expired in a newsgroup
// threadcount is the upper bound on how many root posts we keep
GetRootPostsForExpiration(newsgroup string, threadcount int) ([]string, error)
// get the number of pages a board has
GetGroupPageCount(newsgroup string) (int64, error)
// get board page number N
// fully loads all models
GetGroupForPage(newsgroup string, pageno, perpage int) (*model.BoardPage, error)
// get the threads for ukko page
GetUkkoThreads(threadcount int) ([]*model.Thread, error)
// get a post model for a single post
GetPost(messageID string) (*model.Post, error)
// add a public key to the database
AddModPubkey(pubkey string) error
// mark that a mod with this pubkey can act on all boards
MarkModPubkeyGlobal(pubkey string) error
// revoke mod with this pubkey the privilege of being able to act on all boards
UnMarkModPubkeyGlobal(pubkey string) error
// check if this mod pubkey can moderate at a global level
CheckModPubkeyGlobal(pubkey string) bool
// check if a mod with this pubkey has permission to moderate at all
CheckModPubkey(pubkey string) (bool, error)
// check if a mod with this pubkey can moderate on the given newsgroup
CheckModPubkeyCanModGroup(pubkey, newsgroup string) (bool, error)
// add a pubkey to be able to mod a newsgroup
MarkModPubkeyCanModGroup(pubkey, newsgroup string) error
// remove a pubkey so they can't mod a newsgroup
UnMarkModPubkeyCanModGroup(pubkey, newsgroup string) error
// ban an article
BanArticle(messageID, reason string) error
// check if an article is banned or not
ArticleBanned(messageID string) (bool, error)
// Get ip address given the encrypted version
// return empty string if we don't have it
GetIPAddress(encAddr string) (string, error)
// check if an ip is banned from our local
CheckIPBanned(addr string) (bool, error)
// check if an encrypted ip is banned from our local
CheckEncIPBanned(encAddr string) (bool, error)
// ban an ip address from the local
BanAddr(addr string) error
// unban an ip address from the local
UnbanAddr(addr string) error
// ban an encrypted ip address from the remote
BanEncAddr(encAddr string) error
// return the encrypted version of an IPAddress
// if it's not already there insert it into the database
GetEncAddress(addr string) (string, error)
// get the decryption key for an encrypted address
// return empty string if we don't have it
GetEncKey(encAddr string) (string, error)
// delete an article from the database
// if the article is a root post then all replies are also deleted
DeleteArticle(msg_id string) error
// forget that we tracked an article given the message-id
ForgetArticle(msg_id string) error
// get threads per page for a newsgroup
GetThreadsPerPage(group string) (int, error)
// get pages per board for a newsgroup
GetPagesPerBoard(group string) (int, error)
// get every newsgroup we current carry
GetAllNewsgroups() ([]string, error)
// get the numerical id of the last and first article for a given group
GetLastAndFirstForGroup(group string) (int64, int64, error)
// get a message id give a newsgroup and the nntp id
GetMessageIDForNNTPID(group string, id int64) (string, error)
// get nntp id for a given message-id
GetNNTPIDForMessageID(group, msgid string) (int64, error)
// get the last N days post count in descending order
GetLastDaysPosts(n int64) ([]model.PostEntry, error)
// get the last N days post count for a newsgroup in descending order
GetLastDaysPostsForGroup(newsgroup string, n int64) ([]model.PostEntry, error)
// get post history per month since beginning of time
GetMonthlyPostHistory() ([]model.PostEntry, error)
// check if an nntp login cred is correct
CheckNNTPLogin(username, passwd string) (bool, error)
// add an nntp login credential
AddNNTPLogin(username, passwd string) error
// remove an nntp login credential
RemoveNNTPLogin(username string) error
// check if an nntp login credential given a user exists
CheckNNTPUserExists(username string) (bool, error)
// get the message ids of an article that has this header with the given value
GetMessageIDByHeader(name, value string) ([]string, error)
// get the header for a message given its message-id
GetHeadersForMessage(msgid string) (model.ArticleHeader, error)
// get all message-ids posted by posters in this cidr
GetMessageIDByCIDR(cidr *net.IPNet) ([]string, error)
// get all message-ids posted by poster with encrypted ip
GetMessageIDByEncryptedIP(encaddr string) ([]string, error)
}
// type for webhooks db backend
type WebhookDB interface {
// mark article sent
MarkMessageSent(msgid, feedname string) error
// check if an article was sent
CheckMessageSent(msgid, feedname string) (bool, error)
}
// get new database connector from configuration
func NewDBFromConfig(c *config.DatabaseConfig) (db DB, err error) {
dbtype := strings.ToLower(c.Type)
if dbtype == "postgres" {
err = errors.New("postgres not supported")
} else {
err = errors.New("no such database driver: " + c.Type)
}
return
}
func NewWebhooksDBFromConfig(c *config.DatabaseConfig) (db WebhookDB, err error) {
dbtype := strings.ToLower(c.Type)
if dbtype == "postgres" {
err = errors.New("postgres not supported")
} else {
err = errors.New("no such database driver: " + c.Type)
}
return
}


@@ -0,0 +1,4 @@
//
// database driver
//
package database


@@ -0,0 +1 @@
package database


@@ -0,0 +1,123 @@
package frontend
import (
"encoding/json"
"errors"
"fmt"
"github.com/dchest/captcha"
"github.com/gorilla/mux"
"github.com/gorilla/sessions"
"github.com/majestrate/srndv2/lib/config"
"net/http"
)
// server of captchas
// implements frontend.Middleware
type CaptchaServer struct {
h int
w int
store *sessions.CookieStore
prefix string
sessionName string
}
// create new captcha server using existing session store
func NewCaptchaServer(w, h int, prefix string, store *sessions.CookieStore) *CaptchaServer {
return &CaptchaServer{
h: h,
w: w,
prefix: prefix,
store: store,
sessionName: "captcha",
}
}
func (cs *CaptchaServer) Reload(c *config.MiddlewareConfig) {
}
func (cs *CaptchaServer) SetupRoutes(m *mux.Router) {
m.Path("/new").HandlerFunc(cs.NewCaptcha)
m.Path("/img/{f}").Handler(captcha.Server(cs.w, cs.h))
m.Path("/verify.json").HandlerFunc(cs.VerifyCaptcha)
}
// return true if this session has solved the last captcha given provided solution, otherwise false
func (cs *CaptchaServer) CheckSession(w http.ResponseWriter, r *http.Request, solution string) (bool, error) {
s, err := cs.store.Get(r, cs.sessionName)
if err == nil {
id, ok := s.Values["captcha_id"]
if ok {
return captcha.VerifyString(id.(string), solution), nil
}
}
return false, err
}
// verify a captcha
func (cs *CaptchaServer) VerifyCaptcha(w http.ResponseWriter, r *http.Request) {
dec := json.NewDecoder(r.Body)
defer r.Body.Close()
// request
req := make(map[string]string)
// response
resp := make(map[string]interface{})
resp["solved"] = false
// decode request
err := dec.Decode(&req)
if err == nil {
// decode okay
id, ok := req["id"]
if ok {
// we have id
solution, ok := req["solution"]
if ok {
// we have solution and id
resp["solved"] = captcha.VerifyString(id, solution)
} else {
// we don't have solution
err = errors.New("no captcha solution provided")
}
} else {
// we don't have id
err = errors.New("no captcha id provided")
}
}
if err != nil {
// error happened
resp["error"] = err.Error()
}
// send reply
w.Header().Set("Content-Type", "text/json; encoding=UTF-8")
enc := json.NewEncoder(w)
enc.Encode(resp)
}
// generate a new captcha
func (cs *CaptchaServer) NewCaptcha(w http.ResponseWriter, r *http.Request) {
// obtain session
sess, err := cs.store.Get(r, cs.sessionName)
if err != nil {
// failed to obtain session
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// new captcha
id := captcha.New()
// do we want to interpret as json?
use_json := r.URL.Query().Get("t") == "json"
// image url
url := fmt.Sprintf("%simg/%s.png", cs.prefix, id)
if use_json {
// send json
enc := json.NewEncoder(w)
enc.Encode(map[string]string{"id": id, "url": url})
} else {
// set captcha id
sess.Values["captcha_id"] = id
// save session
sess.Save(r, w)
// redirect to image
http.Redirect(w, r, url, http.StatusFound)
}
}
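A wiring sketch, not part of this commit, showing how the captcha server is mounted under a gorilla/mux subrouter with a cookie session store; the store secret, listen address, and frontend import path are placeholders/assumptions.

package main

import (
    "log"
    "net/http"

    "github.com/gorilla/mux"
    "github.com/gorilla/sessions"
    "github.com/majestrate/srndv2/lib/frontend"
)

func main() {
    // session store secret is a placeholder; use real key material in practice
    store := sessions.NewCookieStore([]byte("change-me"))
    cs := frontend.NewCaptchaServer(200, 400, "/captcha/", store)

    r := mux.NewRouter()
    // exposes /captcha/new, /captcha/img/{f} and /captcha/verify.json
    cs.SetupRoutes(r.PathPrefix("/captcha/").Subrouter())

    log.Fatal(http.ListenAndServe("127.0.0.1:8080", r))
}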


@@ -0,0 +1,5 @@
//
// nntpchan frontend
// allows posting to nntpchan network via various implementations
//
package frontend


@@ -0,0 +1,56 @@
package frontend
import (
"github.com/majestrate/srndv2/lib/cache"
"github.com/majestrate/srndv2/lib/config"
"github.com/majestrate/srndv2/lib/database"
"github.com/majestrate/srndv2/lib/model"
"github.com/majestrate/srndv2/lib/nntp"
"net"
)
// a frontend that displays nntp posts and allows posting
type Frontend interface {
// run mainloop using net.Listener
Serve(l net.Listener) error
// do we accept this inbound post?
AllowPost(p model.PostReference) bool
// trigger a manual regen of indexes for a root post
Regen(p model.PostReference)
// implements nntp.EventHooks
GotArticle(msgid nntp.MessageID, group nntp.Newsgroup)
// implements nntp.EventHooks
SentArticleVia(msgid nntp.MessageID, feedname string)
// reload config
Reload(c *config.FrontendConfig)
}
// create a new http frontend give frontend config
func NewHTTPFrontend(c *config.FrontendConfig, db database.DB) (f Frontend, err error) {
var markupCache cache.CacheInterface
markupCache, err = cache.FromConfig(c.Cache)
if err != nil {
return
}
var mid Middleware
if c.Middleware != nil {
// middleware configured
mid, err = OverchanMiddleware(c.Middleware, markupCache, db)
}
if err == nil {
// create http frontend only if no previous errors
f, err = createHttpFrontend(c, mid, db)
}
return
}
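A wiring sketch of the intended startup path once a database driver exists; with this commit's stub NewDBFromConfig it stops with an error at the database step (import paths assumed).

package main

import (
    "log"
    "net"

    "github.com/majestrate/srndv2/lib/config"
    "github.com/majestrate/srndv2/lib/database"
    "github.com/majestrate/srndv2/lib/frontend"
)

func main() {
    fconf := &config.DefaultFrontendConfig
    // no database driver ships in this commit, so this currently returns an error
    db, err := database.NewDBFromConfig(&config.DefaultDatabaseConfig)
    if err != nil {
        log.Fatalf("no usable database backend yet: %s", err)
    }
    f, err := frontend.NewHTTPFrontend(fconf, db)
    if err != nil {
        log.Fatal(err)
    }
    l, err := net.Listen("tcp", fconf.BindAddr)
    if err != nil {
        log.Fatal(err)
    }
    log.Fatal(f.Serve(l))
}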


@@ -0,0 +1,131 @@
package frontend
import (
"fmt"
log "github.com/Sirupsen/logrus"
"github.com/gorilla/mux"
"github.com/majestrate/srndv2/lib/admin"
"github.com/majestrate/srndv2/lib/api"
"github.com/majestrate/srndv2/lib/cache"
"github.com/majestrate/srndv2/lib/config"
"github.com/majestrate/srndv2/lib/database"
"github.com/majestrate/srndv2/lib/model"
"github.com/majestrate/srndv2/lib/nntp"
"net"
"net/http"
)
// http frontend server
// provides glue layer between nntp and middleware
type httpFrontend struct {
// http mux
httpmux *mux.Router
// admin panel
adminPanel *admin.Server
// static files path
staticDir string
// http middleware
middleware Middleware
// api server
apiserve *api.Server
// database driver
db database.DB
}
// reload http frontend
// reloads middleware
func (f *httpFrontend) Reload(c *config.FrontendConfig) {
if f.middleware == nil {
if c.Middleware != nil {
markupcache, err := cache.FromConfig(c.Cache)
if err == nil {
// no middleware set, create middleware
f.middleware, err = OverchanMiddleware(c.Middleware, markupcache, f.db)
if err != nil {
log.Errorf("overchan middleware reload failed: %s", err.Error())
}
} else {
// error creating cache
log.Errorf("failed to create cache: %s", err.Error())
}
}
} else {
// middleware exists
// do middleware reload
f.middleware.Reload(c.Middleware)
}
}
// serve http requests from net.Listener
func (f *httpFrontend) Serve(l net.Listener) (err error) {
// serve http
err = http.Serve(l, f.httpmux)
return
}
// serve robots.txt page
func (f *httpFrontend) serveRobots(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "User-Agent: *\nDisallow: /\n")
}
func (f *httpFrontend) AllowPost(p model.PostReference) bool {
// TODO: implement
return true
}
func (f *httpFrontend) Regen(p model.PostReference) {
// TODO: implement
}
func (f *httpFrontend) GotArticle(msgid nntp.MessageID, group nntp.Newsgroup) {
// TODO: implement
}
func (f *httpFrontend) SentArticleVia(msgid nntp.MessageID, feedname string) {
// TODO: implement
}
func createHttpFrontend(c *config.FrontendConfig, mid Middleware, db database.DB) (f *httpFrontend, err error) {
f = new(httpFrontend)
// set db
// db.Ensure() called elsewhere
f.db = db
// set up mux
f.httpmux = mux.NewRouter()
// set up admin panel
f.adminPanel = admin.NewServer()
// set static files dir
f.staticDir = c.Static
// set middleware
f.middleware = mid
// set up routes
if f.adminPanel != nil {
// route up admin panel
f.httpmux.PathPrefix("/admin/").Handler(f.adminPanel)
}
if f.middleware != nil {
// route up middleware
f.middleware.SetupRoutes(f.httpmux)
}
if f.apiserve != nil {
// route up api
f.apiserve.SetupRoutes(f.httpmux.PathPrefix("/api/").Subrouter())
}
// route up robots.txt
f.httpmux.Path("/robots.txt").HandlerFunc(f.serveRobots)
// route up static files
f.httpmux.PathPrefix("/static/").Handler(http.FileServer(http.Dir(f.staticDir)))
return
}


@@ -0,0 +1,14 @@
package frontend
import (
"github.com/gorilla/mux"
"github.com/majestrate/srndv2/lib/config"
)
// http middleware
type Middleware interface {
// set up routes
SetupRoutes(m *mux.Router)
// reload with new configuration
Reload(c *config.MiddlewareConfig)
}


@@ -0,0 +1,143 @@
package frontend
import (
log "github.com/Sirupsen/logrus"
"github.com/gorilla/mux"
"github.com/gorilla/sessions"
"github.com/majestrate/srndv2/lib/cache"
"github.com/majestrate/srndv2/lib/config"
"github.com/majestrate/srndv2/lib/database"
"html/template"
"io"
"net/http"
"path/filepath"
"strconv"
)
const nntpchan_cache_key = "NNTPCHAN_CACHE::"
func cachekey(k string) string {
return nntpchan_cache_key + k
}
func cachekey_for_thread(threadid string) string {
return cachekey("thread-" + threadid)
}
func cachekey_for_board(name, page string) string {
return cachekey("board-" + page + "-" + name)
}
// standard overchan imageboard middleware
type overchanMiddleware struct {
templ *template.Template
markupCache cache.CacheInterface
captcha *CaptchaServer
store *sessions.CookieStore
db database.DB
}
func (m *overchanMiddleware) SetupRoutes(mux *mux.Router) {
// setup front page handler
mux.Path("/").HandlerFunc(m.ServeIndex)
// setup thread handler
mux.Path("/thread/{id}/").HandlerFunc(m.ServeThread)
// setup board page handler
mux.Path("/board/{name}/").HandlerFunc(m.ServeBoardPage)
// setup posting endpoint
mux.Path("/post")
// create captcha
captchaPrefix := "/captcha/"
m.captcha = NewCaptchaServer(200, 400, captchaPrefix, m.store)
// setup captcha endpoint
m.captcha.SetupRoutes(mux.PathPrefix(captchaPrefix).Subrouter())
}
// reload middleware
func (m *overchanMiddleware) Reload(c *config.MiddlewareConfig) {
// reload templates
templ, err := template.ParseGlob(filepath.Join(c.Templates, "*.tmpl"))
if err == nil {
log.Infof("middleware reloaded templates")
m.templ = templ
} else {
log.Errorf("middleware reload failed: %s", err.Error())
}
}
func (m *overchanMiddleware) ServeBoardPage(w http.ResponseWriter, r *http.Request) {
param := mux.Vars(r)
board := param["name"]
page := r.URL.Query().Get("page")
if page == "" {
page = "0"
}
pageno, err := strconv.Atoi(page)
if err == nil {
m.serveTemplate(w, r, "board.html.tmpl", cachekey_for_board(board, page), func() (interface{}, error) {
// get object for cache miss
// TODO: hardcoded page size
return m.db.GetGroupForPage(board, pageno, 10)
})
} else {
// 404
http.NotFound(w, r)
}
}
// serve cached thread
func (m *overchanMiddleware) ServeThread(w http.ResponseWriter, r *http.Request) {
param := mux.Vars(r)
thread_id := param["id"]
m.serveTemplate(w, r, "thread.html.tmpl", cachekey_for_thread(thread_id), func() (interface{}, error) {
// get object for cache miss
return m.db.GetThreadByHash(thread_id)
})
}
// serve index page
func (m *overchanMiddleware) ServeIndex(w http.ResponseWriter, r *http.Request) {
m.serveTemplate(w, r, "index.html.tmpl", "index", nil)
}
// serve a template
func (m *overchanMiddleware) serveTemplate(w http.ResponseWriter, r *http.Request, tname, cacheKey string, getObj func() (interface{}, error)) {
t := m.templ.Lookup(tname)
if t == nil {
log.WithFields(log.Fields{
"template": tname,
}).Warning("template not found")
http.NotFound(w, r)
} else {
m.markupCache.ServeCached(w, r, cacheKey, func(wr io.Writer) error {
if getObj == nil {
return t.Execute(wr, nil)
} else {
// get model object
obj, err := getObj()
if err != nil {
// error getting model
log.WithFields(log.Fields{
"error": err,
"template": tname,
"cacheKey": cacheKey,
}).Warning("failed to refresh template")
return err
}
return t.Execute(wr, obj)
}
})
}
}
// create standard overchan middleware
func OverchanMiddleware(c *config.MiddlewareConfig, markupCache cache.CacheInterface, db database.DB) (m Middleware, err error) {
om := new(overchanMiddleware)
om.markupCache = markupCache
om.templ, err = template.ParseGlob(filepath.Join(c.Templates, "*.tmpl"))
om.db = db
if err == nil {
m = om
}
return
}
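Illustrative sketch, not part of this commit: wiring the overchan middleware into a gorilla/mux router could look roughly like the following. Here mwConf, markupCache and db stand for an already-loaded *config.MiddlewareConfig, cache.CacheInterface and database.DB, and the frontend import path is assumed to follow the github.com/majestrate/srndv2/lib layout used in this file's imports.

	router := mux.NewRouter()
	mw, err := frontend.OverchanMiddleware(mwConf, markupCache, db)
	if err == nil {
		// registers /, /thread/{id}/, /board/{name}/ and the captcha routes
		mw.SetupRoutes(router)
	}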

View File

@ -0,0 +1 @@
package frontend

View File

@ -0,0 +1 @@
package frontend

View File

@ -0,0 +1,15 @@
package model
type Article struct {
Subject string
Name string
Header map[string][]string
Text string
Attachments []Attachment
MessageID string
Newsgroup string
Reference string
Path string
Posted int64
Addr string
}

View File

@ -0,0 +1,10 @@
package model
type Attachment struct {
Path string
Name string
Mime string
Hash string
// only filled for api
Body string
}

View File

@ -0,0 +1,4 @@
package model
type Board struct {
}

View File

@ -0,0 +1,8 @@
package model
type BoardPage struct {
Board string
Page int
Pages int
Threads []Thread
}

View File

@ -0,0 +1,2 @@
// MVC models
package model

View File

@ -0,0 +1,29 @@
package model
import (
"time"
)
type ArticleHeader map[string][]string
// a (MessageID, newsgroup) tuple

type ArticleEntry [2]string
func (self ArticleEntry) Newsgroup() string {
return self[1]
}
func (self ArticleEntry) MessageID() string {
return self[0]
}
// a (time point, post count) tuple
type PostEntry [2]int64
func (self PostEntry) Time() time.Time {
return time.Unix(self[0], 0)
}
func (self PostEntry) Count() int64 {
return self[1]
}
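Illustrative only, not part of the commit: the index convention of these tuples can be read off like this (the literal values are made up; written as if inside package model).

	e := ArticleEntry{"<abc123$1481700000@example.tld>", "overchan.test"}
	p := PostEntry{1481700000, 12}
	_ = e.MessageID() // first element: "<abc123$1481700000@example.tld>"
	_ = e.Newsgroup() // second element: "overchan.test"
	_ = p.Time()      // time.Unix(1481700000, 0)
	_ = p.Count()     // 12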

View File

@ -0,0 +1,29 @@
package model
type Post struct {
Board string
PostName string
PostSubject string
PostMessage string
message_rendered string
Message_id string
MessagePath string
Addr string
OP bool
Posted int64
Parent string
Sage bool
Key string
Files []*Attachment
HashLong string
HashShort string
URL string
Tripcode string
BodyMarkup string
PostMarkup string
PostPrefix string
index int
}
// ( message-id, references, newsgroup )
type PostReference [3]string

View File

@ -0,0 +1,6 @@
package model
type Thread struct {
Root *Post
Replies []*Post
}

View File

@ -0,0 +1,37 @@
package network
import (
"errors"
"nntpchan/lib/config"
"net"
"strings"
)
// operation timed out
var ErrTimeout = errors.New("timeout")
// the operation was reset abruptly
var ErrReset = errors.New("reset")
// the operation was actively refused
var ErrRefused = errors.New("refused")
// generic dialer
// dials out to a remote address
// returns a net.Conn and nil on success
// returns nil and error if an error happens while dialing
type Dialer interface {
Dial(remote string) (net.Conn, error)
}
// create a new dialer from configuration
func NewDialer(conf *config.ProxyConfig) (d Dialer) {
d = StdDialer
if conf != nil {
proxyType := strings.ToLower(conf.Type)
if proxyType == "socks" || proxyType == "socks4a" {
d = SocksDialer(conf.Addr)
}
}
return
}
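A minimal usage sketch, not part of the diff: dialing through a SOCKS proxy versus directly, assuming imports of nntpchan/lib/network and nntpchan/lib/config. The proxy address and remote host are placeholders, and only the Type and Addr fields of ProxyConfig (the ones used above) are assumed.

	proxied := network.NewDialer(&config.ProxyConfig{Type: "socks4a", Addr: "127.0.0.1:9050"})
	conn, err := proxied.Dial("news.example.tld:119")
	if err == nil {
		defer conn.Close()
		// speak NNTP over conn
	}

	// passing nil falls back to the plain TCP dialer
	direct := network.NewDialer(nil)
	_ = direct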

View File

@ -0,0 +1,4 @@
//
// network utilities
//
package network

View File

@ -0,0 +1 @@
package network

View File

@ -0,0 +1,140 @@
package network
import (
"errors"
log "github.com/Sirupsen/logrus"
"io"
"net"
"strconv"
"strings"
)
type socksDialer struct {
socksAddr string
dialer Dialer
}
// try dialing out via socks proxy
func (sd *socksDialer) Dial(remote string) (c net.Conn, err error) {
log.WithFields(log.Fields{
"addr": remote,
"socks": sd.socksAddr,
}).Debug("dailing out to socks proxy")
c, err = sd.dialer.Dial(sd.socksAddr)
if err == nil {
// dialed out to socks proxy successfully
remote_addr := remote
// generate request
idx := strings.LastIndex(remote_addr, ":")
if idx == -1 {
err = errors.New("invalid address: " + remote_addr)
return
}
var port uint64
addr := remote_addr[:idx]
port, err = strconv.ParseUint(remote_addr[idx+1:], 10, 16)
if err != nil {
// invalid port number
c.Close()
c = nil
return
} else if port >= 65536 {
// unreachable given the 16 bit parse above, kept as a sanity check
err = errors.New("bad proxy port")
c.Close()
c = nil
return
}
var proxy_port uint16
proxy_port = uint16(port)
proxy_ident := "srndproxy"
req_len := len(addr) + 1 + len(proxy_ident) + 1 + 8
req := make([]byte, req_len)
// pack request
req[0] = '\x04'
req[1] = '\x01'
req[2] = byte(proxy_port & 0xff00 >> 8)
req[3] = byte(proxy_port & 0x00ff)
req[7] = '\x01'
idx = 8
proxy_ident_b := []byte(proxy_ident)
addr_b := []byte(addr)
var bi int
for bi = range proxy_ident_b {
req[idx] = proxy_ident_b[bi]
idx += 1
}
idx += 1
for bi = range addr_b {
req[idx] = addr_b[bi]
idx += 1
}
log.WithFields(log.Fields{
"addr": remote,
"socks": sd.socksAddr,
"req": req,
}).Debug("write socks request")
n := 0
n, err = c.Write(req)
if err == nil && n == len(req) {
// wrote request okay
resp := make([]byte, 8)
_, err = io.ReadFull(c, resp)
if err == nil {
// got reply okay
if resp[1] == '\x5a' {
// successful socks connection
log.WithFields(log.Fields{
"addr": remote,
"socks": sd.socksAddr,
}).Debug("socks proxy connection successful")
} else {
// unsuccessful socks connection
log.WithFields(log.Fields{
"addr": remote,
"socks": sd.socksAddr,
"code": resp[1],
}).Warn("connect via socks proxy failed")
c.Close()
c = nil
}
} else {
// error reading reply
log.WithFields(log.Fields{
"addr": remote,
"socks": sd.socksAddr,
}).Error("failed to read socks response ", err)
c.Close()
c = nil
}
} else {
if err == nil {
err = errors.New("short write")
}
// error writing request
log.WithFields(log.Fields{
"addr": remote,
"socks": sd.socksAddr,
}).Error("failed to write socks request ", err)
c.Close()
c = nil
}
} else {
// dial failed
log.WithFields(log.Fields{
"addr": remote,
"socks": sd.socksAddr,
}).Error("Cannot connect to socks proxy ", err)
}
return
}
// create a socks dialer that dials out via socks proxy at address
func SocksDialer(addr string) Dialer {
return &socksDialer{
socksAddr: addr,
dialer: StdDialer,
}
}

View File

@ -0,0 +1,14 @@
package network
import (
"net"
)
type stdDialer struct {
}
func (sd *stdDialer) Dial(addr string) (c net.Conn, err error) {
return net.Dial("tcp", addr)
}
var StdDialer = &stdDialer{}

View File

@ -0,0 +1,69 @@
package nntp
import (
"nntpchan/lib/nntp/message"
)
const (
// accepted article
ARTICLE_ACCEPT = iota
// reject article, don't send again
ARTICLE_REJECT
// defer article, send later
ARTICLE_DEFER
// reject + ban
ARTICLE_BAN
)
type PolicyStatus int
const PolicyAccept = PolicyStatus(ARTICLE_ACCEPT)
const PolicyReject = PolicyStatus(ARTICLE_REJECT)
const PolicyDefer = PolicyStatus(ARTICLE_DEFER)
const PolicyBan = PolicyStatus(ARTICLE_BAN)
func (s PolicyStatus) String() string {
switch int(s) {
case ARTICLE_ACCEPT:
return "ACCEPTED"
case ARTICLE_REJECT:
return "REJECTED"
case ARTICLE_DEFER:
return "DEFERRED"
case ARTICLE_BAN:
return "BANNED"
default:
return "[invalid policy status]"
}
}
// is this an accept code?
func (s PolicyStatus) Accept() bool {
return s == ARTICLE_ACCEPT
}
// is this a defer code?
func (s PolicyStatus) Defer() bool {
return s == ARTICLE_DEFER
}
// is this a ban code
func (s PolicyStatus) Ban() bool {
return s == ARTICLE_BAN
}
// is this a reject code?
func (s PolicyStatus) Reject() bool {
return s == ARTICLE_BAN || s == ARTICLE_REJECT
}
// type defining a policy that determines if we want to accept/reject/defer an
// incoming article
type ArticleAcceptor interface {
// check article given an article header
CheckHeader(hdr message.Header) PolicyStatus
// check article given a message id
CheckMessageID(msgid MessageID) PolicyStatus
// get max article size in bytes
MaxArticleSize() int64
}
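A hypothetical implementation, shown only to illustrate the interface and not part of this commit (written as if added to package nntp): an acceptor that takes everything up to a fixed article size.

	type sizeLimitAcceptor struct {
		limit int64
	}

	// accept all headers
	func (a sizeLimitAcceptor) CheckHeader(hdr message.Header) PolicyStatus { return PolicyAccept }

	// accept all message-ids
	func (a sizeLimitAcceptor) CheckMessageID(msgid MessageID) PolicyStatus { return PolicyAccept }

	// cap article size at the configured limit
	func (a sizeLimitAcceptor) MaxArticleSize() int64 { return a.limit }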

View File

@ -0,0 +1,38 @@
package nntp
import (
"bufio"
"fmt"
"io"
"os"
"strings"
)
// defines server side authentication mechanism
type ServerAuth interface {
// check plaintext login credentials
// returns true and nil if the credentials are valid
// returns false and nil if the credentials are not valid
// returns false and an error if an io error happens while checking
CheckLogin(username, passwd string) (bool, error)
}
type FlatfileAuth string
func (fname FlatfileAuth) CheckLogin(username, passwd string) (found bool, err error) {
cred := fmt.Sprintf("%s:%s", username, passwd)
var f *os.File
f, err = os.Open(string(fname))
if err == nil {
defer f.Close()
r := bufio.NewReader(f)
for err == nil {
var line string
line, err = r.ReadString(10)
line = strings.Trim(line, "\r\n")
if line == cred {
found = true
break
}
}
if err == io.EOF {
// reaching end of file without a match is not an authentication error
err = nil
}
}
return
}
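Usage sketch, not part of the commit: the flat file is expected to hold one user:password pair per line, matching the cred string built above. The file name and credentials below are placeholders; the code is written as if used from within package nntp.

	// logins.txt:
	//   alice:correct-horse
	//   bob:hunter2
	auth := FlatfileAuth("logins.txt")
	ok, err := auth.CheckLogin("alice", "correct-horse")
	if err == nil && ok {
		// credentials accepted
	}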

View File

@ -0,0 +1,51 @@
package nntp
import (
"errors"
"nntpchan/lib/nntp/message"
)
var ErrArticleNotFound = errors.New("article not found")
var ErrPostRejected = errors.New("post rejected")
// an nntp client
// obtains articles from remote nntp server
type Client interface {
// obtain article by message id
// returns an article and nil if obtained
// returns nil and an error if an error occurred while obtaining the article,
// error is ErrArticleNotFound if the remote server doesn't have that article
Article(msgid MessageID) (*message.Article, error)
// check if the remote server has an article given its message-id
// return true and nil if the server has the article
// return false and nil if the server doesn't have the article
// returns false and error if an error occurred while checking
Check(msgid MessageID) (bool, error)
// check if the remote server carries a newsgroup
// return true and nil if the server carries this newsgroup
// return false and nil if the server doesn't carry this newsgroup
// returns false and error if an error occurred while checking
NewsgroupExists(group Newsgroup) (bool, error)
// return true and nil if posting is allowed
// return false and nil if posting is not allowed
// return false and error if an error occurred
PostingAllowed() (bool, error)
// post an nntp article to remote server
// returns nil on success
// returns error if an error occurred during post
// returns ErrPostRejected if the remote server rejected our post
Post(a *message.Article) error
// connect to remote server
// returns nil on success
// returns error if one occurs during dial or handshake
Connect(d Dialer) error
// send quit and disconnects from server
// blocks until done
Quit()
}

View File

@ -0,0 +1,206 @@
package nntp
// 1xx codes
// help info follows
const RPL_Help = "100"
// capabilities info follows
const RPL_Capabilities = "101"
// server date time follows
const RPL_Date = "111"
// 2xx codes
// posting is allowed
const RPL_PostingAllowed = "200"
// posting is not allowed
const RPL_PostingNotAllowed = "201"
// streaming mode enabled
const RPL_PostingStreaming = "203"
// reply to QUIT command, we will close the connection
const RPL_Quit = "205"
// reply for GROUP and LISTGROUP commands
const RPL_Group = "211"
// info list follows
const RPL_List = "215"
// index follows
const RPL_Index = "218"
// article follows
const RPL_Article = "220"
// article headers follows
const RPL_ArticleHeaders = "221"
// article body follows
const RPL_ArticleBody = "222"
// selected article exists
const RPL_ArticleSelectedExists = "223"
// overview info follows
const RPL_Overview = "224"
// list of article headers follows
const RPL_HeadersList = "225"
// list of new articles follows
const RPL_NewArticles = "230"
// list of newsgroups follows
const RPL_NewsgroupList = "231"
// article was transferred okay by IHAVE command
const RPL_TransferOkay = "235"
// article is not found by CHECK and we want it
const RPL_StreamingAccept = "238"
// article was transferred via TAKETHIS successfully
const RPL_StreamingTransfered = "239"
// article was transferred by POST command successfully
const RPL_PostReceived = "240"
// AUTHINFO SIMPLE accepted
const RPL_AuthInfoAccepted = "250"
// authentication creds have been accepted
const RPL_AuthAccepted = "281"
// binary content follows
const RPL_Binary = "288"
// line sent for posting allowed
const Line_PostingAllowed = RPL_PostingAllowed + " Posting Allowed"
// line sent for posting not allowed
const Line_PostingNotAllowed = RPL_PostingNotAllowed + " Posting Not Allowed"
// 3xx codes
// article is accepted via IHAVE
const RPL_TransferAccepted = "335"
// article was accepted via POST
const RPL_PostAccepted = "340"
// continue with authorization
const RPL_ContinueAuthorization = "350"
// more authentication info required
const RPL_MoreAuth = "381"
// continue with tls handshake
const RPL_TLSContinue = "382"
// 4xx codes
// server says service is not available on initial connection
const RPL_NotAvaiable = "400"
// server is in the wrong mode
const RPL_WrongMode = "401"
// generic fault prevents action from being taken
const RPL_GenericError = "403"
// newsgroup does not exist
const RPL_NoSuchGroup = "411"
// no newsgroup has been selected
const RPL_NoGroupSelected = "412"
// no tin style index available
const RPL_NoIndex = "418"
// current article number is invalid
const RPL_NoArticleNum = "420"
// no next article in this group (NEXT)
const RPL_NoNextArticle = "421"
// no previous article in this group (LAST)
const RPL_NoPrevArticle = "422"
// no article in specified range
const RPL_NoArticleRange = "423"
// no article with that message-id
const RPL_NoArticleMsgID = "430"
// defer article asked by CHECK command
const RPL_StreamingDefer = "431"
// article is not wanted (1st stage of IHAVE)
const RPL_TransferNotWanted = "435"
// article was not sent; defer sending (either stage of IHAVE)
const RPL_TransferDefer = "436"
// reject transfer do not retry (2nd stage IHAVE)
const RPL_TransferReject = "437"
// reject article and don't ask again (CHECK command)
const RPL_StreamingReject = "438"
// article transfer via streaming failed (TAKETHIS)
const RPL_StreamingFailed = "439"
// posting not permitted (1st stage of POST command)
const RPL_PostingNotPermitted = "440"
// posting failed (2nd stage of POST command)
const RPL_PostingFailed = "441"
// authorization required
const RPL_AuthorizeRequired = "450"
// authorization rejected
const RPL_AuthorizeRejected = "452"
// command unavailable until client has authenticated
const RPL_AuthenticateRequired = "480"
// authentication creds rejected
const RPL_AuthenticateRejected = "482"
// command unavailable until connection is encrypted
const RPL_EncryptionRequired = "483"
// 5xx codes
// got an unknown command
const RPL_UnknownCommand = "500"
// got a command with invalid syntax
const RPL_SyntaxError = "501"
// fatal error happened and connection will close
const RPL_GenericFatal = "502"
// feature is not supported
const RPL_FeatureNotSupported = "503"
// message encoding is bad
const RPL_EncodingError = "504"
// starttls can not be done
const RPL_TLSRejected = "580"
// line sent on invalid mode
const Line_InvalidMode = RPL_SyntaxError + " Invalid Mode Selected"
// line sent on successful streaming
const Line_StreamingAllowed = RPL_PostingStreaming + " aw yeh streamit brah"
// send this when we handle a QUIT command
const Line_RPLQuit = RPL_Quit + " bai"

View File

@ -0,0 +1,27 @@
package nntp
type Command string
func (c Command) String() string {
return string(c)
}
// command to list newsgroups
const CMD_Newsgroups = Command("NEWSGROUPS 0 0 GMT")
// create group command for a newsgroup
func CMD_Group(g Newsgroup) Command {
return Command("GROUP " + g.String())
}
const CMD_XOver = Command("XOVER 0")
func CMD_Article(msgid MessageID) Command {
return Command("ARTICLE " + msgid.String())
}
func CMD_Head(msgid MessageID) Command {
return Command("HEAD " + msgid.String())
}
const CMD_Capabilities = Command("CAPABILITIES")

View File

@ -0,0 +1,75 @@
package nntp
import (
"nntpchan/lib/crypto"
"crypto/sha1"
"fmt"
"io"
"regexp"
"strings"
"time"
)
var exp_valid_message_id = regexp.MustCompilePOSIX(`^<[a-zA-Z0-9$.]{2,128}@[a-zA-Z0-9\-.]{2,63}>$`)
type MessageID string
// return true if this message id is well formed, otherwise return false
func (msgid MessageID) Valid() bool {
return exp_valid_message_id.Copy().MatchString(msgid.String())
}
// get message id as string
func (msgid MessageID) String() string {
return string(msgid)
}
// compute long form hash of message id
func (msgid MessageID) LongHash() string {
return fmt.Sprintf("%x", sha1.Sum([]byte(msgid)))
}
// compute truncated form of message id hash
func (msgid MessageID) ShortHash() string {
return strings.ToLower(msgid.LongHash()[:18])
}
// compute blake2 hash of message id
func (msgid MessageID) Blake2Hash() string {
h := crypto.Hash()
io.WriteString(h, msgid.String())
return strings.ToLower(fmt.Sprintf("%x", h.Sum(nil)))
}
// generate a new message id given name of server
func GenMessageID(name string) MessageID {
r := crypto.RandBytes(4)
t := time.Now()
return MessageID(fmt.Sprintf("<%x$%d@%s>", r, t.Unix(), name))
}
var exp_valid_newsgroup = regexp.MustCompilePOSIX(`^[a-zA-Z0-9.]{1,128}$`)
// an nntp newsgroup
type Newsgroup string
// return true if this newsgroup is well formed otherwise false
func (g Newsgroup) Valid() bool {
return exp_valid_newsgroup.Copy().MatchString(g.String())
}
// get newsgroup as string
func (g Newsgroup) String() string {
return string(g)
}
// (message-id, newsgroup) tuple
type ArticleEntry [2]string
func (e ArticleEntry) MessageID() MessageID {
return MessageID(e[0])
}
func (e ArticleEntry) Newsgroup() Newsgroup {
return Newsgroup(e[1])
}

View File

@ -0,0 +1,44 @@
package nntp
import (
"testing"
)
func TestGenMessageID(t *testing.T) {
msgid := GenMessageID("test.tld")
t.Logf("generated id %s", msgid)
if !msgid.Valid() {
t.Logf("invalid generated message-id %s", msgid)
t.Fail()
}
msgid = GenMessageID("<><><>")
t.Logf("generated id %s", msgid)
if msgid.Valid() {
t.Logf("generated valid message-id when it should've been invalid %s", msgid)
t.Fail()
}
}
func TestMessageIDHash(t *testing.T) {
msgid := GenMessageID("test.tld")
lh := msgid.LongHash()
sh := msgid.ShortHash()
bh := msgid.Blake2Hash()
t.Logf("long=%s short=%s blake2=%s", lh, sh, bh)
}
func TestValidNewsgroup(t *testing.T) {
g := Newsgroup("overchan.test")
if !g.Valid() {
t.Logf("%s is invalid?", g)
t.Fail()
}
}
func TestInvalidNewsgroup(t *testing.T) {
g := Newsgroup("asd.asd.asd.&&&")
if g.Valid() {
t.Logf("%s should be invalid", g)
t.Fail()
}
}

View File

@ -0,0 +1,53 @@
package nntp
// an nntp connection
type Conn interface {
// negotiate an nntp session on this connection
// returns nil if we negotiated successfully
// returns ErrAuthRejected if the remote server rejected any authentication
// we sent, or another error if one occurred while negotiating
Negotiate(stream bool) error
// obtain connection state
GetState() *ConnState
// return true if posting is allowed
// return false if posting is not allowed
PostingAllowed() bool
// handle inbound non-streaming connection
// call event hooks on event
ProcessInbound(hooks EventHooks)
// does this connection want to do nntp streaming?
WantsStreaming() bool
// what mode are we in?
// returns mode in all caps
Mode() Mode
// initiate nntp streaming
// after calling this the caller MUST call StreamAndQuit()
// returns a channel for queueing article entries to be streamed and nil on success
// returns nil and ErrStreamingNotAllowed if streaming is not allowed on this
// connection, or another error if one occurs while trying to start streaming
StartStreaming() (chan ArticleEntry, error)
// stream articles and quit when the channel obtained by StartStreaming() is
// closed, after which this nntp connection is no longer open
StreamAndQuit()
// is this nntp connection open?
IsOpen() bool
// send quit command and close connection
Quit()
// download all articles in a newsgroup
// returns error if a network error occurs
DownloadGroup(g Newsgroup) error
// get list of active newsgroups
ListNewsgroups() ([]Newsgroup, error)
}

File diff suppressed because it is too large

View File

@ -0,0 +1,15 @@
package nntp
import (
"crypto/tls"
"nntpchan/lib/network"
)
// establishes an outbound nntp connection to a remote server
type Dialer interface {
// dial out with a dialer
// if cfg is not nil, try to establish a tls connection with STARTTLS
// returns a new nntp connection and nil on successful handshake and login
// returns nil and an error if an error happened
Dial(d network.Dialer, cfg *tls.Config) (*Conn, error)
}

View File

@ -0,0 +1,4 @@
//
// nntp client/server
//
package nntp

View File

@ -0,0 +1,60 @@
package nntp
import (
"testing"
)
func TestTAKETHISParse(t *testing.T) {
msgid := GenMessageID("test.tld")
ev := stream_cmd_TAKETHIS(msgid)
t.Logf("event: %s", ev)
if ev.MessageID() != msgid {
t.Logf("%s != %s, event was %s", msgid, ev.MessageID(), ev)
t.Fail()
}
if ev.Command() != "TAKETHIS" {
t.Logf("%s != TAKETHIS, event was %s", ev.Command(), ev)
t.Fail()
}
if !ev.Valid() {
t.Logf("%s is invalid stream event", ev)
t.Fail()
}
}
func TestCHECKParse(t *testing.T) {
msgid := GenMessageID("test.tld")
ev := stream_cmd_CHECK(msgid)
t.Logf("event: %s", ev)
if ev.MessageID() != msgid {
t.Logf("%s != %s, event was %s", msgid, ev.MessageID(), ev)
t.Fail()
}
if ev.Command() != "CHECK" {
t.Logf("%s != CHECK, event was %s", ev.Command(), ev)
t.Fail()
}
if !ev.Valid() {
t.Logf("%s is invalid stream event", ev)
t.Fail()
}
}
func TestInvalidStremEvent(t *testing.T) {
str := "asd"
ev := StreamEvent(str)
t.Logf("invalid str=%s ev=%s", str, ev)
if ev.Valid() {
t.Logf("invalid CHECK command is valid? %s", ev)
t.Fail()
}
str = "asd asd"
ev = StreamEvent(str)
t.Logf("invalid str=%s ev=%s", str, ev)
if ev.Valid() {
t.Logf("invalid CHECK command is valid? %s", ev)
t.Fail()
}
}

View File

@ -0,0 +1,19 @@
package nntp
import (
"nntpchan/lib/nntp/message"
"io"
)
// defines interface for filtering an nntp article
// filters can (and do) modify the article they operate on
type ArticleFilter interface {
// filter the article header
// returns the modified Header and an error if one occurs
FilterHeader(hdr message.Header) (message.Header, error)
// reads the article's body and write the filtered version to an io.Writer
// returns the number of bytes written to the io.Writer, true if the body was
// modified (or false if the body is unchanged) and an error if one occurs
FilterAndWriteBody(body io.Reader, wr io.Writer) (int64, bool, error)
}
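A hypothetical example, not in the diff (written as if added to package nntp): a no-op filter satisfying this interface, passing the header through and copying the body unchanged.

	type passthroughFilter struct{}

	func (passthroughFilter) FilterHeader(hdr message.Header) (message.Header, error) {
		// leave the header untouched
		return hdr, nil
	}

	func (passthroughFilter) FilterAndWriteBody(body io.Reader, wr io.Writer) (int64, bool, error) {
		n, err := io.Copy(wr, body)
		// false: the body was written unmodified
		return n, false, err
	}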

View File

@ -0,0 +1,30 @@
package nntp
import (
log "github.com/Sirupsen/logrus"
"nntpchan/lib/config"
"os/exec"
)
type Hook struct {
cfg *config.NNTPHookConfig
}
func NewHook(cfg *config.NNTPHookConfig) *Hook {
return &Hook{
cfg: cfg,
}
}
func (h *Hook) GotArticle(msgid MessageID, group Newsgroup) {
c := exec.Command(h.cfg.Exec, group.String(), msgid.String())
log.Infof("calling hook %s", h.cfg.Name)
err := c.Run()
if err != nil {
log.Errorf("error in nntp hook %s: %s", h.cfg.Name, err.Error())
}
}
func (*Hook) SentArticleVia(msgid MessageID, feedname string) {
}

View File

@ -0,0 +1,9 @@
package nntp
// callback hooks fired on certain events
type EventHooks interface {
// called when we have obtained an article given its message-id
GotArticle(msgid MessageID, group Newsgroup)
// called when we have sent an article to a single remote feed
SentArticleVia(msgid MessageID, feedname string)
}
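Illustrative only, not part of the commit: a minimal EventHooks implementation that just logs, assuming the logrus import alias log used elsewhere in this package; it could be combined with other hooks through MulitHook.

	type logHooks struct{}

	func (logHooks) GotArticle(msgid MessageID, group Newsgroup) {
		log.Infof("got article %s in %s", msgid, group)
	}

	func (logHooks) SentArticleVia(msgid MessageID, feedname string) {
		log.Infof("sent article %s via %s", msgid, feedname)
	}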

View File

@ -0,0 +1,11 @@
package message
// an nntp article
type Article struct {
// the article's mime header
Header Header
// unexported fields ...
}

View File

@ -0,0 +1,16 @@
package message
import (
"io"
)
// attachment in an nntp article
type Attachment struct {
// mimetype
Mime string
// the filename
FileName string
// the fully decoded attachment body
// must close when done
Body io.ReadCloser
}

View File

@ -0,0 +1,2 @@
// package for parsing, packing, signing, verifying nntp articles
package message

View File

@ -0,0 +1,174 @@
package message
import (
"io"
"mime"
"strings"
)
// an nntp message header
type Header map[string][]string
// get message-id header
func (self Header) MessageID() (v string) {
for _, hdr := range []string{"MessageID", "Message-ID", "Message-Id", "message-id"} {
v = self.Get(hdr, "")
if v != "" {
break
}
}
return
}
func (self Header) Reference() (ref string) {
return self.Get("Reference", self.MessageID())
}
// extract media type from content-type header
func (self Header) GetMediaType() (mediatype string, params map[string]string, err error) {
return mime.ParseMediaType(self.Get("Content-Type", "text/plain"))
}
// is this header for a multipart message?
func (self Header) IsMultipart() bool {
return strings.HasPrefix(self.Get("Content-Type", "text/plain"), "multipart/mixed")
}
func (self Header) IsSigned() bool {
return self.Get("X-Pubkey-Ed25519", "") != ""
}
func (self Header) Newsgroup() string {
return self.Get("Newsgroups", "overchan.discard")
}
// do we have a key in this header?
func (self Header) Has(key string) bool {
_, ok := self[key]
return ok
}
// set key value
func (self Header) Set(key, val string) {
self[key] = []string{val}
}
func (self Header) AppendPath(name string) {
p := self.Get("Path", name)
if p != name {
p = name + "!" + p
}
self.Set("Path", p)
}
// append value to key
func (self Header) Add(key, val string) {
if self.Has(key) {
self[key] = append(self[key], val)
} else {
self.Set(key, val)
}
}
// get via key or return fallback value
func (self Header) Get(key, fallback string) string {
val, ok := self[key]
if ok {
str := ""
for _, k := range val {
str += k + ", "
}
return str[:len(str)-2]
} else {
return fallback
}
}
// interface for types that can read an nntp header
type HeaderReader interface {
// blocking read an nntp header from an io.Reader
// return the read header and nil on success
// return nil and an error if an error occurred while reading
ReadHeader(r io.Reader) (Header, error)
}
// interface for types that can write an nntp header
type HeaderWriter interface {
// blocking write an nntp header to an io.Writer
// returns an error if one occurs otherwise nil
WriteHeader(hdr Header, w io.Writer) error
}
// implements HeaderReader and HeaderWriter
type HeaderIO struct {
delim byte
}
// read header
func (s *HeaderIO) ReadHeader(r io.Reader) (hdr Header, err error) {
hdr = make(Header)
var k, v string
var buf [1]byte
for err == nil {
// read key
for err == nil {
_, err = r.Read(buf[:])
if err != nil {
return
}
if buf[0] == 58 { // colon
// consume space
_, err = r.Read(buf[:])
for err == nil {
_, err = r.Read(buf[:])
if buf[0] == s.delim {
// got delimiter
hdr.Add(k, v)
k = ""
v = ""
break
} else {
v += string(buf[:])
}
}
break
} else if buf[0] == s.delim {
// done
return
} else {
k += string(buf[:])
}
}
}
return
}
// write header
func (s *HeaderIO) WriteHeader(hdr Header, wr io.Writer) (err error) {
for k, vs := range hdr {
for _, v := range vs {
var line []byte
// key
line = append(line, []byte(k)...)
// ": "
line = append(line, 58, 32)
// value
line = append(line, []byte(v)...)
// delimiter
line = append(line, s.delim)
// write line
_, err = wr.Write(line)
if err != nil {
return
}
}
}
_, err = wr.Write([]byte{s.delim})
return
}
func NewHeaderIO() *HeaderIO {
return &HeaderIO{
delim: 10,
}
}
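Round-trip sketch, not part of the commit, written as if inside package message with bytes and fmt additionally imported; the header values are placeholders.

	hio := NewHeaderIO()
	hdr := make(Header)
	hdr.Set("Newsgroups", "overchan.test")
	hdr.Set("Message-ID", "<deadbeef$1481700000@example.tld>")

	var buf bytes.Buffer
	if err := hio.WriteHeader(hdr, &buf); err == nil {
		parsed, err := hio.ReadHeader(&buf)
		if err == nil {
			fmt.Println(parsed.Newsgroup()) // overchan.test
		}
	}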

View File

@ -0,0 +1,69 @@
package nntp
import (
"errors"
"strings"
)
var ErrInvalidMode = errors.New("invalid mode set")
// a mode set by an nntp client
type Mode string
// reader mode
const MODE_READER = Mode("reader")
// streaming mode
const MODE_STREAM = Mode("stream")
// mode is not set
const MODE_UNSET = Mode("")
// get as string
func (m Mode) String() string {
return strings.ToUpper(string(m))
}
// is this a valid mode of operation?
func (m Mode) Valid() bool {
return m.Is(MODE_READER) || m.Is(MODE_STREAM)
}
// is this mode equal to another mode
func (m Mode) Is(other Mode) bool {
return m.String() == other.String()
}
// a switch mode command
type ModeCommand string
// get as string
func (m ModeCommand) String() string {
return strings.ToUpper(string(m))
}
// is this mode command well formed?
// does not check the actual mode sent.
func (m ModeCommand) Valid() bool {
s := m.String()
return strings.Count(s, " ") == 1 && strings.HasPrefix(s, "MODE ")
}
// get the mode selected in this mode command
func (m ModeCommand) Mode() Mode {
return Mode(strings.Split(m.String(), " ")[1])
}
// check if this mode command is equal to an existing one
func (m ModeCommand) Is(cmd ModeCommand) bool {
return m.String() == cmd.String()
}
// reader mode command
const ModeReader = ModeCommand("mode reader")
// streaming mode command
const ModeStream = ModeCommand("mode stream")
// line prefix for mode
const LinePrefix_Mode = "MODE "
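A small sketch, not in the diff, as it would appear inside package nntp, showing how an inbound mode line might be checked; the raw command string is a placeholder.

	raw := ModeCommand("MODE STREAM")
	if raw.Valid() && raw.Mode().Is(MODE_STREAM) {
		// switch this connection into streaming mode
	}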

View File

@ -0,0 +1,16 @@
package nntp
// multiplexed event hook
type MulitHook []EventHooks
func (m MulitHook) GotArticle(msgid MessageID, group Newsgroup) {
for _, h := range m {
h.GotArticle(msgid, group)
}
}
func (m MulitHook) SentArticleVia(msgid MessageID, feedname string) {
for _, h := range m {
h.SentArticleVia(msgid, feedname)
}
}

View File

@ -0,0 +1,29 @@
package nntp
//
// a policy that governs whether we federate an article via a feed
//
type FeedPolicy struct {
// list of whitelist regexps for newsgroups
Whitelist []string `json:"whitelist"`
// list of blacklist regexps for newsgroups
Blacklist []string `json:"blacklist"`
// are anon posts of any kind allowed?
AllowAnonPosts bool `json:"anon"`
// are anon posts with attachments allowed?
AllowAnonAttachments bool `json:"anon_attachments"`
// are any attachments allowed?
AllowAttachments bool `json:"attachments"`
// do we require Proof Of Work for untrusted connections?
UntrustedRequiresPoW bool `json:"pow"`
}
// default feed policy to be used if not configured explicitly
var DefaultFeedPolicy = &FeedPolicy{
Whitelist: []string{"ctl", "overchan.test"},
Blacklist: []string{`!^overchan\.`},
AllowAnonPosts: true,
AllowAnonAttachments: false,
UntrustedRequiresPoW: true,
AllowAttachments: true,
}
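Sketch only, not part of the commit: marshaling the default policy shows the JSON keys a feed policy uses in configuration; assumes encoding/json and fmt are imported and that the code sits in package nntp.

	b, err := json.MarshalIndent(DefaultFeedPolicy, "", "  ")
	if err == nil {
		fmt.Println(string(b))
		// prints an object with the keys "whitelist", "blacklist", "anon",
		// "anon_attachments", "attachments" and "pow"
	}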

View File

@ -0,0 +1,329 @@
package nntp
import (
log "github.com/Sirupsen/logrus"
"nntpchan/lib/config"
"nntpchan/lib/database"
"nntpchan/lib/network"
"nntpchan/lib/store"
"net"
"time"
)
// nntp outfeed state
type nntpFeed struct {
conn Conn
send chan ArticleEntry
conf *config.FeedConfig
}
// an nntp server
type Server struct {
// user callback
Hooks EventHooks
// filters to apply
Filters []ArticleFilter
// database driver
DB database.DB
// global article acceptor
Acceptor ArticleAcceptor
// article storage
Storage store.Storage
// nntp config
Config *config.NNTPServerConfig
// outfeeds to connect to
Feeds []*config.FeedConfig
// inbound authentication mechanism
Auth ServerAuth
// send to outbound feed channel
send chan ArticleEntry
// register inbound feed channel
regis chan *nntpFeed
// deregister inbound feed channel
deregis chan *nntpFeed
}
func NewServer() *Server {
return &Server{
// XXX: buffered?
send: make(chan ArticleEntry),
regis: make(chan *nntpFeed),
deregis: make(chan *nntpFeed),
}
}
// reload server configuration
func (s *Server) ReloadServer(c *config.NNTPServerConfig) {
}
// reload feeds
func (s *Server) ReloadFeeds(feeds []*config.FeedConfig) {
}
func (s *Server) GotArticle(msgid MessageID, group Newsgroup) {
log.WithFields(log.Fields{
"pkg": "nntp-server",
"msgid": msgid,
"group": group,
}).Info("obtained article")
if s.Hooks != nil {
s.Hooks.GotArticle(msgid, group)
}
// send to outbound feeds
s.send <- ArticleEntry{msgid.String(), group.String()}
}
func (s *Server) SentArticleVia(msgid MessageID, feedname string) {
log.WithFields(log.Fields{
"pkg": "nntp-server",
"msgid": msgid,
"feed": feedname,
}).Info("article sent")
if s.Hooks != nil {
s.Hooks.SentArticleVia(msgid, feedname)
}
}
func (s *Server) Name() string {
if s.Config == nil || s.Config.Name == "" {
return "nntp.anon.tld"
}
return s.Config.Name
}
// persist 1 feed forever
func (s *Server) persist(cfg *config.FeedConfig) {
delay := time.Second
log.WithFields(log.Fields{
"name": cfg.Name,
}).Debug("Persist Feed")
for {
dialer := network.NewDialer(cfg.Proxy)
c, err := dialer.Dial(cfg.Addr)
if err == nil {
// successful connect
delay = time.Second
conn := newOutboundConn(c, s, cfg)
err = conn.Negotiate(true)
if err == nil {
// negotiation good
log.WithFields(log.Fields{
"name": cfg.Name,
}).Debug("Negotitation good")
// start streaming
var chnl chan ArticleEntry
chnl, err = conn.StartStreaming()
if err == nil {
// register new connection
f := &nntpFeed{
conn: conn,
send: chnl,
conf: cfg,
}
s.regis <- f
// start streaming
conn.StreamAndQuit()
// deregister
s.deregis <- f
continue
}
} else {
log.WithFields(log.Fields{
"name": cfg.Name,
}).Info("outbound nntp connection failed to negotiate ", err)
}
conn.Quit()
} else {
// failed dial, do exponential backoff up to 1 hour
if delay <= time.Hour {
delay *= 2
}
log.WithFields(log.Fields{
"name": cfg.Name,
}).Info("feed backoff for ", delay)
time.Sleep(delay)
}
}
}
// download all new posts from a remote server
func (s *Server) downloadPosts(cfg *config.FeedConfig) error {
dialer := network.NewDialer(cfg.Proxy)
c, err := dialer.Dial(cfg.Addr)
if err != nil {
return err
}
conn := newOutboundConn(c, s, cfg)
err = conn.Negotiate(false)
if err != nil {
conn.Quit()
return err
}
groups, err := conn.ListNewsgroups()
if err != nil {
conn.Quit()
return err
}
for _, g := range groups {
if cfg.Policy != nil && cfg.Policy.AllowGroup(g.String()) {
log.WithFields(log.Fields{
"group": g,
"pkg": "nntp-server",
}).Debug("downloading group")
err = conn.DownloadGroup(g)
if err != nil {
conn.Quit()
return err
}
}
}
conn.Quit()
return nil
}
func (s *Server) periodicDownload(cfg *config.FeedConfig) {
for cfg.PullInterval > 0 {
err := s.downloadPosts(cfg)
if err != nil {
// report error
log.WithFields(log.Fields{
"feed": cfg.Name,
"pkg": "nntp-server",
"error": err,
}).Error("periodic download failed")
}
time.Sleep(time.Minute * time.Duration(cfg.PullInterval))
}
}
// persist all outbound feeds
func (s *Server) PersistFeeds() {
for _, f := range s.Feeds {
go s.persist(f)
go s.periodicDownload(f)
}
feeds := make(map[string]*nntpFeed)
for {
select {
case e, ok := <-s.send:
if !ok {
break
}
msgid := e.MessageID().String()
group := e.Newsgroup().String()
// TODO: determine anon
anon := false
// TODO: determine attachments
attachments := false
for _, f := range feeds {
if f.conf.Policy != nil && !f.conf.Policy.Allow(msgid, group, anon, attachments) {
// not allowed in this feed
continue
}
log.WithFields(log.Fields{
"name": f.conf.Name,
"msgid": msgid,
"group": group,
}).Debug("sending article")
f.send <- e
}
break
case f, ok := <-s.regis:
if ok {
log.WithFields(log.Fields{
"name": f.conf.Name,
}).Debug("register feed")
feeds[f.conf.Name] = f
}
break
case f, ok := <-s.deregis:
if ok {
log.WithFields(log.Fields{
"name": f.conf.Name,
}).Debug("deregister feed")
delete(feeds, f.conf.Name)
}
break
}
}
}
// serve connections from listener
func (s *Server) Serve(l net.Listener) (err error) {
log.WithFields(log.Fields{
"pkg": "nntp-server",
"addr": l.Addr(),
}).Debug("Serving")
for err == nil {
var c net.Conn
c, err = l.Accept()
if err == nil {
// we got a new connection
go s.handleInboundConnection(c)
} else {
log.WithFields(log.Fields{
"pkg": "nntp-server",
}).Error("failed to accept inbound connection", err)
}
}
return
}
// get the article policy for a connection given its state
func (s *Server) getPolicyFor(state *ConnState) ArticleAcceptor {
return s.Acceptor
}
// recv inbound streaming messages
func (s *Server) recvInboundStream(chnl chan ArticleEntry) {
for {
e, ok := <-chnl
if ok {
s.GotArticle(e.MessageID(), e.Newsgroup())
} else {
return
}
}
}
// process an inbound connection
func (s *Server) handleInboundConnection(c net.Conn) {
log.WithFields(log.Fields{
"pkg": "nntp-server",
"addr": c.RemoteAddr(),
}).Debug("handling inbound connection")
var nc Conn
nc = newInboundConn(s, c)
err := nc.Negotiate(true)
if err == nil {
// do they want to stream?
if nc.WantsStreaming() {
// yeeeeeh let's stream
var chnl chan ArticleEntry
chnl, err = nc.StartStreaming()
// for inbound we will recv messages
go s.recvInboundStream(chnl)
nc.StreamAndQuit()
log.WithFields(log.Fields{
"pkg": "nntp-server",
"addr": c.RemoteAddr(),
}).Info("streaming finished")
return
} else {
// handle non streaming commands
nc.ProcessInbound(s)
}
} else {
log.WithFields(log.Fields{
"pkg": "nntp-server",
"addr": c.RemoteAddr(),
}).Warn("failed to negotiate with inbound connection", err)
c.Close()
}
}

View File

@ -0,0 +1,21 @@
package nntp
// state of an nntp connection
type ConnState struct {
// name of parent feed
FeedName string `json:"feedname"`
// name of the connection
ConnName string `json:"connname"`
// hostname of remote connection
HostName string `json:"hostname"`
// current nntp mode
Mode Mode `json:"mode"`
// current selected nntp newsgroup
Group Newsgroup `json:"newsgroup"`
// current selected nntp article
Article string `json:"article"`
// parent feed's policy
Policy *FeedPolicy `json:"feedpolicy"`
// is this connection open?
Open bool `json:"open"`
}

View File

@ -0,0 +1,65 @@
package nntp
import (
"fmt"
"strings"
)
// an nntp stream event
// these are pipelined between nntp servers
type StreamEvent string
func (ev StreamEvent) MessageID() MessageID {
parts := strings.Split(string(ev), " ")
if len(parts) > 1 {
return MessageID(parts[1])
}
return ""
}
func (ev StreamEvent) String() string {
return string(ev)
}
func (ev StreamEvent) Command() string {
return strings.Split(ev.String(), " ")[0]
}
func (ev StreamEvent) Valid() bool {
return strings.Count(ev.String(), " ") == 1 && ev.MessageID().Valid()
}
var stream_TAKETHIS = "TAKETHIS"
var stream_CHECK = "CHECK"
func createStreamEvent(cmd string, msgid MessageID) StreamEvent {
if msgid.Valid() {
return StreamEvent(fmt.Sprintf("%s %s", cmd, msgid))
} else {
return ""
}
}
func stream_rpl_Accept(msgid MessageID) StreamEvent {
return createStreamEvent(RPL_StreamingAccept, msgid)
}
func stream_rpl_Reject(msgid MessageID) StreamEvent {
return createStreamEvent(RPL_StreamingReject, msgid)
}
func stream_rpl_Defer(msgid MessageID) StreamEvent {
return createStreamEvent(RPL_StreamingDefer, msgid)
}
func stream_rpl_Failed(msgid MessageID) StreamEvent {
return createStreamEvent(RPL_StreamingFailed, msgid)
}
func stream_cmd_TAKETHIS(msgid MessageID) StreamEvent {
return createStreamEvent(stream_TAKETHIS, msgid)
}
func stream_cmd_CHECK(msgid MessageID) StreamEvent {
return createStreamEvent(stream_CHECK, msgid)
}

View File

@ -0,0 +1,5 @@
//
// main package for srndv2
// called from main
//
package srnd

View File

@ -0,0 +1,4 @@
//
// nntp article storage
//
package store

View File

@ -0,0 +1,309 @@
package store
import (
"encoding/base32"
"fmt"
log "github.com/Sirupsen/logrus"
"nntpchan/lib/crypto"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
)
// filesystem storage of nntp articles and attachments
type FilesystemStorage struct {
root string
discardAttachments bool
}
func (fs FilesystemStorage) String() string {
return fs.root
}
// ensure the filesystem storage exists and is well formed and read/writable
func (fs FilesystemStorage) Ensure() (err error) {
_, err = os.Stat(fs.String())
if os.IsNotExist(err) {
// directory does not exist, create it
err = os.Mkdir(fs.String(), 0755)
if err != nil {
log.WithFields(log.Fields{
"pkg": "fs-store",
"filepath": fs.String(),
}).Error("failed to ensure directory", err)
// failed to create initial directory
return
}
}
// ensure subdirectories
for _, subdir := range []string{"att", "thm", "articles", "tmp"} {
fpath := filepath.Join(fs.String(), subdir)
_, err = os.Stat(fpath)
if os.IsNotExist(err) {
// make subdirectory
err = os.Mkdir(fpath, 0755)
if err != nil {
log.WithFields(log.Fields{
"pkg": "fs-store",
"filepath": fpath,
}).Error("failed to ensure sub-directory", err)
// failed to create subdirectory
return
}
}
}
return
}
// get the temp file directory
func (fs FilesystemStorage) TempDir() string {
return filepath.Join(fs.String(), "tmp")
}
// get the directory path for attachments
func (fs FilesystemStorage) AttachmentDir() string {
return filepath.Join(fs.String(), "att")
}
// get the directory path for articles
func (fs FilesystemStorage) ArticleDir() string {
return filepath.Join(fs.String(), "articles")
}
// get a temporary file we can use for read/write that deletes itself on close
func (fs FilesystemStorage) obtainTempFile() (f *os.File, err error) {
fname := fmt.Sprintf("tempfile-%x-%d", crypto.RandBytes(4), time.Now().Unix())
log.WithFields(log.Fields{
"pkg": "fs-store",
"filepath": fname,
}).Debug("opening temp file")
f, err = os.OpenFile(filepath.Join(fs.TempDir(), fname), os.O_RDWR|os.O_CREATE, 0400)
return
}
// store an article from a reader to disk
func (fs FilesystemStorage) StoreArticle(r io.Reader, msgid, newsgroup string) (fpath string, err error) {
err = fs.HasArticle(msgid)
if err == nil {
// discard the body as we have it stored already
_, err = io.Copy(ioutil.Discard, r)
log.WithFields(log.Fields{
"pkg": "fs-store",
"msgid": msgid,
}).Debug("discard article")
} else if err == ErrNoSuchArticle {
log.WithFields(log.Fields{
"pkg": "fs-store",
"msgid": msgid,
}).Debug("storing article")
// don't have an article with this message id, write it to disk
var f *os.File
fpath = filepath.Join(fs.ArticleDir(), msgid)
f, err = os.OpenFile(fpath, os.O_CREATE|os.O_WRONLY, 0644)
if err == nil {
// file opened okay, defer the close
defer f.Close()
// write to disk
log.WithFields(log.Fields{
"pkg": "fs-store",
"msgid": msgid,
}).Debug("writing to disk")
var n int64
n, err = io.Copy(f, r)
if err == nil {
log.WithFields(log.Fields{
"pkg": "fs-store",
"msgid": msgid,
"written": n,
}).Debug("wrote article to disk")
// symlink
g := fs.newsgroupDir(newsgroup)
_, e := os.Stat(g)
if os.IsNotExist(e) {
err = os.Mkdir(g, 0700)
}
if err == nil {
err = os.Symlink(filepath.Join("..", msgid), filepath.Join(g, msgid))
}
if err != nil {
log.WithFields(log.Fields{
"pkg": "fs-store",
"msgid": msgid,
"group": newsgroup,
}).Debug("failed to link article")
}
} else {
log.WithFields(log.Fields{
"pkg": "fs-store",
"msgid": msgid,
"written": n,
}).Error("write to disk failed")
}
} else {
log.WithFields(log.Fields{
"pkg": "fs-store",
"msgid": msgid,
"filepath": fpath,
}).Error("did not open file for storage", err)
}
}
return
}
func (fs FilesystemStorage) newsgroupDir(group string) string {
return filepath.Join(fs.ArticleDir(), group)
}
// check if we have the article with this message id
func (fs FilesystemStorage) HasArticle(msgid string) (err error) {
fpath := fs.ArticleDir()
fpath = filepath.Join(fpath, msgid)
log.WithFields(log.Fields{
"pkg": "fs-store",
"msgid": msgid,
"filepath": fpath,
}).Debug("check for article")
_, err = os.Stat(fpath)
if os.IsNotExist(err) {
err = ErrNoSuchArticle
}
return
}
func (fs FilesystemStorage) DeleteArticle(msgid string) (err error) {
err = os.Remove(filepath.Join(fs.ArticleDir(), msgid))
return
}
// store attachment onto filesystem
func (fs FilesystemStorage) StoreAttachment(r io.Reader, filename string) (fpath string, err error) {
if fs.discardAttachments {
_, err = io.Copy(ioutil.Discard, r)
return
}
// open temp file for storage
var tf *os.File
tf, err = fs.obtainTempFile()
if err == nil {
// we have the temp file
// close tempfile when done
defer func() {
n := tf.Name()
tf.Close()
os.Remove(n)
}()
// create hasher
h := crypto.Hash()
// create multiwriter
mw := io.MultiWriter(tf, h)
log.WithFields(log.Fields{
"pkg": "fs-store",
"filename": filename,
}).Debug("writing to disk")
var n int64
// write all of the reader to the multiwriter
n, err = io.Copy(mw, r)
if err == nil {
// successful write
// get file checksum
d := h.Sum(nil)
// rename file to hash + extension from filename
fpath = base32.StdEncoding.EncodeToString(d) + filepath.Ext(filename)
fpath = filepath.Join(fs.AttachmentDir(), fpath)
_, err = os.Stat(fpath)
// is that file there?
if os.IsNotExist(err) {
// it's not there, let's write it
var f *os.File
f, err = os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE, 0644)
if err == nil {
// file opened
defer f.Close()
// seek to beginning of tempfile
tf.Seek(0, os.SEEK_SET)
// write all of the temp file to the storage file
n, err = io.Copy(f, tf)
// if err == nil by here it's all good
l := log.WithFields(log.Fields{
"pkg": "fs-store",
"filename": filename,
"hash": d,
"filepath": fpath,
"size": n,
})
if err == nil {
l.Debug("wrote attachment to disk")
} else {
l.Error("failed to write attachment to disk", err)
}
} else {
log.WithFields(log.Fields{
"pkg": "fs-store",
"filename": filename,
"hash": d,
"filepath": fpath,
}).Error("failed to open file")
}
} else {
log.WithFields(log.Fields{
"pkg": "fs-store",
"filename": filename,
"hash": d,
"filepath": fpath,
"size": n,
}).Debug("attachment exists on disk")
}
}
} else {
log.WithFields(log.Fields{
"pkg": "fs-store",
"filename": filename,
}).Error("cannot open temp file for attachment", err)
}
return
}
// open article given message-id
// does not check validity
func (fs FilesystemStorage) OpenArticle(msgid string) (r *os.File, err error) {
r, err = os.Open(filepath.Join(fs.ArticleDir(), msgid))
return
}
func (fs FilesystemStorage) ForEachInGroup(group string, chnl chan string) {
g := fs.newsgroupDir(group)
filepath.Walk(g, func(path string, info os.FileInfo, err error) error {
if info != nil {
chnl <- info.Name()
}
return nil
})
}
// create a new filesystem storage directory
// ensure directory and subdirectories
func NewFilesytemStorage(dirname string, unpackAttachments bool) (fs FilesystemStorage, err error) {
dirname, err = filepath.Abs(dirname)
if err == nil {
log.WithFields(log.Fields{
"pkg": "fs-store",
"filepath": dirname,
}).Info("Creating New Filesystem Storage")
fs = FilesystemStorage{
root: dirname,
discardAttachments: unpackAttachments,
}
err = fs.Ensure()
}
return
}
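Usage sketch, not part of the diff, written as if inside package store with strings and fmt additionally imported; the directory name, message-id and article text are placeholders.

	st, err := NewFilesytemStorage("storage", false)
	if err == nil {
		body := strings.NewReader("Newsgroups: overchan.test\n\nhello from the sketch\n")
		// the boolean above is stored into discardAttachments (see the constructor)
		fpath, err := st.StoreArticle(body, "<deadbeef$1481700000@example.tld>", "overchan.test")
		if err == nil {
			fmt.Println("stored article at", fpath)
		}
	}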

View File

@ -0,0 +1 @@
package store

View File

@ -0,0 +1,49 @@
package store
import (
"nntpchan/lib/util"
"io"
"os"
)
type nullStore struct{}
func (n *nullStore) discard(r io.Reader) (s string, err error) {
_, err = io.Copy(util.Discard, r)
s = "/dev/null"
return
}
func (n *nullStore) HasArticle(msgid string) error {
return ErrNoSuchArticle
}
func (n *nullStore) StoreAttachment(r io.Reader, filename string) (string, error) {
return n.discard(r)
}
func (n *nullStore) StoreArticle(r io.Reader, msgid, newsgroup string) (string, error) {
return n.discard(r)
}
func (n *nullStore) DeleteArticle(msgid string) (err error) {
return
}
func (n *nullStore) Ensure() (err error) {
return
}
func (n *nullStore) ForEachInGroup(newsgroup string, chnl chan string) {
return
}
func (n *nullStore) OpenArticle(msgid string) (r *os.File, err error) {
err = ErrNoSuchArticle
return
}
// create a storage backend that does nothing
func NewNullStorage() Storage {
return &nullStore{}
}

View File

@ -0,0 +1,41 @@
package store
import (
"errors"
"io"
"os"
)
var ErrNoSuchArticle = errors.New("no such article")
// storage for nntp articles and attachments
type Storage interface {
// store an attachment that we read from an io.Reader
// the filename is used to hint what extension to store it as
// returns absolute filepath where attachment was stored and nil on success
// returns empty string and an error if an error occurred while storing
StoreAttachment(r io.Reader, filename string) (string, error)
// store an article that we read from an io.Reader
// the message id and newsgroup are used to hint where the article is stored
// returns absolute filepath to where the article was stored and nil on success
// returns empty string and an error if an error occurred while storing
StoreArticle(r io.Reader, msgid, newsgroup string) (string, error)
// return nil if the article with the given message id exists in this storage
// return ErrNoSuchArticle if it does not exist or an error if another error occurred while checking
HasArticle(msgid string) error
// delete article from underlying storage
DeleteArticle(msgid string) error
// open article for reading
OpenArticle(msgid string) (*os.File, error)
// ensure the underlying storage backend is created
Ensure() error
// iterate over all messages in a newsgroup
// send results down a channel
ForEachInGroup(newsgroup string, chnl chan string)
}

View File

@ -0,0 +1,4 @@
//
// attachment thumbnailing
//
package thumbnail

Some files were not shown because too many files have changed in this diff