package srnd

import (
	"fmt"
	"log"
	"net"
	"net/http"
	"net/url"
)
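
// VarnishCache implements CacheInterface by purging pages from a
// fronting varnish instance instead of rendering markup itself.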
type VarnishCache struct {
	varnish_url      string
	prefix           string
	handler          *nullHandler
	client           *http.Client
	workers          int
	threadsRegenChan chan ArticleEntry
	invalidateChan   chan *url.URL
}
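
// invalidate queues a PURGE of r once for the default language and once
// per configured translation, tagging each variant with a lang query
// parameter.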
func (self *VarnishCache) invalidate(r string) {
	var langs []string
	langs = append(langs, "")
	self.handler.ForEachI18N(func(lang string) {
		langs = append(langs, lang)
	})
	for _, lang := range langs {
		// each language variant gets its own copy of the url
		u, err := url.Parse(r)
		if err != nil {
			log.Println("varnish cache: cannot parse url", r, err)
			continue
		}
		if lang != "" {
			q := u.Query()
			q.Add("lang", lang)
			u.RawQuery = q.Encode()
		}
		self.invalidateChan <- u
	}
}
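
// doRequest sends a single PURGE to varnish; note that varnish only
// honors PURGE if its VCL is written to handle the method.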
func (self *VarnishCache) doRequest(u *url.URL) {
	if u == nil {
		return
	}
	resp, err := self.client.Do(&http.Request{
		Method: "PURGE",
		URL:    u,
	})
	if err == nil {
		resp.Body.Close()
	} else {
		log.Println("varnish cache error", err)
	}
}
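
// DeleteBoardMarkup purges every page of a board plus its index; with a
// purging cache this is the same work RegenerateBoard does.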
func (self *VarnishCache) DeleteBoardMarkup(group string) {
	n, _ := self.handler.database.GetPagesPerBoard(group)
	for n > 0 {
		self.invalidate(fmt.Sprintf("%s%s%s-%d.html", self.varnish_url, self.prefix, group, n))
		self.invalidate(fmt.Sprintf("%s%sb/%s/%d/", self.varnish_url, self.prefix, group, n))
		n--
	}
	self.invalidate(fmt.Sprintf("%s%sb/%s/", self.varnish_url, self.prefix, group))
}

// DeleteThreadMarkup purges the html and json forms of the thread page
// rooted at root_post_id.
func (self *VarnishCache) DeleteThreadMarkup(root_post_id string) {
	id := HashMessageID(root_post_id)
	self.invalidate(fmt.Sprintf("%s%sthread-%s.json", self.varnish_url, self.prefix, id))
	self.invalidate(fmt.Sprintf("%s%st/%s/json", self.varnish_url, self.prefix, id))
	self.invalidate(fmt.Sprintf("%s%sthread-%s.html", self.varnish_url, self.prefix, id))
	self.invalidate(fmt.Sprintf("%s%st/%s/", self.varnish_url, self.prefix, id))
}

// RegenAll queues every thread in every newsgroup for regeneration;
// the frontend uses this for regen on start.
func (self *VarnishCache) RegenAll() {
	groups := self.handler.database.GetAllNewsgroups()
	for _, group := range groups {
		self.handler.database.GetGroupThreads(group, self.threadsRegenChan)
	}
}
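
// RegenFrontPage purges the front page, the history page and the first
// pages of the overboard.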
func (self *VarnishCache) RegenFrontPage() {
	self.invalidate(fmt.Sprintf("%s%s", self.varnish_url, self.prefix))
	// TODO: this is also lazy af
	self.invalidate(fmt.Sprintf("%s%shistory.html", self.varnish_url, self.prefix))
	self.invalidateUkko(10)
}
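
// invalidateUkko purges the overboard (ukko) index and its first pages,
// in both html and json forms.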
func (self *VarnishCache) invalidateUkko(pages int) {
	self.invalidate(fmt.Sprintf("%s%sukko.html", self.varnish_url, self.prefix))
	self.invalidate(fmt.Sprintf("%s%so/", self.varnish_url, self.prefix))
	self.invalidate(fmt.Sprintf("%s%sukko.json", self.varnish_url, self.prefix))
	self.invalidate(fmt.Sprintf("%s%so/json", self.varnish_url, self.prefix))
	n := 0
	for n < pages {
		self.invalidate(fmt.Sprintf("%s%so/%d/json", self.varnish_url, self.prefix, n))
		self.invalidate(fmt.Sprintf("%s%so/%d/", self.varnish_url, self.prefix, n))
		n++
	}
}

// RegenerateBoard purges every page of the board and then its index.
func (self *VarnishCache) RegenerateBoard(group string) {
	n, _ := self.handler.database.GetPagesPerBoard(group)
	for n > 0 {
		self.invalidate(fmt.Sprintf("%s%s%s-%d.html", self.varnish_url, self.prefix, group, n))
		self.invalidate(fmt.Sprintf("%s%sb/%s/%d/", self.varnish_url, self.prefix, group, n))
		n--
	}
	self.invalidate(fmt.Sprintf("%s%sb/%s/", self.varnish_url, self.prefix, group))
}

// RegenOnModEvent regenerates pages after a mod event: the affected
// thread, the board index when the first page changed, and the board
// page the post was on.
func (self *VarnishCache) RegenOnModEvent(newsgroup, msgid, root string, page int) {
	self.Regen(ArticleEntry{newsgroup, root})
	if page == 0 {
		self.invalidate(fmt.Sprintf("%s%sb/%s/", self.varnish_url, self.prefix, newsgroup))
	}
	self.invalidate(fmt.Sprintf("%s%sb/%s/%d/", self.varnish_url, self.prefix, newsgroup, page))
}
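
// poll drains the thread regen channel, purging each thread and then its
// whole board.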
func (self *VarnishCache) poll() {
	for {
		ent := <-self.threadsRegenChan
		self.Regen(ent)
		self.RegenerateBoard(ent.Newsgroup())
	}
}
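
// Start launches the regen poller and at least one purge worker.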
func (self *VarnishCache) Start() {
	go self.poll()
	workers := self.workers
	if workers <= 0 {
		workers = 1
	}
	for workers > 0 {
		go self.doWorker()
		workers--
	}
}
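
// doWorker loops forever, issuing one queued PURGE request at a time.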
func (self *VarnishCache) doWorker() {
	for {
		self.doRequest(<-self.invalidateChan)
	}
}
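
// Regen purges the markup for a single thread.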
func (self *VarnishCache) Regen(msg ArticleEntry) {
	self.DeleteThreadMarkup(msg.MessageID())
}

func (self *VarnishCache) GetHandler() CacheHandler {
	return self.handler
}

func (self *VarnishCache) Close() {
	// nothing to do
}

func (self *VarnishCache) SetRequireCaptcha(required bool) {
	self.handler.requireCaptcha = required
}
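
// NewVarnishCache creates a VarnishCache whose PURGE requests originate
// from bind_addr, presumably so varnish can restrict PURGE to a known
// source address. A rough usage sketch, with hypothetical values:
//
//	cache := NewVarnishCache("http://127.0.0.1:6081/", "127.0.0.1:0", "/",
//		webroot, name, translations, 4, true, db, store)
//	cache.Start()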
func NewVarnishCache(varnish_url, bind_addr, prefix, webroot, name, translations string, workers int, attachments bool, db Database, store ArticleStore) CacheInterface {
	cache := new(VarnishCache)
	cache.invalidateChan = make(chan *url.URL)
	cache.threadsRegenChan = make(chan ArticleEntry)
	cache.workers = workers
	local_addr, err := net.ResolveTCPAddr("tcp", bind_addr)
	if err != nil {
		log.Fatalf("failed to resolve %s for varnish cache: %s", bind_addr, err)
	}
	cache.client = &http.Client{
		Transport: &http.Transport{
			// dial varnish from the configured local address
			Dial: func(network, addr string) (c net.Conn, err error) {
				var remote_addr *net.TCPAddr
				remote_addr, err = net.ResolveTCPAddr(network, addr)
				if err == nil {
					c, err = net.DialTCP(network, local_addr, remote_addr)
				}
				return
			},
		},
	}
	cache.prefix = "/"
	cache.handler = &nullHandler{
		prefix:         prefix,
		name:           name,
		attachments:    attachments,
		database:       db,
		requireCaptcha: true,
		i18n:           make(map[string]*I18N),
		translations:   translations,
	}
	cache.varnish_url = varnish_url
	return cache
}