
feat: store cache with disk btree backend

wdvxdr 2021-09-28 22:23:33 +08:00
parent 4da6584f10
commit 7c4be95c19
No known key found for this signature in database
GPG Key ID: 703F8C071DE7A1B6
7 changed files with 271 additions and 155 deletions
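In short: checkMedia now serializes each media element's metadata once and, when the new cache database is configured, inserts it into an on-disk btree keyed by the element's MD5 instead of writing one small file per item. A minimal sketch of the resulting write path (the helper name storeImageMeta is hypothetical; the packages are go-cqhttp internals touched by this diff):

import (
	"encoding/hex"
	"os"
	"path"

	"github.com/Mrs4s/go-cqhttp/global"
	"github.com/Mrs4s/go-cqhttp/internal/cache"
)

// storeImageMeta mirrors the new branch in checkMedia: prefer the btree-backed
// cache when it is enabled, otherwise fall back to the legacy one-file-per-item
// layout under global.ImagePath.
func storeImageMeta(md5, data []byte) {
	if cache.EnableCacheDB {
		cache.Image.Insert(md5, data) // keyed by the 16-byte MD5
		return
	}
	filename := hex.EncodeToString(md5) + ".image"
	if !global.PathExists(path.Join(global.ImagePath, filename)) {
		_ = os.WriteFile(path.Join(global.ImagePath, filename), data, 0o644)
	}
}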

View File

@ -9,6 +9,7 @@ import (
"time"
"github.com/Mrs4s/go-cqhttp/db"
"github.com/Mrs4s/go-cqhttp/internal/cache"
"github.com/Mrs4s/go-cqhttp/global"
"github.com/Mrs4s/go-cqhttp/internal/base"
@ -548,29 +549,37 @@ func (bot *CQBot) groupDecrease(groupCode, userUin int64, operator *client.Group
}
func (bot *CQBot) checkMedia(e []message.IMessageElement) {
// TODO(wdvxdr): remove these old cache files in v1.0.0
for _, elem := range e {
switch i := elem.(type) {
case *message.GroupImageElement:
filename := hex.EncodeToString(i.Md5) + ".image"
if !global.PathExists(path.Join(global.ImagePath, filename)) {
_ = os.WriteFile(path.Join(global.ImagePath, filename), binary.NewWriterF(func(w *binary.Writer) {
data := binary.NewWriterF(func(w *binary.Writer) {
w.Write(i.Md5)
w.WriteUInt32(uint32(i.Size))
w.WriteString(i.ImageId)
w.WriteString(i.Url)
}), 0o644)
})
filename := hex.EncodeToString(i.Md5) + ".image"
if cache.EnableCacheDB {
cache.Image.Insert(i.Md5, data)
} else if !global.PathExists(path.Join(global.ImagePath, filename)) {
_ = os.WriteFile(path.Join(global.ImagePath, filename), data, 0o644)
}
case *message.FriendImageElement:
filename := hex.EncodeToString(i.Md5) + ".image"
if !global.PathExists(path.Join(global.ImagePath, filename)) {
_ = os.WriteFile(path.Join(global.ImagePath, filename), binary.NewWriterF(func(w *binary.Writer) {
data := binary.NewWriterF(func(w *binary.Writer) {
w.Write(i.Md5)
w.WriteUInt32(uint32(i.Size))
w.WriteString(i.ImageId)
w.WriteString(i.Url)
}), 0o644)
})
filename := hex.EncodeToString(i.Md5) + ".image"
if cache.EnableCacheDB {
cache.Image.Insert(i.Md5, data)
} else if !global.PathExists(path.Join(global.ImagePath, filename)) {
_ = os.WriteFile(path.Join(global.ImagePath, filename), data, 0o644)
}
case *message.VoiceElement:
// todo: don't download original file?
i.Name = strings.ReplaceAll(i.Name, "{", "")
i.Name = strings.ReplaceAll(i.Name, "}", "")
if !global.PathExists(path.Join(global.VoicePath, i.Name)) {
@ -582,16 +591,19 @@ func (bot *CQBot) checkMedia(e []message.IMessageElement) {
_ = os.WriteFile(path.Join(global.VoicePath, i.Name), b, 0o644)
}
case *message.ShortVideoElement:
filename := hex.EncodeToString(i.Md5) + ".video"
if !global.PathExists(path.Join(global.VideoPath, filename)) {
_ = os.WriteFile(path.Join(global.VideoPath, filename), binary.NewWriterF(func(w *binary.Writer) {
data := binary.NewWriterF(func(w *binary.Writer) {
w.Write(i.Md5)
w.Write(i.ThumbMd5)
w.WriteUInt32(uint32(i.Size))
w.WriteUInt32(uint32(i.ThumbSize))
w.WriteString(i.Name)
w.Write(i.Uuid)
}), 0o644)
})
filename := hex.EncodeToString(i.Md5) + ".video"
if cache.EnableCacheDB {
cache.Video.Insert(i.Md5, data)
} else if !global.PathExists(path.Join(global.VideoPath, filename)) {
_ = os.WriteFile(path.Join(global.VideoPath, filename), data, 0o644)
}
i.Name = filename
i.Url = bot.Client.GetShortVideoUrl(i.Uuid, i.Md5)

View File

@ -12,8 +12,8 @@ import (
const (
sha1Size = 20 // md5 sha1
tableSize = (2048 - 1) / int(unsafe.Sizeof(item{}))
cacheSlots = 13 // prime
tableSize = (1024 - 1) / int(unsafe.Sizeof(item{}))
cacheSlots = 11 // prime
superSize = int(unsafe.Sizeof(super{}))
tableStructSize = int(unsafe.Sizeof(table{}))
)
@ -40,8 +40,8 @@ type super struct {
alloc int64
}
// Btree ...
type Btree struct {
// DB ...
type DB struct {
fd *os.File
top int64
freeTop int64
@ -52,61 +52,61 @@ type Btree struct {
deleteLarger bool
}
func (bt *Btree) get(offset int64) *table {
func (d *DB) get(offset int64) *table {
assert(offset != 0)
// take from cache
slot := &bt.cache[offset%cacheSlots]
slot := &d.cache[offset%cacheSlots]
if slot.offset == offset {
return slot.table
}
table := new(table)
bt.fd.Seek(offset, io.SeekStart)
err := readTable(bt.fd, table)
d.fd.Seek(offset, io.SeekStart)
err := readTable(d.fd, table)
if err != nil {
panic(errors.Wrap(err, "btree I/O error"))
}
return table
}
func (bt *Btree) put(t *table, offset int64) {
func (d *DB) put(t *table, offset int64) {
assert(offset != 0)
/* overwrite cache */
slot := &bt.cache[offset%cacheSlots]
// overwrite cache
slot := &d.cache[offset%cacheSlots]
slot.table = t
slot.offset = offset
}
func (bt *Btree) flush(t *table, offset int64) {
func (d *DB) flush(t *table, offset int64) {
assert(offset != 0)
bt.fd.Seek(offset, io.SeekStart)
err := writeTable(bt.fd, t)
d.fd.Seek(offset, io.SeekStart)
err := writeTable(d.fd, t)
if err != nil {
panic(errors.Wrap(err, "btree I/O error"))
}
bt.put(t, offset)
d.put(t, offset)
}
func (bt *Btree) flushSuper() {
bt.fd.Seek(0, io.SeekStart)
func (d *DB) flushSuper() {
d.fd.Seek(0, io.SeekStart)
super := super{
top: bt.top,
freeTop: bt.freeTop,
alloc: bt.alloc,
top: d.top,
freeTop: d.freeTop,
alloc: d.alloc,
}
err := writeSuper(bt.fd, &super)
err := writeSuper(d.fd, &super)
if err != nil {
panic(errors.Wrap(err, "btree I/O error"))
}
}
// Open opens an existing btree file
func Open(name string) (*Btree, error) {
btree := new(Btree)
func Open(name string) (*DB, error) {
btree := new(DB)
fd, err := os.OpenFile(name, os.O_RDWR, 0o644)
if err != nil {
return nil, errors.Wrap(err, "btree open file failed")
@ -122,8 +122,8 @@ func Open(name string) (*Btree, error) {
}
// Create creates a database
func Create(name string) (*Btree, error) {
btree := new(Btree)
func Create(name string) (*DB, error) {
btree := new(DB)
fd, err := os.OpenFile(name, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0o644)
if err != nil {
return nil, errors.Wrap(err, "btree open file failed")
@ -136,16 +136,16 @@ func Create(name string) (*Btree, error) {
}
// Close closes the database
func (bt *Btree) Close() error {
_ = bt.fd.Sync()
err := bt.fd.Close()
func (d *DB) Close() error {
_ = d.fd.Sync()
err := d.fd.Close()
for i := 0; i < cacheSlots; i++ {
bt.cache[i] = cache{}
d.cache[i] = cache{}
}
return errors.Wrap(err, "btree close failed")
}
func collapse(bt *Btree, offset int64) int64 {
func collapse(bt *DB, offset int64) int64 {
table := bt.get(offset)
if table.size != 0 {
/* unable to collapse */
@ -165,7 +165,7 @@ func collapse(bt *Btree, offset int64) int64 {
// split splits a table. The pivot item is stored to 'sha1' and 'offset'.
// Returns the offset to the new table.
func (bt *Btree) split(t *table, hash *byte, offset *int64) int64 {
func (d *DB) split(t *table, hash *byte, offset *int64) int64 {
copysha1(hash, &t.items[tableSize/2].sha1[0])
*offset = t.items[tableSize/2].offset
@ -176,61 +176,61 @@ func (bt *Btree) split(t *table, hash *byte, offset *int64) int64 {
copy(ntable.items[:ntable.size+1], t.items[tableSize/2+1:])
noff := bt.allocChunk(tableStructSize)
bt.flush(ntable, noff)
noff := d.allocChunk(tableStructSize)
d.flush(ntable, noff)
// make sure data is written before a reference is added to it
_ = bt.fd.Sync()
_ = d.fd.Sync()
return noff
}
// takeSmallest finds and removes the smallest item from the given table. The key of the item
// is stored to 'sha1'. Returns the offset to the item.
func (bt *Btree) takeSmallest(toff int64, sha1 *byte) int64 {
table := bt.get(toff)
func (d *DB) takeSmallest(toff int64, sha1 *byte) int64 {
table := d.get(toff)
assert(table.size > 0)
var off int64
child := table.items[0].child
if child == 0 {
off = bt.remove(table, 0, sha1)
off = d.remove(table, 0, sha1)
} else {
/* recursion */
off = bt.takeSmallest(child, sha1)
table.items[0].child = collapse(bt, child)
off = d.takeSmallest(child, sha1)
table.items[0].child = collapse(d, child)
}
bt.flush(table, toff)
d.flush(table, toff)
// make sure data is written before a reference is added to it
_ = bt.fd.Sync()
_ = d.fd.Sync()
return off
}
// takeLargest finds and removes the largest item from the given table. The key of the item
// is stored to 'sha1'. Returns the offset to the item.
func (bt *Btree) takeLargest(toff int64, sha1 *byte) int64 {
table := bt.get(toff)
func (d *DB) takeLargest(toff int64, sha1 *byte) int64 {
table := d.get(toff)
assert(table.size > 0)
var off int64
child := table.items[table.size].child
if child == 0 {
off = bt.remove(table, table.size-1, sha1)
off = d.remove(table, table.size-1, sha1)
} else {
/* recursion */
off = bt.takeLargest(child, sha1)
table.items[table.size].child = collapse(bt, child)
off = d.takeLargest(child, sha1)
table.items[table.size].child = collapse(d, child)
}
bt.flush(table, toff)
d.flush(table, toff)
// make sure data is written before a reference is added to it
_ = bt.fd.Sync()
_ = d.fd.Sync()
return off
}
// remove deletes the item at position 'i' from the given table. The key of the
// removed item is stored to 'sha1'. Returns the offset to the item.
func (bt *Btree) remove(t *table, i int, sha1 *byte) int64 {
func (d *DB) remove(t *table, i int, sha1 *byte) int64 {
assert(i < t.size)
if sha1 != nil {
@ -246,11 +246,11 @@ func (bt *Btree) remove(t *table, i int, sha1 *byte) int64 {
child tables */
var noff int64
if rand.Int()&1 != 0 {
noff = bt.takeLargest(lc, &t.items[i].sha1[0])
t.items[i].child = collapse(bt, lc)
noff = d.takeLargest(lc, &t.items[i].sha1[0])
t.items[i].child = collapse(d, lc)
} else {
noff = bt.takeSmallest(rc, &t.items[i].sha1[0])
t.items[i+1].child = collapse(bt, rc)
noff = d.takeSmallest(rc, &t.items[i].sha1[0])
t.items[i+1].child = collapse(d, rc)
}
t.items[i].child = noff
} else {
@ -268,8 +268,8 @@ func (bt *Btree) remove(t *table, i int, sha1 *byte) int64 {
return offset
}
func (bt *Btree) insert(toff int64, sha1 *byte, data []byte, size int) int64 {
table := bt.get(toff)
func (d *DB) insert(toff int64, sha1 *byte, data []byte, size int) int64 {
table := d.get(toff)
assert(table.size < tableSize-1)
left, right := 0, table.size
@ -279,7 +279,7 @@ func (bt *Btree) insert(toff int64, sha1 *byte, data []byte, size int) int64 {
case cmp == 0:
// already in the table
ret := table.items[mid].offset
bt.put(table, toff)
d.put(table, toff)
return ret
case cmp < 0:
right = mid
@ -293,25 +293,25 @@ func (bt *Btree) insert(toff int64, sha1 *byte, data []byte, size int) int64 {
lc := table.items[i].child
if lc != 0 {
/* recursion */
ret = bt.insert(lc, sha1, data, size)
ret = d.insert(lc, sha1, data, size)
/* check if we need to split */
child := bt.get(lc)
child := d.get(lc)
if child.size < tableSize-1 {
/* nothing to do */
bt.put(table, toff)
bt.put(child, lc)
d.put(table, toff)
d.put(child, lc)
return ret
}
/* overwrites SHA-1 */
rc = bt.split(child, sha1, &off)
rc = d.split(child, sha1, &off)
/* flush just in case changes happened */
bt.flush(child, lc)
d.flush(child, lc)
// make sure data is written before a reference is added to it
_ = bt.fd.Sync()
_ = d.fd.Sync()
} else {
off = bt.insertData(data, size)
off = d.insertData(data, size)
ret = off
}
@ -324,41 +324,41 @@ func (bt *Btree) insert(toff int64, sha1 *byte, data []byte, size int) int64 {
table.items[i].child = lc
table.items[i+1].child = rc
bt.flush(table, toff)
d.flush(table, toff)
return ret
}
func (bt *Btree) insertData(data []byte, size int) int64 {
func (d *DB) insertData(data []byte, size int) int64 {
if data == nil {
return int64(size)
}
assert(len(data) == size)
offset := bt.allocChunk(4 + len(data))
offset := d.allocChunk(4 + len(data))
bt.fd.Seek(offset, io.SeekStart)
err := write32(bt.fd, int32(len(data)))
d.fd.Seek(offset, io.SeekStart)
err := write32(d.fd, int32(len(data)))
if err != nil {
panic(errors.Wrap(err, "btree I/O error"))
}
_, err = bt.fd.Write(data)
_, err = d.fd.Write(data)
if err != nil {
panic(errors.Wrap(err, "btree I/O error"))
}
// make sure data is written before a reference is added to it
_ = bt.fd.Sync()
_ = d.fd.Sync()
return offset
}
// delete removes an item with key 'sha1' from the given table. The offset to the
// removed item is returned.
// Please note that 'sha1' is overwritten when called inside the allocator.
func (bt *Btree) delete(offset int64, hash *byte) int64 {
func (d *DB) delete(offset int64, hash *byte) int64 {
if offset == 0 {
return 0
}
table := bt.get(offset)
table := d.get(offset)
left, right := 0, table.size
for left < right {
@ -366,8 +366,8 @@ func (bt *Btree) delete(offset int64, hash *byte) int64 {
switch cmp := cmp(hash, &table.items[i].sha1[0]); {
case cmp == 0:
// found
ret := bt.remove(table, i, hash)
bt.flush(table, offset)
ret := d.remove(table, i, hash)
d.flush(table, offset)
return ret
case cmp < 0:
right = i
@ -379,39 +379,39 @@ func (bt *Btree) delete(offset int64, hash *byte) int64 {
// not found - recursion
i := left
child := table.items[i].child
ret := bt.delete(child, hash)
ret := d.delete(child, hash)
if ret != 0 {
table.items[i].child = collapse(bt, child)
table.items[i].child = collapse(d, child)
}
if ret == 0 && bt.deleteLarger && i < table.size {
ret = bt.remove(table, i, hash)
if ret == 0 && d.deleteLarger && i < table.size {
ret = d.remove(table, i, hash)
}
if ret != 0 {
/* flush just in case changes happened */
bt.flush(table, offset)
d.flush(table, offset)
} else {
bt.put(table, offset)
d.put(table, offset)
}
return ret
}
func (bt *Btree) insertTopLevel(toff *int64, sha1 *byte, data []byte, size int) int64 { // nolint:unparam
func (d *DB) insertTopLevel(toff *int64, sha1 *byte, data []byte, size int) int64 { // nolint:unparam
var off, ret, rc int64
if *toff != 0 {
ret = bt.insert(*toff, sha1, data, size)
ret = d.insert(*toff, sha1, data, size)
/* check if we need to split */
table := bt.get(*toff)
table := d.get(*toff)
if table.size < tableSize-1 {
/* nothing to do */
bt.put(table, *toff)
d.put(table, *toff)
return ret
}
rc = bt.split(table, sha1, &off)
bt.flush(table, *toff)
rc = d.split(table, sha1, &off)
d.flush(table, *toff)
} else {
off = bt.insertData(data, size)
off = d.insertData(data, size)
ret = off
}
@ -423,21 +423,21 @@ func (bt *Btree) insertTopLevel(toff *int64, sha1 *byte, data []byte, size int)
t.items[0].child = *toff
t.items[1].child = rc
ntoff := bt.allocChunk(tableStructSize)
bt.flush(t, ntoff)
ntoff := d.allocChunk(tableStructSize)
d.flush(t, ntoff)
*toff = ntoff
// make sure data is written before a reference is added to it
_ = bt.fd.Sync()
_ = d.fd.Sync()
return ret
}
func (bt *Btree) lookup(toff int64, sha1 *byte) int64 {
func (d *DB) lookup(toff int64, sha1 *byte) int64 {
if toff == 0 {
return 0
}
table := bt.get(toff)
table := d.get(toff)
left, right := 0, table.size
for left < right {
@ -446,7 +446,7 @@ func (bt *Btree) lookup(toff int64, sha1 *byte) int64 {
case cmp == 0:
// found
ret := table.items[mid].offset
bt.put(table, toff)
d.put(table, toff)
return ret
case cmp < 0:
right = mid
@ -457,38 +457,38 @@ func (bt *Btree) lookup(toff int64, sha1 *byte) int64 {
i := left
child := table.items[i].child
bt.put(table, toff)
return bt.lookup(child, sha1)
d.put(table, toff)
return d.lookup(child, sha1)
}
// Insert adds a new item with key 'sha1' and the contents in 'data' to the
// database file.
func (bt *Btree) Insert(csha1 *byte, data []byte) {
func (d *DB) Insert(csha1 *byte, data []byte) {
/* SHA-1 must be in writable memory */
var sha1 [sha1Size]byte
copysha1(&sha1[0], csha1)
_ = bt.insertTopLevel(&bt.top, &sha1[0], data, len(data))
freeQueued(bt)
bt.flushSuper()
_ = d.insertTopLevel(&d.top, &sha1[0], data, len(data))
freeQueued(d)
d.flushSuper()
}
// Get looks up the item with the given key 'sha1' in the database file and
// returns its contents, or nil if the item is not found.
func (bt *Btree) Get(sha1 *byte) []byte {
off := bt.lookup(bt.top, sha1)
func (d *DB) Get(sha1 *byte) []byte {
off := d.lookup(d.top, sha1)
if off == 0 {
return nil
}
bt.fd.Seek(off, io.SeekStart)
length, err := read32(bt.fd)
d.fd.Seek(off, io.SeekStart)
length, err := read32(d.fd)
if err != nil {
return nil
}
data := make([]byte, length)
n, err := io.ReadFull(bt.fd, data)
n, err := io.ReadFull(d.fd, data)
if err != nil {
return nil
}
@ -496,6 +496,6 @@ func (bt *Btree) Get(sha1 *byte) []byte {
}
// Delete removes the item with the given key 'sha1' from the database file.
func (bt *Btree) Delete(sha1 *byte) error {
func (d *DB) Delete(sha1 *byte) error {
return errors.New("impl me")
}

View File

@ -50,3 +50,7 @@ func TestBtree(t *testing.T) {
}
assert2.NoError(t, bt.Close())
}
func TestOpen(t *testing.T) {
println(tableSize)
}
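A hypothetical round-trip test for the renamed DB type, using only the API visible in this diff (Create, Insert, Get, Close) and assuming crypto/sha1 and path/filepath are imported:

func TestInsertGet(t *testing.T) {
	// create a fresh database file in a temporary directory
	db, err := Create(filepath.Join(t.TempDir(), "roundtrip.db"))
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	// keys are sha1Size (20) bytes; insert one record and read it back
	key := sha1.Sum([]byte("hello"))
	db.Insert(&key[0], []byte("world"))
	if got := db.Get(&key[0]); string(got) != "world" {
		t.Fatalf("unexpected value: %q", got)
	}
}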

View File

@ -18,7 +18,7 @@ var (
fqueueLen = 0
)
func freeQueued(bt *Btree) {
func freeQueued(bt *DB) {
for i := 0; i < fqueueLen; i++ {
chunk := &fqueue[i]
bt.freeChunk(chunk.offset, chunk.len)
@ -26,13 +26,13 @@ func freeQueued(bt *Btree) {
fqueueLen = 0
}
func (bt *Btree) allocChunk(size int) int64 {
func (d *DB) allocChunk(size int) int64 {
assert(size > 0)
size = power2(size)
var offset int64
if bt.inAllocator {
if d.inAllocator {
const i32s = unsafe.Sizeof(int32(0))
/* create fake size SHA-1 */
@ -42,10 +42,10 @@ func (bt *Btree) allocChunk(size int) int64 {
*(*uint32)(unsafe.Add(p, i32s)) = uint32(size) // ((__be32 *) sha1)[1] = to_be32(size);
/* find free chunk with the larger or the same size/SHA-1 */
bt.inAllocator = true
bt.deleteLarger = true
offset = bt.delete(bt.freeTop, &sha1[0])
bt.deleteLarger = false
d.inAllocator = true
d.deleteLarger = true
offset = d.delete(d.freeTop, &sha1[0])
d.deleteLarger = false
if offset != 0 {
assert(*(*int32)(p) == -1) // assert(*(uint32_t *) sha1 == (uint32_t) -1)
flen := int(*(*uint32)(unsafe.Add(p, i32s))) // size_t free_len = from_be32(((__be32 *) sha1)[1])
@ -55,46 +55,46 @@ func (bt *Btree) allocChunk(size int) int64 {
/* delete buddy information */
resetsha1(&sha1[0])
*(*int64)(p) = offset
buddyLen := bt.delete(bt.freeTop, &sha1[0])
buddyLen := d.delete(d.freeTop, &sha1[0])
assert(buddyLen == int64(size))
bt.freeTop = collapse(bt, bt.freeTop)
d.freeTop = collapse(d, d.freeTop)
bt.inAllocator = false
d.inAllocator = false
/* free extra space at the end of the chunk */
for flen > size {
flen >>= 1
bt.freeChunk(offset+int64(flen), flen)
d.freeChunk(offset+int64(flen), flen)
}
} else {
bt.inAllocator = false
d.inAllocator = false
}
}
if offset == 0 {
/* not found, allocate from the end of the file */
offset = bt.alloc
offset = d.alloc
/* TODO: this wastes memory.. */
if offset&int64(size-1) != 0 {
offset += int64(size) - (offset & (int64(size) - 1))
}
bt.alloc = offset + int64(size)
d.alloc = offset + int64(size)
}
bt.flushSuper()
d.flushSuper()
// make sure the allocation tree is up-to-date before using the chunk
_ = bt.fd.Sync()
_ = d.fd.Sync()
return offset
}
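The alignment step above rounds a fresh end-of-file allocation up to a multiple of its power-of-two chunk size, which is what the TODO flags as wasteful. A standalone illustration (alignUp is a hypothetical helper, not part of the package):

// alignUp rounds offset up to the next multiple of size, where size is a
// power of two; this is the same arithmetic allocChunk applies to d.alloc.
func alignUp(offset, size int64) int64 {
	if offset&(size-1) != 0 {
		offset += size - (offset & (size - 1))
	}
	return offset
}

// alignUp(4097, 4096) == 8192; alignUp(8192, 4096) == 8192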
/* Mark a chunk as unused in the database file */
func (bt *Btree) freeChunk(offset int64, size int) {
func (d *DB) freeChunk(offset int64, size int) {
assert(size > 0)
assert(offset != 0)
size = power2(size)
assert(offset&int64(size-1) == 0)
if bt.inAllocator {
if d.inAllocator {
chunk := &fqueue[fqueueLen]
fqueueLen++
chunk.offset = offset
@ -105,7 +105,7 @@ func (bt *Btree) freeChunk(offset int64, size int) {
/* create fake offset SHA-1 for buddy allocation */
var sha1 [sha1Size]byte
p := unsafe.Pointer(&sha1[0])
bt.inAllocator = true
d.inAllocator = true
const i32s = unsafe.Sizeof(int32(0))
@ -117,12 +117,12 @@ func (bt *Btree) freeChunk(offset int64, size int) {
*(*uint32)(unsafe.Add(p, i32s*3)) = rand.Uint32()
// insert_toplevel(btree, &btree->free_top, sha1, NULL, offset);
_ = bt.insertTopLevel(&bt.freeTop, &sha1[0], nil, int(offset))
bt.inAllocator = false
_ = d.insertTopLevel(&d.freeTop, &sha1[0], nil, int(offset))
d.inAllocator = false
bt.flushSuper()
d.flushSuper()
// make sure the allocation tree is up-to-date before removing
// references to the chunk
_ = bt.fd.Sync()
_ = d.fd.Sync()
}

internal/cache/cache.go (new file, 93 lines)
View File

@ -0,0 +1,93 @@
package cache
import (
"sync"
log "github.com/sirupsen/logrus"
"github.com/Mrs4s/go-cqhttp/global"
"github.com/Mrs4s/go-cqhttp/internal/base"
"github.com/Mrs4s/go-cqhttp/internal/btree"
)
// todo(wdvxdr): always enable db-cache in v1.0.0
// EnableCacheDB controls whether the btree DB is used to cache images and other media
var EnableCacheDB bool
// Media Cache DBs
var (
Image *Cache
Video *Cache
// todo: Voice?
)
// Cache wraps btree.DB for concurrency-safe access
type Cache struct {
lock sync.RWMutex
db *btree.DB
}
// TODO(wdvxdr): the cache uses an md5 key, but the btree uses a sha1 key;
// maybe we can unify on md5 to save some space.
// Insert stores a media cache entry
func (c *Cache) Insert(md5, data []byte) {
c.lock.Lock()
defer c.lock.Unlock()
var hash [20]byte
copy(hash[:], md5)
c.db.Insert(&hash[0], data)
}
// Get retrieves a cached media entry
func (c *Cache) Get(md5 []byte) []byte {
c.lock.RLock()
defer c.lock.RUnlock()
var hash [20]byte
copy(hash[:], md5)
return c.db.Get(&hash[0])
}
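As the TODO above notes, callers hand in a 16-byte MD5 while the btree keys are sha1Size (20) bytes, so Insert and Get copy the MD5 into a zero-padded 20-byte buffer. A standalone sketch of that key construction:

package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	// A 16-byte MD5 digest placed at the front of the 20-byte key the
	// btree expects; the trailing 4 bytes are always zero.
	sum := md5.Sum([]byte("example payload"))
	var key [20]byte
	copy(key[:], sum[:])
	fmt.Printf("%x\n", key)
}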
// Init initializes the media caches
func Init() {
node, ok := base.Database["cache"]
if !ok {
return
}
EnableCacheDB = true
var conf map[string]string
err := node.Decode(&conf)
if err != nil {
log.Fatalf("failed to read cache config: %v", err)
}
if conf == nil {
conf = make(map[string]string)
}
if conf["image"] == "" {
conf["image"] = "data/image.db"
}
if conf["video"] == "" {
conf["video"] = "data/video.db"
}
var open = func(typ string, cache **Cache) {
if global.PathExists(conf[typ]) {
db, err := btree.Open(conf[typ])
if err != nil {
log.Fatalf("open %s cache failed: %v", typ, err)
}
*cache = &Cache{db: db}
} else {
db, err := btree.Create(conf[typ])
if err != nil {
log.Fatalf("create %s cache failed: %v", typ, err)
}
*cache = &Cache{db: db}
}
}
open("image", &Image)
open("video", &Video)
}

View File

@ -23,16 +23,17 @@ import (
"golang.org/x/crypto/pbkdf2"
"golang.org/x/term"
_ "github.com/Mrs4s/go-cqhttp/modules/mime" // mime检查模块
_ "github.com/Mrs4s/go-cqhttp/modules/silk" // silk编码模块
"github.com/Mrs4s/go-cqhttp/coolq"
"github.com/Mrs4s/go-cqhttp/global"
"github.com/Mrs4s/go-cqhttp/global/terminal"
"github.com/Mrs4s/go-cqhttp/internal/base"
"github.com/Mrs4s/go-cqhttp/internal/cache"
"github.com/Mrs4s/go-cqhttp/internal/selfupdate"
"github.com/Mrs4s/go-cqhttp/modules/config"
"github.com/Mrs4s/go-cqhttp/server"
_ "github.com/Mrs4s/go-cqhttp/modules/mime" // mime检查模块
_ "github.com/Mrs4s/go-cqhttp/modules/silk" // silk编码模块
)
// list of statuses that may be set via the config file
@ -81,6 +82,7 @@ func main() {
mkCacheDir(global.VoicePath, "语音")
mkCacheDir(global.VideoPath, "视频")
mkCacheDir(global.CachePath, "发送图片")
cache.Init()
var byteKey []byte
arg := os.Args

View File

@ -75,6 +75,11 @@ database: # database-related settings
# disabling this makes recall, reply, get_msg and other context-dependent features unavailable
enable: true
# media file cache; remove this section to fall back to cache files (legacy behavior)
cache:
image: data/image.db
video: data/video.db
# list of connection services
servers:
# multiple servers of the same connection type may be added; see the documentation for configuration details