Progress meow
parent 490b788e7b
commit 814316ab1e
11 changed files with 279 additions and 35 deletions
@@ -29,6 +29,9 @@ type ConfigGeneral struct {
	PrivatePort int `toml:"private_port"`
	// The port under which the public can reach the server (useful if running behind a reverse proxy)
	PublicPort *int `toml:"public_port"`
	// File to write structured logs to (structured being formatted as json)
	// If not set, Linstrom won't write structured logs
	StructuredLogFile *string
}

type ConfigWebAuthn struct {

@@ -51,9 +54,7 @@ type ConfigStorage struct {
	DatabaseUrl string `toml:"database_url"`
	// Whether the target of the database url is a postgres server
	DbIsPostgres *bool `toml:"db_is_postgres,omitempty"`
	// Whether to use Redis for caching in addition to an in memory one
	UseRedis bool `toml:"use_redis"`
	// Url to redis server. Expected to be set if UseRedis is true
	// Url to redis server. If empty, no redis is used
	RedisUrl *string `toml:"redis_url,omitempty"`
	// The maximum size of the in-memory cache in bytes
	MaxInMemoryCacheSize int64 `toml:"max_in_memory_cache_size"`

@@ -94,7 +95,6 @@ var defaultConfig Config = Config{
	Storage: ConfigStorage{
		DatabaseUrl:          "db.sqlite",
		DbIsPostgres:         other.IntoPointer(false),
		UseRedis:             false,
		RedisUrl:             nil,
		MaxInMemoryCacheSize: 1e6, // 1 Megabyte
	},
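
The new PublicPort field is a pointer so it can stay unset when Linstrom is not behind a reverse proxy. A minimal illustrative sketch (not part of this commit; the helper name and the fallback behaviour are assumptions) of how a caller might resolve the port to advertise:

// Illustrative only, not in this commit: pick the externally reachable port,
// falling back to the private port when public_port is not configured.
func effectivePublicPort(cfg ConfigGeneral) int {
	if cfg.PublicPort != nil {
		return *cfg.PublicPort
	}
	return cfg.PrivatePort
}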

3 frontend-noscript/index.html.gotmpl Normal file
@@ -0,0 +1,3 @@
<html>

</html>

44 main.go
@@ -1,6 +1,7 @@
package main

import (
	"io"
	"os"
	"strings"

@@ -8,13 +9,12 @@ import (
	"github.com/rs/zerolog/log"
	"gitlab.com/mstarongitlab/linstrom/ap"
	"gitlab.com/mstarongitlab/linstrom/config"
	"gitlab.com/mstarongitlab/linstrom/storage"
	"gitlab.com/mstarongitlab/linstrom/storage/cache"
)

func main() {
	if *flagPrettyPrint {
		log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
		log.Info().Msg("Pretty logging enabled")
	}
	setLogger()
	setLogLevel()
	if err := config.ReadAndWriteToGlobal(*flagConfigFile); err != nil {
		log.Fatal().

@@ -22,12 +22,32 @@ func main() {
			Str("config-file", *flagConfigFile).
			Msg("Failed to read config and couldn't write default")
	}
	// "@aufricus_athudath@activitypub.academy"
	res, err := ap.GetAccountWebfinger("@aufricus_athudath@activitypub.academy")
	log.Info().
		Err(err).
		Any("webfinger", res).
		Msg("Webfinger request result for @aufricus_athudath@activitypub.academy")
	storageCache, err := cache.NewCache(
		config.GlobalConfig.Storage.MaxInMemoryCacheSize,
		config.GlobalConfig.Storage.RedisUrl,
	)
	if err != nil {
		log.Fatal().Err(err).Msg("Failed to start cache")
	}
	var store *storage.Storage
	if config.GlobalConfig.Storage.DbIsPostgres != nil &&
		*config.GlobalConfig.Storage.DbIsPostgres {
		store, err = storage.NewStoragePostgres(
			config.GlobalConfig.Storage.DatabaseUrl,
			storageCache,
		)
	} else {
		store, err = storage.NewStorageSqlite(config.GlobalConfig.Storage.DatabaseUrl, storageCache)
	}
	if err != nil {
		log.Fatal().Err(err).Msg("Failed to setup storage")
	}
	_ = store
}

func setLogLevel() {

@@ -49,3 +69,17 @@ func setLogLevel() {
		zerolog.SetGlobalLevel(zerolog.InfoLevel)
	}
}

func setLogger(extraLogWriters ...io.Writer) {
	if *flagPrettyPrint {
		console := zerolog.ConsoleWriter{Out: os.Stderr}
		log.Logger = zerolog.New(zerolog.MultiLevelWriter(append([]io.Writer{console}, extraLogWriters...)...)).
			With().
			Timestamp().
			Logger()
	} else {
		log.Logger = zerolog.New(zerolog.MultiLevelWriter(
			append([]io.Writer{log.Logger}, extraLogWriters...)...,
		)).With().Timestamp().Logger()
	}
}

13 remoteStorage/remoteServer.go Normal file
@@ -0,0 +1,13 @@
package remotestorage

import "gitlab.com/mstarongitlab/linstrom/storage"

// Wrapper around db storage
// storage.Storage is for the db and cache access only,
// while this one wraps storage.Storage to also provide remote fetching of missing resources.
// So if an account doesn't exist in db or cache, this wrapper will attempt to fetch it
type RemoteStorage struct {
	store *storage.Storage
}

// TODO: Implement just about everything storage has, but with remote fetching if storage fails
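
As a rough sketch of the shape this wrapper is aiming for (hypothetical; neither the method nor the fetch helper exists yet in this commit), a wrapped lookup would try local storage first and only then go to the network:

// Hypothetical sketch of the wrapper pattern described above.
// fetchRemoteAccount is a placeholder name for the future webfinger/ActivityPub lookup.
func (r *RemoteStorage) FindAccountByFullHandle(handle string) (*storage.Account, error) {
	// Local db/cache first
	if acc, err := r.store.FindAccountByFullHandle(handle); err == nil {
		return acc, nil
	}
	// Local miss: try to fetch the account from its home server instead
	acc, err := r.fetchRemoteAccount(handle)
	if err != nil {
		return nil, err
	}
	return acc, nil
}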

47 storage/cache.go Normal file
@@ -0,0 +1,47 @@
package storage

import (
	"errors"
	"strings"

	"github.com/redis/go-redis/v9"
)

const (
	cacheUserHandleToIdPrefix = "acc-name-to-id:"
	cacheUserIdToAccPrefix    = "acc-id-to-data:"
)

var errCacheNotFound = errors.New("not found in cache")

// Find an account id in cache using a given user handle
func (s *Storage) cacheHandleToAccUid(handle string) (*string, error) {
	// Where to put the data (in case it's found)
	var target string
	found, err := s.cache.Get(cacheUserHandleToIdPrefix+strings.TrimLeft(handle, "@"), &target)
	// If nothing was found, check error
	if !found {
		// Case error is set and NOT redis' error for nothing found: Return that error
		if err != nil && !errors.Is(err, redis.Nil) {
			return nil, err
		} else {
			// Else return errCacheNotFound
			return nil, errCacheNotFound
		}
	}
	return &target, nil
}

// Find an account's data in cache using a given account id
func (s *Storage) cacheAccIdToData(id string) (*Account, error) {
	var target Account
	found, err := s.cache.Get(cacheUserIdToAccPrefix+id, &target)
	if !found {
		if err != nil && !errors.Is(err, redis.Nil) {
			return nil, err
		} else {
			return nil, errCacheNotFound
		}
	}
	return &target, nil
}
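
The write side uses the same key prefixes directly (see accounts.go further down in this diff). Purely to illustrate the key scheme, a hypothetical write-side helper, not part of this commit, could look like:

// Hypothetical helper, not in this commit: cache both lookup directions for an account.
func (s *Storage) cacheStoreAccount(handle string, acc *Account) error {
	if err := s.cache.Set(cacheUserIdToAccPrefix+acc.ID, acc); err != nil {
		return err
	}
	return s.cache.Set(cacheUserHandleToIdPrefix+strings.TrimLeft(handle, "@"), acc.ID)
}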

49 storage/cache/cache.go vendored
@@ -1,6 +1,8 @@
package cache

import (
	"context"
	"fmt"
	"time"

	"github.com/dgraph-io/ristretto"

@@ -10,7 +12,6 @@ import (
	ristretto_store "github.com/eko/gocache/store/ristretto/v4"
	"github.com/redis/go-redis/v9"
	"github.com/rs/zerolog/log"
	"gitlab.com/mstarongitlab/linstrom/config"
)

type Cache struct {

@@ -19,16 +20,20 @@ type Cache struct {
	encoders *EncoderPool
}

func NewCache() (*Cache, error) {
var ctxBackground = context.Background()

// TODO: Maybe also include metrics
func NewCache(maxSize int64, redisUrl *string) (*Cache, error) {
	// ristretto is an in-memory cache
	log.Debug().Int64("max-size", maxSize).Msg("Setting up ristretto")
	ristrettoCache, err := ristretto.NewCache(&ristretto.Config{
		// The *10 is a recommendation from ristretto
		NumCounters: config.GlobalConfig.Storage.MaxInMemoryCacheSize * 10,
		MaxCost:     config.GlobalConfig.Storage.MaxInMemoryCacheSize,
		NumCounters: maxSize * 10,
		MaxCost:     maxSize,
		BufferItems: 64, // Same here
	})
	if err != nil {
		return nil, err
		return nil, fmt.Errorf("ristretto cache error: %w", err)
	}
	ristrettoStore := ristretto_store.NewRistretto(
		ristrettoCache,

@@ -36,12 +41,8 @@ func NewCache() (*Cache, error) {
	)

	var cacheManager *cache.ChainCache[[]byte]
	if config.GlobalConfig.Storage.UseRedis {
		if config.GlobalConfig.Storage.RedisUrl == nil {
			log.Fatal().
				Msg("Told to use redis in addition to in-memory store, but no redis url provided!")
		}
		opts, err := redis.ParseURL(*config.GlobalConfig.Storage.RedisUrl)
	if redisUrl != nil {
		opts, err := redis.ParseURL(*redisUrl)
		if err != nil {
			return nil, err
		}

@@ -61,3 +62,29 @@ func NewCache() (*Cache, error) {
		encoders: NewEncoderPool(),
	}, nil
}

func (c *Cache) Get(key string, target any) (bool, error) {
	data, err := c.cache.Get(ctxBackground, key)
	if err != nil {
		return false, err
	}
	err = c.decoders.Decode(data, target)
	if err != nil {
		return false, err
	}
	return true, err
}

func (c *Cache) Set(key string, value any) error {
	data, err := c.encoders.Encode(value)
	if err != nil {
		return err
	}
	err = c.cache.Set(ctxBackground, key, data, nil)
	return err
}

func (c *Cache) Delete(key string) {
	// Error return doesn't matter here. Delete is delete is gone
	_ = c.cache.Delete(ctxBackground, key)
}
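
A small usage sketch of the new Get/Set pair (illustrative only; the key name and struct are made up). Values are gob-encoded through the encoder pool on Set and decoded back into the target on Get:

// Illustrative usage of the new cache API, assuming an in-memory-only setup (no redis url).
package main

import (
	"github.com/rs/zerolog/log"

	"gitlab.com/mstarongitlab/linstrom/storage/cache"
)

type session struct{ UserID string }

func main() {
	// 1 MB in-memory budget, nil means no redis layer in the chain
	c, err := cache.NewCache(1_000_000, nil)
	if err != nil {
		log.Fatal().Err(err).Msg("cache setup failed")
	}
	if err := c.Set("session:abc", session{UserID: "123"}); err != nil {
		log.Warn().Err(err).Msg("failed to cache session")
	}
	var got session
	found, err := c.Get("session:abc", &got)
	log.Info().Bool("found", found).Err(err).Str("user", got.UserID).Msg("cache roundtrip")
}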

2 storage/cache/coderPools.go vendored
@@ -95,7 +95,7 @@ func NewDecoderPool() *DecoderPool {
}

// Decode some value with gob
func (p *DecoderPool) Encode(raw []byte, target any) error {
func (p *DecoderPool) Decode(raw []byte, target any) error {
	var encoder *gobDecoder
	// First try to find an available encoder
	// Read only lock should be fine here since locks are atomic i

@@ -14,7 +14,7 @@ type Note struct {
	// Soft delete means that this entry still exists in the db, but gorm won't include it anymore unless specifically told to
	// If not null, this entry is marked as deleted
	DeletedAt gorm.DeletedAt `gorm:"index"`
	Creator string // Full handle of the creator, eg: @max@example.com
	Creator string // Id of the author in the db, not the handle
	Remote  bool   // Whether the note is originally a remote one and just "cached"
	// Raw content of the note. So without additional formatting applied
	// Might already have formatting applied beforehand from the origin server

@@ -29,3 +29,25 @@ type Note struct {
	OriginServer string   // Url of the origin server. Also the primary key for those
	Tags         []string `gorm:"serializer:json"` // Hashtags
}

func (s *Storage) FindNoteById(id string) (*Note, error) {
	// TODO: Implement me
	panic("not implemented")
}

func (s *Storage) FindNotesByFuzzyContent(fuzzyContent string) ([]Note, error) {
	// TODO: Implement me
	panic("not implemented")
}

func (s *Storage) FindNotesByAuthorHandle(handle string) ([]Note, error) {
	// TODO: Implement me
	panic("not implemented")
}

func (s *Storage) FindNotesByAuthorId(id string) ([]Note, error) {
	// TODO: Implement me
	panic("not implemented")
}

// Update, create, delete
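
The note lookups above are still stubs. A minimal sketch of what FindNoteById might become, mirroring the gorm pattern used in FindAccountById later in this diff (it assumes Note's primary key column is a string id, which this hunk does not show):

// Illustrative sketch only; the real implementation is still a TODO in this commit.
func (s *Storage) FindNoteById(id string) (*Note, error) {
	note := Note{}
	res := s.db.First(&note, "id = ?", id)
	if res.Error != nil {
		if errors.Is(res.Error, gorm.ErrRecordNotFound) {
			log.Warn().Str("note-id", id).Msg("Note not found")
		}
		return nil, res.Error
	}
	return &note, nil
}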

@@ -19,3 +19,32 @@ type RemoteServer struct {
	Icon   string // ID of a media file
	IsSelf bool   // Whether this server is yours truly
}

func (s *Storage) FindRemoteServer(url string) (*RemoteServer, error) {
	// TODO: Implement me
	panic("not implemented")
}

// Find a remote server with a given display name
func (s *Storage) FindRemoteServerByDisplayName(displayName string) (*RemoteServer, error) {
	// TODO: Implement me
	panic("not implemented")
}

// Create a new remote server
func (s *Storage) NewRemoteServer(
	url, displayName, icon string,
	serverType RemoteServerType,
) (*RemoteServer, error) {
	// TODO: Implement me
	panic("not implemented")
}

// Update a remote server with the given url
// If displayName is set, update that
// If icon is set, update that
// Returns the updated version
func (s *Storage) UpdateRemoteServer(url string, displayName, icon *string) (*RemoteServer, error) {
	// TODO: Implement me
	panic("not implemented")
}

@@ -1,7 +1,10 @@
package storage

import (
	"errors"

	"github.com/glebarez/sqlite"
	"gitlab.com/mstarongitlab/linstrom/storage/cache"
	"gorm.io/driver/postgres"
	"gorm.io/gorm"
)

@@ -9,30 +12,33 @@ import (
// Storage is responsible for all database, cache and media related actions
// and serves as the lowest layer of the cake
type Storage struct {
	db *gorm.DB
	db    *gorm.DB
	cache *cache.Cache
}

var ErrInvalidData = errors.New("invalid data")

// Build a new storage using sqlite as database backend
func NewStorageSqlite(filePath string) (*Storage, error) {
func NewStorageSqlite(filePath string, cache *cache.Cache) (*Storage, error) {
	db, err := gorm.Open(sqlite.Open(filePath))
	if err != nil {
		return nil, err
	}
	return storageFromEmptyDb(db)
	return storageFromEmptyDb(db, cache)
}

func NewStoragePostgres(dbUrl string) (*Storage, error) {
func NewStoragePostgres(dbUrl string, cache *cache.Cache) (*Storage, error) {
	db, err := gorm.Open(postgres.Open(dbUrl))
	if err != nil {
		return nil, err
	}
	return storageFromEmptyDb(db)
	return storageFromEmptyDb(db, cache)
}

func storageFromEmptyDb(db *gorm.DB) (*Storage, error) {
func storageFromEmptyDb(db *gorm.DB, cache *cache.Cache) (*Storage, error) {
	// AutoMigrate ensures the db is in a state where all the structs given here
	// have their own tables and relations setup. It also updates tables if necessary
	db.AutoMigrate(
	err := db.AutoMigrate(
		MediaFile{},
		Account{},
		RemoteServer{},

@@ -40,9 +46,13 @@ func storageFromEmptyDb(db *gorm.DB) (*Storage, error) {
		Role{},
		PasskeySession{},
	)
	if err != nil {
		return nil, err
	}

	// And finally, build the actual storage struct
	return &Storage{
		db: db,
		db:    db,
		cache: cache,
	}, nil
}

@@ -3,6 +3,7 @@ package storage
import (
	"crypto/rand"
	"errors"
	"strings"
	"time"

	"github.com/go-webauthn/webauthn/webauthn"

@@ -94,6 +95,23 @@ type RemoteAccountLinks struct {
func (s *Storage) FindAccountByFullHandle(handle string) (*Account, error) {
	log.Trace().Caller().Send()
	log.Debug().Str("account-handle", handle).Msg("Looking for account by handle")
	log.Debug().Str("account-handle", handle).Msg("Checking if there's a cache hit")

	// Try and find the account in cache first
	cacheAccId, err := s.cacheHandleToAccUid(handle)
	if err == nil {
		log.Info().Str("account-handle", handle).Msg("Hit account handle in cache")
		// Then always load via id since unique key access should be faster than string matching
		return s.FindAccountById(*cacheAccId)
	} else {
		if !errors.Is(err, errCacheNotFound) {
			log.Error().Err(err).Str("account-handle", handle).Msg("Problem while checking cache for account")
			return nil, err
		}
	}

	// Failed to find in cache, go the slow route of hitting the db
	log.Debug().Str("account-handle", handle).Msg("Didn't hit account in cache, going to db")
	name, server, err := ap.SplitFullHandle(handle)
	if err != nil {
		log.Warn().Err(err).Str("account-handle", handle).Msg("Failed to split up account handle")

@@ -110,7 +128,19 @@ func (s *Storage) FindAccountByFullHandle(handle string) (*Account, error) {
		}
		return nil, res.Error
	}
	log.Info().Str("account-handle", handle).Msg("Found account")
	log.Info().Str("account-handle", handle).Msg("Found account, also inserting into cache")
	if err = s.cache.Set(cacheUserIdToAccPrefix+acc.ID, &acc); err != nil {
		log.Warn().
			Err(err).
			Str("account-handle", handle).
			Msg("Found account but failed to insert into cache")
	}
	if err = s.cache.Set(cacheUserHandleToIdPrefix+strings.TrimLeft(handle, "@"), acc.ID); err != nil {
		log.Warn().
			Err(err).
			Str("account-handle", handle).
			Msg("Failed to store handle to id in cache")
	}
	return &acc, nil
}

@@ -118,8 +148,18 @@ func (s *Storage) FindAccountByFullHandle(handle string) (*Account, error) {
func (s *Storage) FindAccountById(id string) (*Account, error) {
	log.Trace().Caller().Send()
	log.Debug().Str("account-id", id).Msg("Looking for account by id")
	acc := Account{}
	res := s.db.First(&acc, id)
	log.Debug().Str("account-id", id).Msg("First trying to hit cache")
	acc, err := s.cacheAccIdToData(id)
	if err == nil {
		log.Info().Str("account-id", id).Msg("Found account in cache")
		return acc, nil
	} else if !errors.Is(err, errCacheNotFound) {
		log.Error().Err(err).Str("account-id", id).Msg("Error while looking for account in cache")
		return nil, err
	}

	log.Debug().Str("account-id", id).Msg("Didn't hit account in cache, checking db")
	res := s.db.First(acc, id)
	if res.Error != nil {
		if errors.Is(res.Error, gorm.ErrRecordNotFound) {
			log.Warn().Str("account-id", id).Msg("Account not found")

@@ -128,8 +168,27 @@ func (s *Storage) FindAccountById(id string) (*Account, error) {
		}
		return nil, res.Error
	}
	log.Info().Str("account-id", id).Msg("Found account")
	return &acc, nil
	log.Info().Str("account-id", id).Msg("Found account in db, also adding to cache")
	if err = s.cache.Set(cacheUserIdToAccPrefix+id, acc); err != nil {
		log.Warn().Err(err).Str("account-id", id).Msg("Failed to add account to cache")
	}
	return acc, nil
}

// Update a given account in storage and cache
func (s *Storage) UpdateAccount(acc *Account) error {
	// If the account is nil or doesn't have an id, error out
	if acc == nil || acc.ID == "" {
		return ErrInvalidData
	}
	res := s.db.Save(acc)
	if res.Error != nil {
		return res.Error
	}
	if err := s.cache.Set(cacheUserIdToAccPrefix+acc.ID, acc); err != nil {
		return err
	}
	return nil
}

func (s *Storage) NewEmptyAccount() (*Account, error) {