Merge branch 'main' of gitlab.com:mstarongitlab/linstrom

also delete server-old with all its problems
Melody Becker 2024-09-13 20:41:47 +02:00
commit d99efca667
Signed by: mstar
SSH key fingerprint: SHA256:vkXfS9FG2pVNVfvDrzd1VW9n8VJzqqdKQGljxxX8uK8
47 changed files with 2200 additions and 694 deletions

73
storage/cache.go Normal file
View file

@ -0,0 +1,73 @@
package storage
import (
"errors"
"strings"
"github.com/redis/go-redis/v9"
)
// various prefixes for accessing items in the cache (since it's a simple key-value store)
const (
cacheUserHandleToIdPrefix = "acc-name-to-id:"
cacheUserIdToAccPrefix = "acc-id-to-data:"
cacheNoteIdToNotePrefix = "note-id-to-data:"
)
// An error describing the case where some value was just not found in the cache
var errCacheNotFound = errors.New("not found in cache")
// Find an account id in cache using a given user handle
// accId contains the Id of the account if found
// err contains an error describing why an account's id couldn't be found
// The most common one should be errCacheNotFound
func (s *Storage) cacheHandleToAccUid(handle string) (accId *string, err error) {
// Where to put the data (in case it's found)
var target string
found, err := s.cache.Get(cacheUserHandleToIdPrefix+strings.TrimLeft(handle, "@"), &target)
// If nothing was found, check error
if !found {
// Case error is set and NOT redis' error for nothing found: Return that error
if err != nil && !errors.Is(err, redis.Nil) {
return nil, err
} else {
// Else return errCacheNotFound
return nil, errCacheNotFound
}
}
return &target, nil
}
// Find an account's data in cache using a given account id
// acc contains the full account as stored last time if found
// err contains an error describing why an account couldn't be found
// The most common one should be errCacheNotFound
func (s *Storage) cacheAccIdToData(id string) (acc *Account, err error) {
var target Account
found, err := s.cache.Get(cacheUserIdToAccPrefix+id, &target)
if !found {
if err != nil && !errors.Is(err, redis.Nil) {
return nil, err
} else {
return nil, errCacheNotFound
}
}
return &target, nil
}
// Find a cached note given its ID
// note contains the full note as stored last time if found
// err contains an error describing why a note couldn't be found
// The most common one should be errCacheNotFound
func (s *Storage) cacheNoteIdToData(id string) (note *Note, err error) {
target := Note{}
found, err := s.cache.Get(cacheNoteIdToNotePrefix+id, &target)
if !found {
if err != nil && !errors.Is(err, redis.Nil) {
return nil, err
} else {
return nil, errCacheNotFound
}
}
return &target, nil
}
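The prefixes above make the flat key-value store act like three small tables, and the helpers are meant to be chained: a handle resolves to an account id, and the id resolves to the cached account data. A minimal sketch of that two-step lookup (the helper name is made up; FindAccountByFullHandle further down is the real caller):
func (s *Storage) cachedAccountByHandleSketch(handle string) (*Account, error) {
accId, err := s.cacheHandleToAccUid(handle)
if err != nil {
// errCacheNotFound means "fall back to the database", anything else is a real problem
return nil, err
}
return s.cacheAccIdToData(*accId)
}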

90
storage/cache/cache.go vendored Normal file
View file

@ -0,0 +1,90 @@
package cache
import (
"context"
"fmt"
"time"
"github.com/dgraph-io/ristretto"
"github.com/eko/gocache/lib/v4/cache"
"github.com/eko/gocache/lib/v4/store"
redis_store "github.com/eko/gocache/store/redis/v4"
ristretto_store "github.com/eko/gocache/store/ristretto/v4"
"github.com/redis/go-redis/v9"
"github.com/rs/zerolog/log"
)
type Cache struct {
cache *cache.ChainCache[[]byte]
decoders *DecoderPool
encoders *EncoderPool
}
var ctxBackground = context.Background()
// TODO: Maybe also include metrics
func NewCache(maxSize int64, redisUrl *string) (*Cache, error) {
// ristretto is an in-memory cache
log.Debug().Int64("max-size", maxSize).Msg("Setting up ristretto")
ristrettoCache, err := ristretto.NewCache(&ristretto.Config{
// The *10 is a recommendation from ristretto
NumCounters: maxSize * 10,
MaxCost: maxSize,
BufferItems: 64, // Same here
})
if err != nil {
return nil, fmt.Errorf("ristretto cache error: %w", err)
}
ristrettoStore := ristretto_store.NewRistretto(
ristrettoCache,
store.WithExpiration(time.Second*10),
)
var cacheManager *cache.ChainCache[[]byte]
if redisUrl != nil {
opts, err := redis.ParseURL(*redisUrl)
if err != nil {
return nil, err
}
redisClient := redis.NewClient(opts)
redisStore := redis_store.NewRedis(redisClient, store.WithExpiration(time.Minute))
cacheManager = cache.NewChain(
cache.New[[]byte](ristrettoStore),
cache.New[[]byte](redisStore),
)
} else {
cacheManager = cache.NewChain(cache.New[[]byte](ristrettoStore))
}
return &Cache{
cache: cacheManager,
decoders: NewDecoderPool(),
encoders: NewEncoderPool(),
}, nil
}
func (c *Cache) Get(key string, target any) (bool, error) {
data, err := c.cache.Get(ctxBackground, key)
if err != nil {
return false, err
}
err = c.decoders.Decode(data, target)
if err != nil {
return false, err
}
return true, err
}
func (c *Cache) Set(key string, value any) error {
data, err := c.encoders.Encode(value)
if err != nil {
return err
}
err = c.cache.Set(ctxBackground, key, data)
return err
}
func (c *Cache) Delete(key string) {
// Error return doesn't matter here. Delete is delete is gone
_ = c.cache.Delete(ctxBackground, key)
}
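A rough usage sketch of this wrapper, assuming an in-memory-only setup (no redis URL) and made-up key and size values. Values pass through the gob encoder pools, so only exported fields survive the round trip:
type cachedThing struct {
Name string
}
func exampleCacheUsage() error {
// Cache size and the nil redis URL are assumptions for the example
c, err := NewCache(10_000_000, nil)
if err != nil {
return err
}
if err := c.Set("example-key", cachedThing{Name: "demo"}); err != nil {
return err
}
var out cachedThing
found, err := c.Get("example-key", &out)
if err != nil || !found {
return err
}
c.Delete("example-key")
return nil
}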

164
storage/cache/coderPools.go vendored Normal file
View file

@ -0,0 +1,164 @@
package cache
import (
"io"
"sync"
)
type EncoderPool struct {
encoders []*gobEncoder
lock sync.RWMutex
}
func NewEncoderPool() *EncoderPool {
return &EncoderPool{
encoders: []*gobEncoder{},
lock: sync.RWMutex{},
}
}
// Encode some value with gob
func (p *EncoderPool) Encode(raw any) ([]byte, error) {
var encoder *gobEncoder
// First try to find an available encoder
// A read-only lock on the pool should be fine here since each encoder carries its own lock
// and thus no two goroutines can claim the same encoder at the same time
// If two goroutines race for the same encoder, one TryLock fails and that goroutine keeps looking for another available one
p.lock.RLock()
for _, v := range p.encoders {
// If we can lock one, it's available
if v.TryLock() {
// Keep the reference, then break
encoder = v
break
}
}
p.lock.RUnlock()
// Didn't find an available encoder, create new one and add to pool
if encoder == nil {
encoder = p.expand()
}
// Ensure we free the encoder at the end
defer encoder.Unlock()
// Clear the buffer to avoid funky output from previous operations
encoder.Buffer.Reset()
if err := encoder.Encoder.Encode(raw); err != nil {
return nil, err
}
data, err := io.ReadAll(encoder.Buffer)
if err != nil {
return nil, err
}
return data, nil
}
// Expands the pool of available encoders by one and returns a reference to the new one
// The new encoder is already locked and ready for use
func (p *EncoderPool) expand() *gobEncoder {
enc := newEncoder()
// Lock everything. First the pool fully since we need to overwrite the encoders slice
p.lock.Lock()
// And then the new encoder to make it available for use by the caller
// so that they don't have to search for it again
enc.Lock()
p.encoders = append(p.encoders, &enc)
p.lock.Unlock()
return &enc
}
// Prune all encoders not currently used from the pool
func (p *EncoderPool) Prune() {
stillActiveEncoders := []*gobEncoder{}
p.lock.Lock()
for _, v := range p.encoders {
if !v.TryLock() {
// Can't lock, encoder in use, keep it
stillActiveEncoders = append(stillActiveEncoders, v)
continue
}
// If we reach here, the encoder was available (since not locked), unlock and continue
v.Unlock()
}
// Overwrite list of available encoders to only contain the ones we found to still be active
p.encoders = stillActiveEncoders
p.lock.Unlock()
}
type DecoderPool struct {
encoders []*gobDecoder
lock sync.RWMutex
}
func NewDecoderPool() *DecoderPool {
return &DecoderPool{
encoders: []*gobDecoder{},
lock: sync.RWMutex{},
}
}
// Decode some value with gob
func (p *DecoderPool) Decode(raw []byte, target any) error {
var decoder *gobDecoder
// First try to find an available decoder
// A read-only lock on the pool should be fine here since each decoder carries its own lock
// and thus no two goroutines can claim the same decoder at the same time
// If two goroutines race for the same decoder, one TryLock fails and that goroutine keeps looking for another available one
p.lock.RLock()
for _, v := range p.encoders {
// If we can lock one, it's available
if v.TryLock() {
// Keep the reference, then break
decoder = v
break
}
}
p.lock.RUnlock()
// Didn't find an available decoder, create a new one and add it to the pool
if decoder == nil {
decoder = p.expand()
}
// Ensure we free the decoder at the end
defer decoder.Unlock()
// Clear the buffer to avoid funky output from previous operations
decoder.Buffer.Reset()
// Write the raw data to the buffer, then decode it
// The write will always succeed (or panic)
_, _ = decoder.Buffer.Write(raw)
err := decoder.Decoder.Decode(target)
if err != nil {
return err
}
return nil
}
// Expands the pool of available decoders by one and returns a reference to the new one
// The new decoder is already locked and ready for use
func (p *DecoderPool) expand() *gobDecoder {
enc := newDecoder()
// Lock everything. First the pool fully since we need to overwrite the encoders slice
p.lock.Lock()
// And then the new decoder to make it available for use by the caller
// so that they don't have to search for it again
enc.Lock()
p.encoders = append(p.encoders, &enc)
p.lock.Unlock()
return &enc
}
// Prune all decoders not currently used from the pool
func (p *DecoderPool) Prune() {
stillActiveDecoders := []*gobDecoder{}
p.lock.Lock()
for _, v := range p.encoders {
if !v.TryLock() {
// Can't lock, decoder in use, keep it
stillActiveDecoders = append(stillActiveDecoders, v)
continue
}
// If we reach here, the decoder was available (since not locked), unlock and continue
v.Unlock()
}
// Overwrite the list of available decoders to only contain the ones we found to still be active
p.encoders = stillActiveDecoders
p.lock.Unlock()
}
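For context, a single encode/decode round trip through the pools looks roughly like this (the payload type is made up); both pools grow on demand via expand and can be shrunk again with Prune:
func exampleCoderRoundTrip() error {
encoders := NewEncoderPool()
decoders := NewDecoderPool()
payload := map[string]int{"notes": 3}
data, err := encoders.Encode(payload)
if err != nil {
return err
}
decoded := map[string]int{}
if err := decoders.Decode(data, &decoded); err != nil {
return err
}
// Drop any encoders/decoders that are idle again
encoders.Prune()
decoders.Prune()
return nil
}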

35
storage/cache/lockedCoders.go vendored Normal file
View file

@ -0,0 +1,35 @@
package cache
import (
"bytes"
"encoding/gob"
"sync"
)
type gobEncoder struct {
sync.Mutex
Encoder *gob.Encoder
Buffer *bytes.Buffer
}
func newEncoder() gobEncoder {
buf := bytes.Buffer{}
return gobEncoder{
Encoder: gob.NewEncoder(&buf),
Buffer: &buf,
}
}
type gobDecoder struct {
sync.Mutex
Decoder *gob.Decoder
Buffer *bytes.Buffer
}
func newDecoder() gobDecoder {
buf := bytes.Buffer{}
return gobDecoder{
Decoder: gob.NewDecoder(&buf),
Buffer: &buf,
}
}

11
storage/errors.go Normal file
View file

@ -0,0 +1,11 @@
package storage
import "errors"
type ErrNotImplemented struct{}
func (n ErrNotImplemented) Error() string {
return "Not implemented yet"
}
var ErrEntryNotFound = errors.New("entry not found")

6
storage/housekeeping.go Normal file
View file

@ -0,0 +1,6 @@
package storage
// Contains various functions for housekeeping
// Things like true deletion of soft deleted data after some time
// Or removing inactive access tokens
// All of this will be handled by goroutines
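Nothing is implemented here yet; a hedged sketch of what one such goroutine could look like, assuming gorm's soft-delete columns, made-up interval and retention values, and an import of the time package. Unscoped is what lets gorm see (and here permanently remove) soft-deleted rows:
// Hypothetical sketch, not part of this commit
func (s *Storage) startNoteCleanup(interval, retention time.Duration) {
go func() {
for {
cutoff := time.Now().Add(-retention)
// Permanently remove notes whose soft deletion is older than the retention window
s.db.Unscoped().Where("deleted_at IS NOT NULL AND deleted_at < ?", cutoff).Delete(&Note{})
time.Sleep(interval)
}
}()
}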

View file

@ -6,29 +6,72 @@ import (
"gorm.io/gorm"
)
type MediaFile struct {
ID string `gorm:"primarykey"`
CreatedAt time.Time
UpdatedAt time.Time
// MediaMetadata contains metadata about some media
// Metadata includes whether it's a remote file or not, what the name is,
// the MIME type, and an identifier pointing to its location
type MediaMetadata struct {
ID string `gorm:"primarykey"` // The unique ID of this media file
CreatedAt time.Time // When this entry was created
UpdatedAt time.Time // When this entry was last updated
// When this entry was deleted (for soft deletions)
// Soft delete means that this entry still exists in the db, but gorm won't include it anymore unless specifically told to
// If not null, this entry is marked as deleted
DeletedAt gorm.DeletedAt `gorm:"index"`
Remote bool // whether the attachment is a remote one
Link string // url if remote attachment, identifier if local
Type string // What media type this is, eg image/png
// Whether this media has been cached locally
// Only really used for user and server icons, not attachments
// If true, Link will be read as file path. url otherwise
// Reason: Attachments would take way too much space considering that they are often only loaded a few times at most
// And caching a file for those few times would be a waste of storage
// Caching user and server icons locally, however, should reduce the burden on remote servers by quite a bit
LocallyCached bool
Sensitive bool // Whether the media is marked as sensitive. If so, hide it in the UI by default
// Where the media is stored. Url if remote file, file path if cached locally
Location string
Type string // What media type this is following mime types, eg image/png
// Descriptive name for a media file
// Emote name for example or servername.filetype for a server's icon
Name string
}
// Placeholder media file. Acts as placeholder for media file fields that have not been initialised yet but need a value
var placeholderMediaFile = &MediaFile{
ID: "placeholder",
Remote: false,
Link: "placeholder", // TODO: Replace this with a file path to a staticly included image
Type: "image/png",
LocallyCached: true,
func (s *Storage) NewMediaMetadata(location, mediaType, name string) (*MediaMetadata, error) {
newMedia := MediaMetadata{
Location: location,
Name: name,
Type: mediaType,
}
if err := s.db.Create(&newMedia).Error; err != nil {
return nil, err
}
return &newMedia, nil
}
func (s *Storage) FuzzyFindMediaMetadataByName(name string) ([]MediaMetadata, error) {
media := []MediaMetadata{}
err := s.db.Where("name LIKE ?", "%"+name+"%").Find(&media).Error
if err != nil {
return nil, err
}
return media, nil
}
func (s *Storage) GetMediaMetadataById(id string) (*MediaMetadata, error) {
media := MediaMetadata{ID: id}
err := s.db.First(&media).Error
if err != nil {
return nil, err
}
return &media, nil
}
func (s *Storage) FuzzyFindMediaMetadataByLocation(location string) ([]MediaMetadata, error) {
data := []MediaMetadata{}
if err := s.db.Where("location LIKE ?", "%"+location+"%").Find(&data).Error; err != nil {
return nil, err
}
return data, nil
}
func (s *Storage) DeleteMediaMetadataById(id string) error {
return s.db.Delete(&MediaMetadata{ID: id}).Error
}
func (s *Storage) DeleteMediaMetadataByFuzzyLocation(location string) error {
var tmp MediaMetadata
return s.db.Where("location LIKE ?", "%"+location+"%").Delete(&tmp).Error
}
func (s *Storage) DeleteMediaMetadataByFuzzyName(name string) error {
var tmp MediaMetadata
return s.db.Where("name LIKE ?", "%"+name+"%").Delete(&tmp).Error
}
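Example of how these helpers chain together, with made-up location, type and name values:
func exampleMediaMetadata(s *Storage) error {
_, err := s.NewMediaMetadata("https://example.com/icon.png", "image/png", "example.com.png")
if err != nil {
return err
}
// Fuzzy lookup by name, then clean up again the same way
if _, err := s.FuzzyFindMediaMetadataByName("example"); err != nil {
return err
}
return s.DeleteMediaMetadataByFuzzyName("example")
}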

View file

@ -0,0 +1,3 @@
package mediaprovider
// TODO: Implement me

View file

@ -5,19 +5,32 @@ import (
"errors"
)
// For pretty printing during debug
// If `go generate` is run, it'll generate the necessary function and data for pretty printing
//go:generate stringer -type NoteTarget
// What feed a note is targeting (public, home, followers or dm)
type NoteTarget uint8
const (
// The note is intended for the public
NOTE_TARGET_PUBLIC = NoteTarget(0)
NOTE_TARGET_HOME = NoteTarget(1 << iota)
// The note is intended only for the home screen
// It's not really clear what the difference to public is
// Maybe home notes don't show up on the server-wide feed but still appear in followers' home feeds if they reach them via a follow or boost
NOTE_TARGET_HOME = NoteTarget(1 << iota)
// The note is intended only for followers
NOTE_TARGET_FOLLOWERS
// The note is intended only for a DM to one or more targets
NOTE_TARGET_DM
)
// Converts the NoteTarget value into a type the DB can use
func (n *NoteTarget) Value() (driver.Value, error) {
return int64(*n), nil
}
// Converts the raw value from the DB into a NoteTarget
func (n *NoteTarget) Scan(value any) error {
vBig, ok := value.(int64)
if !ok {

View file

@ -1,17 +1,28 @@
package storage
import (
"fmt"
"time"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
)
// Note represents an ActivityPub note
// ActivityPub notes can be quite a few things, depending on fields provided.
// A survey, a reply, a quote of another note, etc
// And depending on the origin server of a note, they are treated differently
// with for example rendering or available actions
// This struct attempts to contain all information necessary for easily working with a note
type Note struct {
ID string `gorm:"primarykey"` // Make ID a string (uuid) for other implementations
CreatedAt time.Time
UpdatedAt time.Time
ID string `gorm:"primarykey"` // Make ID a string (uuid) for other implementations
CreatedAt time.Time // When this entry was created
UpdatedAt time.Time // When this entry was last updated
// When this entry was deleted (for soft deletions)
// Soft delete means that this entry still exists in the db, but gorm won't include it anymore unless specifically told to
// If not null, this entry is marked as deleted
DeletedAt gorm.DeletedAt `gorm:"index"`
Creator string // Full handle of the creator, eg: @max@example.com
Creator string // Id of the author in the db, not the handle
Remote bool // Whether the note is originally a remote one and just "cached"
// Raw content of the note. So without additional formatting applied
// Might already have formatting applied beforehand from the origin server
@ -27,20 +38,88 @@ type Note struct {
Tags []string `gorm:"serializer:json"` // Hashtags
}
var placeholderNote = &Note{
ID: "placeholder",
Creator: "placeholder",
Remote: false,
RawContent: "placeholder",
ContentWarning: nil,
Attachments: []string{},
Emotes: []string{},
RepliesTo: nil,
Quotes: nil,
Target: NOTE_TARGET_HOME,
Pings: []string{},
OriginServer: "placeholder",
Tags: []string{},
func (s *Storage) FindNoteById(id string) (*Note, error) {
note := &Note{}
cacheNote, err := s.cacheNoteIdToData(id)
switch err {
case nil:
return cacheNote, nil
// Empty case, not found in cache means check db
case errCacheNotFound:
default:
return nil, err
}
err = s.db.First(note, "id = ?", id).Error
switch err {
case nil:
if err = s.cache.Set(cacheNoteIdToNotePrefix+id, note); err != nil {
log.Warn().Err(err).Str("note-id", id).Msg("Failed to place note in cache")
}
return note, nil
case gorm.ErrRecordNotFound:
return nil, ErrEntryNotFound
default:
return nil, err
}
}
func (s *Storage) FindNotesByFuzzyContent(fuzzyContent string) ([]Note, error) {
notes := []Note{}
// TODO: Figure out if cache can be used here too
err := s.db.Where("raw_content LIKE %?%", fuzzyContent).Find(notes).Error
if err != nil {
return nil, err
}
return notes, nil
}
func (s *Storage) FindNotesByAuthorHandle(handle string) ([]Note, error) {
acc, err := s.FindAccountByFullHandle(handle)
if err != nil {
return nil, fmt.Errorf("account with handle %s not found: %w", handle, err)
}
return s.FindNotesByAuthorId(acc.ID)
}
func (s *Storage) FindNotesByAuthorId(id string) ([]Note, error) {
notes := []Note{}
err := s.db.Where("creator = ?", id).Find(notes).Error
switch err {
case nil:
return notes, nil
case gorm.ErrRecordNotFound:
return nil, ErrEntryNotFound
default:
return nil, err
}
}
func (s *Storage) UpdateNote(note *Note) error {
if note == nil || note.ID == "" {
return ErrInvalidData
}
err := s.db.Save(note).Error
if err != nil {
return err
}
err = s.cache.Set(cacheNoteIdToNotePrefix+note.ID, note)
if err != nil {
log.Warn().Err(err).Msg("Failed to update note into cache. Cache and db might be out of sync, a force sync is recommended")
}
return nil
}
func (s *Storage) CreateNote() (*Note, error) {
// TODO: Think of good arguments and implement me
panic("not implemented")
}
func (s *Storage) DeleteNote(id string) {
s.cache.Delete(cacheNoteIdToNotePrefix + id)
s.db.Delete(&Note{ID: id})
}
// Try and find a note with a given ID
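A small usage sketch for FindNoteById from the top of this hunk, with a made-up note id (it would additionally need the errors package); ErrEntryNotFound is the storage-level signal that nothing exists under that id:
func exampleFindNote(s *Storage) {
note, err := s.FindNoteById("some-note-id") // id is an assumption
switch {
case err == nil:
log.Info().Str("note-id", note.ID).Msg("found note")
case errors.Is(err, ErrEntryNotFound):
log.Info().Msg("no such note")
default:
log.Error().Err(err).Msg("storage problem while looking for note")
}
}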

View file

@ -0,0 +1,54 @@
package storage
import (
"github.com/go-webauthn/webauthn/webauthn"
"github.com/google/uuid"
"github.com/rs/zerolog/log"
)
// Session data used during login attempts with a passkey
// Not actually used afterwards to verify a normal session
// NOTE: Doesn't contain a DeletedAt field, thus deletions are automatically hard and not reversible
type PasskeySession struct {
ID string `gorm:"primarykey"`
Data webauthn.SessionData `gorm:"serializer:json"`
}
// ---- Section SessionStore
// Generate some id for a new session. Just returns a new uuid
func (s *Storage) GenSessionID() (string, error) {
x := uuid.NewString()
log.Debug().Str("session-id", x).Msg("Generated new passkey session id")
return x, nil
}
// Look for an active session with a given id
// Returns the session if found and a bool indicating if a session was found
func (s *Storage) GetSession(sessionId string) (*webauthn.SessionData, bool) {
log.Debug().Str("id", sessionId).Msg("Looking for passkey session")
session := PasskeySession{}
res := s.db.Where("id = ?", sessionId).First(&session)
if res.Error != nil {
return nil, false
}
log.Debug().Str("id", sessionId).Any("webauthn-data", &session).Msg("Found passkey session")
return &session.Data, true
}
// Save (or update) a session with the new data
func (s *Storage) SaveSession(token string, data *webauthn.SessionData) {
log.Debug().Str("id", token).Any("webauthn-data", data).Msg("Saving passkey session")
session := PasskeySession{
ID: token,
Data: *data,
}
s.db.Save(&session)
}
// Delete a session
// NOTE: This is a hard delete since the session struct contains no DeletedAt field
func (s *Storage) DeleteSession(token string) {
log.Debug().Str("id", token).Msg("Deleting passkey session (if one exists)")
s.db.Delete(&PasskeySession{ID: token})
}
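The store is only meant to be driven by the passkey/webauthn library during a login attempt; a rough sketch of that lifecycle (the session data itself is normally filled in by the library, not by hand):
func examplePasskeySession(s *Storage) {
id, _ := s.GenSessionID()
// In practice the webauthn library produces this during BeginRegistration/BeginLogin
data := webauthn.SessionData{}
s.SaveSession(id, &data)
if restored, found := s.GetSession(id); found {
_ = restored // hand back to the library to finish the ceremony
}
s.DeleteSession(id)
}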

View file

@ -7,9 +7,12 @@ import (
)
type RemoteServer struct {
ID string `gorm:"primarykey"` // ID is also server url
CreatedAt time.Time
UpdatedAt time.Time
ID string `gorm:"primarykey"` // ID is also server url
CreatedAt time.Time // When this entry was created
UpdatedAt time.Time // When this entry was last updated
// When this entry was deleted (for soft deletions)
// Soft delete means that this entry still exists in the db, but gorm won't include it anymore unless specifically told to
// If not null, this entry is marked as deleted
DeletedAt gorm.DeletedAt `gorm:"index"`
ServerType RemoteServerType // What software the server is running. Useful for formatting
Name string // What the server wants to be known as (usually same as url)
@ -17,10 +20,31 @@ type RemoteServer struct {
IsSelf bool // Whether this server is yours truly
}
var placeholderServer = &RemoteServer{
ID: "placeholder",
ServerType: REMOTE_SERVER_LINSTROM,
Name: "placeholder",
Icon: "placeholder",
IsSelf: false,
func (s *Storage) FindRemoteServer(url string) (*RemoteServer, error) {
// TODO: Implement me
panic("not implemented")
}
// Find a remote server with a given display name
func (s *Storage) FindRemoteServerByDisplayName(displayName string) (*RemoteServer, error) {
// TODO: Implement me
panic("not implemented")
}
// Create a new remote server
func (s *Storage) NewRemoteServer(
url, displayName, icon string,
serverType RemoteServerType,
) (*RemoteServer, error) {
// TODO: Implement me
panic("not implemented")
}
// Update a remote server with the given url
// If displayName is set, update that
// If icon is set, update that
// Returns the updated version
func (s *Storage) UpdateRemoteServer(url string, displayName, icon *string) (*RemoteServer, error) {
// TODO: Implement me
panic("not implemented")
}
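All of these are still stubs. A hedged sketch of what FindRemoteServer could look like, assuming the url doubles as the primary key (see the ID comment above), reusing ErrEntryNotFound, and assuming the errors and gorm packages are imported; the name is made up to avoid implying this is the final implementation:
func (s *Storage) findRemoteServerSketch(url string) (*RemoteServer, error) {
server := RemoteServer{}
if err := s.db.First(&server, "id = ?", url).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, ErrEntryNotFound
}
return nil, err
}
return &server, nil
}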

7
storage/remoteUser.go Normal file
View file

@ -0,0 +1,7 @@
package storage
// TODO: More helper stuff
func (s *Storage) NewRemoteUser(fullHandle string) (*Account, error) {
return nil, nil
}

11
storage/roles.go Normal file
View file

@ -0,0 +1,11 @@
package storage
type Role struct {
// Name of the role
Name string
// If set, counts as all permissions being set and all restrictions being disabled
FullAdmin bool
// TODO: More control options
// Extend upon whatever Masto, Akkoma and Misskey have
// Lots of details please
}
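A tiny illustration of the documented FullAdmin semantics (the helper is hypothetical, permission checks don't exist yet):
func (r *Role) allows(permissionGranted bool) bool {
// FullAdmin counts as every permission being set and every restriction being disabled
if r.FullAdmin {
return true
}
return permissionGranted
}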

View file

@ -5,6 +5,10 @@ import (
"errors"
)
// TODO: Decide whether to turn this into an int too to save resources
// And then use go:generate instead for pretty printing
// What software a server is running
// Mostly important for rendering
type RemoteServerType string

View file

@ -1,3 +1,8 @@
// TODO: Unify function names
// Storage is the handler for cache and db access
// It handles storing various data in the database as well as caching that data
// Said data includes notes, accounts, metadata about media files, servers and similar
package storage
import (
@ -5,6 +10,7 @@ import (
"fmt"
"github.com/glebarez/sqlite"
"gitlab.com/mstarongitlab/linstrom/storage/cache"
"gorm.io/driver/postgres"
"gorm.io/gorm"
)
@ -12,69 +18,52 @@ import (
// Storage is responsible for all database, cache and media related actions
// and serves as the lowest layer of the cake
type Storage struct {
db *gorm.DB
db *gorm.DB
cache *cache.Cache
}
var ErrAccountNotFound = errors.New("account not found")
var ErrInvalidData = errors.New("invalid data")
// Build a new storage using sqlite as database backend
func NewStorageSqlite(filePath string) (*Storage, error) {
func NewStorageSqlite(filePath string, cache *cache.Cache) (*Storage, error) {
db, err := gorm.Open(sqlite.Open(filePath))
if err != nil {
return nil, err
}
return storageFromEmptyDb(db)
return storageFromEmptyDb(db, cache)
}
func NewStoragePostgres(dbUrl string) (*Storage, error) {
func NewStoragePostgres(dbUrl string, cache *cache.Cache) (*Storage, error) {
db, err := gorm.Open(postgres.Open(dbUrl))
if err != nil {
return nil, err
}
return storageFromEmptyDb(db)
return storageFromEmptyDb(db, cache)
}
func storageFromEmptyDb(db *gorm.DB) (*Storage, error) {
func storageFromEmptyDb(db *gorm.DB, cache *cache.Cache) (*Storage, error) {
// AutoMigrate ensures the db is in a state where all the structs given here
// have their own tables and relations setup. It also updates tables if necessary
if err := db.AutoMigrate(
placeholderMediaFile,
placeholderUser,
placeholderNote,
placeholderServer,
); err != nil {
return nil, fmt.Errorf("problem while auto migrating: %w", err)
}
// Afterwards add the placeholder entries for each table.
// FirstOrCreate either creates a new entry or retrieves the first matching one
// We only care about the creation if there is none yet, so no need to carry the result over
if res := db.FirstOrCreate(placeholderMediaFile); res.Error != nil {
return nil, fmt.Errorf("failed to add placeholder media file: %w", res.Error)
}
if res := db.FirstOrCreate(placeholderUser()); res.Error != nil {
return nil, fmt.Errorf("failed to add placeholder media file: %w", res.Error)
}
if res := db.FirstOrCreate(placeholderNote); res.Error != nil {
return nil, fmt.Errorf("failed to add placeholder media file: %w", res.Error)
}
if res := db.FirstOrCreate(placeholderServer); res.Error != nil {
return nil, fmt.Errorf("failed to add placeholder media file: %w", res.Error)
err := db.AutoMigrate(
MediaMetadata{},
Account{},
RemoteServer{},
Note{},
Role{},
PasskeySession{},
)
if err != nil {
return nil, err
}
// And finally, build the actual storage struct
return &Storage{
db: db,
db: db,
cache: cache,
}, nil
}
func (s *Storage) FindLocalAccount(handle string) (*User, error) {
acc := User{}
res := s.db.Where("handle = ?", handle).First(&acc)
if res.Error != nil {
return nil, fmt.Errorf("failed to query db: %w", res.Error)
}
if res.RowsAffected == 0 {
return nil, ErrAccountNotFound
}
return nil, errors.New("unimplemented")
// TODO: Placeholder. Update to proper implementation later. Including signature
func (s *Storage) FindLocalAccount(handle string) (string, error) {
return handle, nil
}
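Putting the pieces together, wiring the cache into a storage instance now looks roughly like this (file name and cache size are made-up values):
func exampleStorageSetup() (*Storage, error) {
c, err := cache.NewCache(10_000_000, nil) // nil: no redis, in-memory only
if err != nil {
return nil, err
}
return NewStorageSqlite("linstrom.sqlite", c)
}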

View file

@ -1,24 +1,34 @@
package storage
import (
"crypto/rand"
"errors"
"fmt"
"strings"
"time"
"github.com/go-webauthn/webauthn/webauthn"
"github.com/google/uuid"
"github.com/mstarongithub/passkey"
"github.com/rs/zerolog/log"
"gitlab.com/mstarongitlab/linstrom/ap"
"gitlab.com/mstarongitlab/linstrom/config"
"gorm.io/gorm"
)
// Database representation of a user account
// This can be a bot, remote or not
// If remote, this is used for caching the account
type User struct {
ID string `gorm:"primarykey"` // ID is a uuid for this account
Handle string // Handle is the full handle, eg @max@example.com
CreatedAt time.Time // When this entry was created
UpdatedAt time.Time // When this account was last updated. Will also be used for refreshing remote accounts
type Account struct {
ID string `gorm:"primarykey"` // ID is a uuid for this account
// Handle of the user (eg "max" if the full username is @max@example.com)
// Assume unchangeable (once set by a user) to be kind to other implementations
// Would be an easy avenue to fuck with them though
Handle string
CreatedAt time.Time // When this entry was created. Automatically set by gorm
// When this account was last updated. Will also be used for refreshing remote accounts. Automatically set by gorm
UpdatedAt time.Time
// When this entry was deleted (for soft deletions)
// Soft delete means that this entry still exists in the db, but gorm won't include it anymore unless specifically told to
// If not null, this entry is marked as deleted
DeletedAt gorm.DeletedAt `gorm:"index"`
Remote bool // Whether the account is a local or remote one
Server string // The url of the server this account is from
@ -39,110 +49,292 @@ type User struct {
RestrictedFollow bool
// List of things the owner identifies as
// Example [cat human robot] means that the owner probably identifies as
// a cyborg-catgirl/boy/human
IdentifiesAs []Being
// a cyborg-catgirl/boy/human or a cat/human-shaped robot, refer to Gender for pronouns
IdentifiesAs []Being `gorm:"serializer:json"`
// List of pronouns the owner identifies with
// An unordered list since the owner can freely set it
// Examples: [she her], [it they its them]
Gender []string
Gender []string `gorm:"serializer:json"`
// The roles associated with an account
Roles []string `gorm:"serializer:json"`
// --- And internal account stuff ---
// Still public fields since they wouldn't be able to be stored in the db otherwise
PasswordHash []byte // Hash of the user's password
TotpToken []byte // Token for totp verification
// All the registered passkeys, name of passkey to credentials
// Could this be exported to another table? Yes
// Would it make sense? Probably not
// Will just take the performance hit of json conversion
// Access should be rare enough anyway
Passkeys map[string]webauthn.Credential `gorm:"serializer:json"`
PrivateKeyPem *string // The private key of the account. Nil if remote user
// Restrictions applied to the account
// Flag value, can be multiple
Restrictions AccountRestriction
PrivateKeyPem *string // The private key of the account. Nil if remote user
WebAuthnId []byte // The unique and random ID of this account used for passkey authentication
// Whether the account got verified and is allowed to be active
// For local accounts being active means being allowed to login and perform interactions
// For remote users, if an account is not verified, any interactions it sends are discarded
Verified bool
PasskeyCredentials []webauthn.Credential `gorm:"serializer:json"` // Webauthn credentials data
// Has a RemoteAccountLinks included if remote user
RemoteLinks *RemoteAccountLinks
}
var placeholderUser = &User{
ID: "placeholder",
Handle: "placeholder",
Remote: false,
Server: "placeholder",
DisplayName: "placeholder",
CustomFields: []uint{},
Description: "placeholder",
Tags: []string{},
IsBot: true,
Follows: []string{},
Followers: []string{},
Icon: "placeholder",
Background: "placeholder",
Banner: "placeholder",
Indexable: false,
PublicKeyPem: nil,
RestrictedFollow: false,
IdentifiesAs: []Being{BEING_ROBOT},
Gender: []string{"it", "its"},
PasswordHash: []byte("placeholder"),
TotpToken: []byte("placeholder"),
Passkeys: map[string]webauthn.Credential{},
PrivateKeyPem: nil,
// Contains static and cached info about a remote account, mostly links
type RemoteAccountLinks struct {
// ---- Section: gorm
// Sets this struct up as a value that an Account may have
gorm.Model
AccountID string
// Just about every link here is optional to accommodate servers with only minimal accounts
// Minimal being handle, ap link and inbox
ApLink string
ViewLink *string
FollowersLink *string
FollowingLink *string
InboxLink string
OutboxLink *string
FeaturedLink *string
FeaturedTagsLink *string
}
func NewEmptyUser() *User {
return &User{
ID: uuid.NewString(),
Handle: "placeholder",
Remote: false,
Server: "placeholder",
DisplayName: "placeholder",
CustomFields: []uint{},
Description: "placeholder",
Tags: []string{},
IsBot: true,
Follows: []string{},
Followers: []string{},
Icon: "placeholder",
Background: "placeholder",
Banner: "placeholder",
Indexable: false,
PublicKeyPem: nil,
RestrictedFollow: false,
IdentifiesAs: []Being{BEING_ROBOT},
Gender: []string{"it", "its"},
PasswordHash: []byte("placeholder"),
TotpToken: []byte("placeholder"),
Passkeys: map[string]webauthn.Credential{},
PrivateKeyPem: nil,
// Find an account in the db using a given full handle (@max@example.com)
// Returns an account and nil if an account is found, otherwise nil and the error
func (s *Storage) FindAccountByFullHandle(handle string) (*Account, error) {
log.Trace().Caller().Send()
log.Debug().Str("account-handle", handle).Msg("Looking for account by handle")
log.Debug().Str("account-handle", handle).Msg("Checking if there's a cache hit")
// Try and find the account in cache first
cacheAccId, err := s.cacheHandleToAccUid(handle)
if err == nil {
log.Info().Str("account-handle", handle).Msg("Hit account handle in cache")
// Then always load via id since unique key access should be faster than string matching
return s.FindAccountById(*cacheAccId)
} else {
if !errors.Is(err, errCacheNotFound) {
log.Error().Err(err).Str("account-handle", handle).Msg("Problem while checking cache for account")
return nil, err
}
}
}
// Get a stored user by the ID the user has been stored with
// Either returns a valid user and nil for the error
// Or nil for the user and an error
func (s *Storage) GetUserByID(id string) (*User, error) {
user := User{}
res := s.db.First(&user, "id = ?", id)
// Check if any error except NotFound occured
// If so, wrap it a little
if res.Error != nil && !errors.Is(res.Error, gorm.ErrRecordNotFound) {
return nil, fmt.Errorf("problem while getting user from db: %w", res.Error)
// Failed to find in cache, go the slow route of hitting the db
log.Debug().Str("account-handle", handle).Msg("Didn't hit account in cache, going to db")
name, server, err := ap.SplitFullHandle(handle)
if err != nil {
log.Warn().Err(err).Str("account-handle", handle).Msg("Failed to split up account handle")
return nil, err
}
// Then check if an error occured and said error is NotFound
// If it is, just pass it forward
if res.Error != nil && errors.Is(res.Error, gorm.ErrRecordNotFound) {
acc := Account{}
res := s.db.Where("name = ?", name).Where("server = ?", server).First(&acc)
if res.Error != nil {
if errors.Is(res.Error, gorm.ErrRecordNotFound) {
log.Info().Str("account-handle", handle).Msg("Account with handle not found")
} else {
log.Error().Err(err).Str("account-handle", handle).Msg("Failed to get account with handle")
}
return nil, res.Error
}
return &user, nil
log.Info().Str("account-handle", handle).Msg("Found account, also inserting into cache")
if err = s.cache.Set(cacheUserIdToAccPrefix+acc.ID, &acc); err != nil {
log.Warn().
Err(err).
Str("account-handle", handle).
Msg("Found account but failed to insert into cache")
}
if err = s.cache.Set(cacheUserHandleToIdPrefix+strings.TrimLeft(handle, "@"), acc.ID); err != nil {
log.Warn().
Err(err).
Str("account-handle", handle).
Msg("Failed to store handle to id in cache")
}
return &acc, nil
}
// Get only the name part of the handle a user has
func (u *User) GetHandleNameOnly() string {
// First remove the leading @ (TrimPrefix) to achieve a format of username@server
// Then split at the @ to the server and user seperately
// And return the first element since that is the username
// Note: Getting the first element will always be safe
// since trim returns a string guaranteed (empty is ok)
// and if Split doesn't do anything (eg empty string) it just returns the input in the first element
return strings.Split(strings.TrimPrefix(u.Handle, "@"), "@")[0]
// Find an account given a specific ID
func (s *Storage) FindAccountById(id string) (*Account, error) {
log.Trace().Caller().Send()
log.Debug().Str("account-id", id).Msg("Looking for account by id")
log.Debug().Str("account-id", id).Msg("First trying to hit cache")
acc, err := s.cacheAccIdToData(id)
if err == nil {
log.Info().Str("account-id", id).Msg("Found account in cache")
return acc, nil
} else if !errors.Is(err, errCacheNotFound) {
log.Error().Err(err).Str("account-id", id).Msg("Error while looking for account in cache")
return nil, err
}
log.Debug().Str("account-id", id).Msg("Didn't hit account in cache, checking db")
acc = &Account{}
res := s.db.First(acc, "id = ?", id)
if res.Error != nil {
if errors.Is(res.Error, gorm.ErrRecordNotFound) {
log.Warn().Str("account-id", id).Msg("Account not found")
} else {
log.Error().Err(res.Error).Str("account-id", id).Msg("Failed to look for account")
}
return nil, res.Error
}
log.Info().Str("account-id", id).Msg("Found account in db, also adding to cache")
if err = s.cache.Set(cacheUserIdToAccPrefix+id, acc); err != nil {
log.Warn().Err(err).Str("account-id", id).Msg("Failed to add account to cache")
}
return acc, nil
}
// Update a given account in storage and cache
func (s *Storage) UpdateAccount(acc *Account) error {
// If the account is nil or doesn't have an id, error out
if acc == nil || acc.ID == "" {
return ErrInvalidData
}
res := s.db.Save(acc)
if res.Error != nil {
return res.Error
}
if err := s.cache.Set(cacheUserIdToAccPrefix+acc.ID, acc); err != nil {
return err
}
return nil
}
// Create a new empty account for future use
func (s *Storage) NewEmptyAccount() (*Account, error) {
log.Trace().Caller().Send()
log.Debug().Msg("Creating new empty account")
acc := Account{}
// Generate the 64 bit id for passkey and webauthn stuff
data := make([]byte, 64)
c, err := rand.Read(data)
for err != nil || c != len(data) || c < 64 {
data = make([]byte, 64)
c, err = rand.Read(data)
}
acc.WebAuthnId = data
acc.Followers = []string{}
acc.Tags = []string{}
acc.Follows = []string{}
acc.Gender = []string{}
acc.CustomFields = []uint{}
acc.IdentifiesAs = []Being{}
acc.PasskeyCredentials = []webauthn.Credential{}
res := s.db.Save(&acc)
if res.Error != nil {
log.Error().Err(res.Error).Msg("Failed to save new account")
return nil, res.Error
}
log.Info().Str("account-id", acc.ID).Msg("Created new account")
return &acc, nil
}
// Create a new local account using the given handle
// The handle in this case is only the part before the domain (example: @bob@example.com would have a handle of bob)
// It also sets up a bunch of values that tend to be obvious for local accounts
func (s *Storage) NewLocalAccount(handle string) (*Account, error) {
log.Trace().Caller().Send()
log.Debug().Str("account-handle", handle).Msg("Creating new local account")
acc, err := s.NewEmptyAccount()
if err != nil {
log.Error().Err(err).Msg("Failed to create empty account for use")
return nil, err
}
acc.Handle = handle
acc.Server = config.GlobalConfig.General.GetFullDomain()
acc.Remote = false
acc.DisplayName = handle
log.Debug().
Str("account-handle", handle).
Str("account-id", acc.ID).
Msg("Saving new local account")
res := s.db.Save(acc)
if res.Error != nil {
log.Error().Err(res.Error).Any("account-full", acc).Msg("Failed to save local account")
return nil, res.Error
}
log.Info().
Str("account-handle", handle).
Str("account-id", acc.ID).
Msg("Created new local account")
return acc, nil
}
// ---- Section WebAuthn.User
// Implements the webauthn.User interface for interaction with passkeys
func (a *Account) WebAuthnID() []byte {
log.Trace().Caller().Send()
return a.WebAuthnId
}
func (u *Account) WebAuthnName() string {
log.Trace().Caller().Send()
return u.Handle
}
func (u *Account) WebAuthnDisplayName() string {
log.Trace().Caller().Send()
return u.DisplayName
}
func (u *Account) WebAuthnCredentials() []webauthn.Credential {
log.Trace().Caller().Send()
return u.PasskeyCredentials
}
func (u *Account) WebAuthnIcon() string {
log.Trace().Caller().Send()
return ""
}
// ---- Section passkey.User
// Implements the passkey.User interface
func (u *Account) PutCredential(new webauthn.Credential) {
log.Trace().Caller().Send()
u.PasskeyCredentials = append(u.PasskeyCredentials, new)
}
// Section passkey.UserStore
// Implements the passkey.UserStore interface
func (s *Storage) GetOrCreateUser(userID string) passkey.User {
log.Trace().Caller().Send()
log.Debug().
Str("account-handle", userID).
Msg("Looking for or creating account for passkey stuff")
acc := &Account{}
res := s.db.Where(Account{Handle: userID, Server: config.GlobalConfig.General.GetFullDomain()}).
First(acc)
if errors.Is(res.Error, gorm.ErrRecordNotFound) {
log.Debug().Str("account-handle", userID)
var err error
acc, err = s.NewLocalAccount(userID)
if err != nil {
log.Error().
Err(err).
Str("account-handle", userID).
Msg("Failed to create new account for webauthn request")
return nil
}
}
return acc
}
func (s *Storage) GetUserByWebAuthnId(id []byte) passkey.User {
log.Trace().Caller().Send()
log.Debug().Bytes("webauthn-id", id).Msg("Looking for account with webauthn id")
acc := Account{}
res := s.db.Where(Account{WebAuthnId: id}).First(&acc)
if res.Error != nil {
log.Error().
Err(res.Error).
Bytes("webauthn-id", id).
Msg("Failed to find user with webauthn ID")
return nil
}
log.Info().Msg("Found account with given webauthn id")
return &acc
}
func (s *Storage) SaveUser(rawUser passkey.User) {
log.Trace().Caller().Send()
user, ok := rawUser.(*Account)
if !ok {
log.Error().Any("raw-user", rawUser).Msg("Failed to cast raw user to db account")
return
}
s.db.Save(user)
}
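End to end, creating and then resolving a local account would look roughly like this (the handle is made up; the full handle is rebuilt from the configured domain the same way NewLocalAccount does):
func exampleLocalAccount(s *Storage) error {
acc, err := s.NewLocalAccount("max") // handle is an assumption
if err != nil {
return err
}
fullHandle := "@" + acc.Handle + "@" + config.GlobalConfig.General.GetFullDomain()
// Second lookup should be served from cache once FindAccountByFullHandle has run once
_, err = s.FindAccountByFullHandle(fullHandle)
return err
}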

View file

@ -1,10 +1,6 @@
package storage
import (
"database/sql/driver"
"errors"
)
// What kind of being a user identifies as
type Being string
const (
@ -15,16 +11,3 @@ const (
BEING_ROBOT = Being("robot")
BEING_DOLL = Being("doll")
)
func (r *Being) Value() (driver.Value, error) {
return r, nil
}
func (r *Being) Scan(raw any) error {
if v, ok := raw.(string); ok {
*r = Being(v)
return nil
} else {
return errors.New("value not a string")
}
}

View file

@ -6,9 +6,12 @@ import (
"gorm.io/gorm"
)
// Describes a custom attribute field for accounts
type UserInfoField struct {
gorm.Model // Can actually just embed this as is here as those are not something directly exposed :3
Name string
Value string
LastUrlCheckDate *time.Time // Used if the value is an url to somewhere. Empty if value is not an url
}
// TODO: Add functions to store, load, update and delete these
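For that TODO, a hedged sketch of what a create helper could look like (name and signature are assumptions, and linking fields back to an account via Account.CustomFields is left out):
func (s *Storage) newUserInfoFieldSketch(name, value string) (*UserInfoField, error) {
field := UserInfoField{Name: name, Value: value}
if err := s.db.Create(&field).Error; err != nil {
return nil, err
}
return &field, nil
}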