Merge branch 'main' of git.mstar.dev:mstar/linstrom
All checks were successful
/ docker (push) Successful in 1m58s

This commit is contained in:
Melody Becker 2025-07-07 21:49:44 +02:00
commit 9baf6ad5c7
Signed by: mstar
SSH key fingerprint: SHA256:vkXfS9FG2pVNVfvDrzd1VW9n8VJzqqdKQGljxxX8uK8
49 changed files with 1424 additions and 287 deletions

View file

@ -177,6 +177,12 @@ func importRemoteNoteRecursive(
// In which case, you need to remove tags that don't exist anymore
// and only create the ones not yet stored
err = dbgen.NoteToPing.Save(pings...)
if err != nil {
return "", err
}
err = dbgen.NoteTag.Save(hashtags...)
if err != nil {
return "", err
}
return dbNote.ID, nil
}

View file

@ -0,0 +1,23 @@
package translators
import (
"context"
"time"
)
// Announce is boost
type activityAnnounceOut struct {
Context any `json:"@context,omitempty"`
Id string
Type string // Always "Announce"
Actor string // The one doing the boost
Published time.Time
To []string
CC []string
Object string // Link to object being boosted
}
// AnnounceFromStorage builds the outbound Announce activity for the boost
// with the given id.
// TODO(review): not implemented yet; any caller will panic.
func AnnounceFromStorage(ctx context.Context, id string) (*activityAnnounceOut, error) {
	panic("not implemented")
}

View file

@ -2,7 +2,9 @@ package translators
import (
"context"
"strings"
"git.mstar.dev/mstar/linstrom/config"
"git.mstar.dev/mstar/linstrom/storage-new/dbgen"
)
@ -10,17 +12,39 @@ type Media struct {
Type string `json:"type"`
Url string `json:"url"`
MediaType string `json:"mediaType"`
Name string `json:"name"`
Summary string `json:"summary"`
Sensitive bool `json:"sensitive"`
}
// MediaFromStorage builds the ActivityPub attachment representation for the
// local (non-remote) media file with the given id.
// Returns the db error unchanged if the metadata lookup fails.
func MediaFromStorage(ctx context.Context, id string) (*Media, error) {
	metadata, err := dbgen.MediaMetadata.Where(dbgen.MediaMetadata.ID.Eq(id), dbgen.MediaMetadata.Remote.Is(false)).
		First()
	if err != nil {
		return nil, err
	}
	data := Media{
		MediaType: metadata.Type,
		// Served through Linstrom's own media endpoint, not the raw s3 location
		Url:       config.GlobalConfig.General.GetFullPublicUrl() + "/media/" + id,
		Name:      metadata.AltText,
		Summary:   metadata.AltText,
		Sensitive: metadata.Blurred,
	}
	// Map the mimetype's top-level type onto the matching ActivityStreams type.
	// BUG FIX: the previous version pre-set Type to "Image" with a stale FIXME,
	// but the switch below always overwrote it; the dead initializer and the
	// four arms that all produced "Document" have been folded into the default.
	switch strings.SplitN(metadata.Type, "/", 2)[0] {
	case "audio":
		data.Type = "Audio"
	case "image":
		data.Type = "Image"
	case "video":
		data.Type = "Video"
	default:
		// application, text, font and anything unknown are served as documents
		data.Type = "Document"
	}
	return &data, nil
}

View file

@ -51,6 +51,9 @@ func UserFromStorage(ctx context.Context, id string) (*User, error) {
Preload(dbgen.User.Icon).Preload(dbgen.User.Banner).
Preload(dbgen.User.BeingTypes).
First()
if err != nil {
return nil, err
}
err = storage.EnsureLocalUserIdHasLinks(id)
if err != nil {

View file

@ -15,6 +15,13 @@ import (
"git.mstar.dev/mstar/linstrom/storage-new/models"
)
// Helper for discovering a user during a discovering login (user not known yet).
// Not thread safe and should only be used once.
// FoundUser will be set after a successful call to userWithPasskeyDiscoverer
type passkeyDiscoverer struct {
	// The user matched by their passkey user handle; nil until discovery succeeds
	FoundUser *models.User
}
// TODO: Check if passkey encryption is viable
// Check if encryption for passkey info data is viable to implement
// and if we should do it.
@ -25,7 +32,7 @@ import (
// Start the login process via passkey for a given username.
// Returns the credential options the passkey needs to sign
func (a *Authenticator) StartPasskeyLogin(
func (a *Authenticator) StartPasskeyLoginWithUsername(
username string,
) (*protocol.CredentialAssertion, string, error) {
if ok, err := a.canUsernameLogin(username); !ok {
@ -58,7 +65,7 @@ func (a *Authenticator) StartPasskeyLogin(
// Complete a passkey login request
// Takes the username logging in as well as the raw request containing the passkey response
func (a *Authenticator) CompletePasskeyLogin(
func (a *Authenticator) CompletePasskeyLoginWithUsername(
username string,
sessionId string,
response *http.Request,
@ -132,6 +139,82 @@ func (a *Authenticator) CompletePasskeyLogin(
return dbAccessToken.Token, nil
}
// Start the login process via passkey for an unknown username.
// Returns the credential options the passkey needs to sign.
// The relevant user will be discovered during the completion stage
// TODO(review): not implemented yet; CompletePasskeyLoginDiscovery below
// already expects the login token this function is supposed to create.
func (a *Authenticator) StartPasskeyLoginDiscovery() (*protocol.CredentialAssertion, string, error) {
	panic("not implemented")
}
// Complete a passkey login request for an unknown user.
// The actual user is discovered through the passkey's user handle during
// completion; on success an access token for that user is returned.
func (a *Authenticator) CompletePasskeyLoginDiscovery(
	sessionId string,
	response *http.Request,
) (accessToken string, err error) {
	// Get latest login token data
	loginToken, err := dbgen.LoginProcessToken.Where(dbgen.LoginProcessToken.Token.Eq(sessionId)).
		First()
	if err != nil {
		return "", other.Error(
			"auth",
			"failed to get user's login token for passkey login completion",
			err,
		)
	}
	// Check if that token has expired
	if loginToken.ExpiresAt.Before(time.Now()) {
		return "", ErrProcessTimeout
	}
	// The serialized passkey session was stashed in the token's Name column
	var pkeySession webauthn.SessionData
	err = json.Unmarshal([]byte(loginToken.Name), &pkeySession)
	if err != nil {
		return "", other.Error("auth", "failed to unmarshal passkey session for user", err)
	}
	discoverer := a.getPasskeyDiscoverer()
	// Hand data to webauthn for completion. The discoverer looks up and
	// remembers the matching user as a side effect
	newSession, err := a.webauthn.FinishDiscoverableLogin(
		discoverer.userWithPasskeyDiscoverer,
		pkeySession,
		response,
	)
	if err != nil {
		return "", other.Error("auth", "passkey completion failed", err)
	}
	// TODO: Utilise clone warning
	// newSession.Authenticator.CloneWarning
	jsonSessionId, err := json.Marshal(newSession.ID)
	if err != nil {
		return "", other.Error("auth", "failed to marshal session", err)
	}
	// BUG FIX: this previously marshalled newSession.ID a second time, which
	// would have replaced the stored credential blob with just its ID.
	// Persist the full updated credential instead
	jsonSession, err := json.Marshal(newSession)
	if err != nil {
		return "", other.Error("auth", "failed to marshal session", err)
	}
	// Update credentials
	// WARN: I am not sure if this will work
	// Using the ID of the passkey session *should* be unique enough to identify the correct one
	// Of course, even then, there's still the problem of matching as
	// I can't yet guarantee that the parsed json content for the ID would be the same
	_, err = dbgen.UserAuthMethod.Where(dbgen.UserAuthMethod.Token.Like("%"+string(jsonSessionId)+"%")).
		Update(dbgen.UserAuthMethod.Token, jsonSession)
	if err != nil {
		return "", other.Error("auth", "failed to update credentials", err)
	}
	// Hand out a fresh access token for the discovered user
	dbAccessToken := models.AccessToken{
		User:      *discoverer.FoundUser,
		UserId:    discoverer.FoundUser.ID,
		ExpiresAt: calcAccessExpirationTimestamp(),
	}
	err = dbgen.AccessToken.Omit(dbgen.AccessToken.Token).Create(&dbAccessToken)
	if err != nil {
		return "", other.Error("auth", "failed to generate access token", err)
	}
	return dbAccessToken.Token, nil
}
// Start the process of registrating a passkey to an account
func (a *Authenticator) StartPasskeyRegistration(
username string,
@ -219,3 +302,22 @@ func (a *Authenticator) CompletePasskeyRegistration(
}
return nil
}
// Get a new passkey discoverer.
// Currently stateless; the session id may later be folded in here for
// additional verification.
func (a *Authenticator) getPasskeyDiscoverer() *passkeyDiscoverer {
	discoverer := new(passkeyDiscoverer)
	return discoverer
}
// userWithPasskeyDiscoverer implements webauthn.DiscoverableUserHandler
// for the use during a discovering login process.
// On success it remembers the matched user in FoundUser and returns a
// webauthn-compatible wrapper around it.
func (d *passkeyDiscoverer) userWithPasskeyDiscoverer(
	rawID, userHandle []byte,
) (user webauthn.User, err error) {
	match, lookupErr := dbgen.User.Where(dbgen.User.PasskeyId.Eq(userHandle)).First()
	if lookupErr != nil {
		return nil, lookupErr
	}
	d.FoundUser = match
	return &fakeUser{match}, nil
}

View file

@ -54,6 +54,7 @@ func main() {
g.ApplyInterface(func(models.IAccessToken) {}, models.AccessToken{})
g.ApplyInterface(func(models.INote) {}, models.Note{})
g.ApplyInterface(func(models.IUserToUserRelation) {}, models.UserToUserRelation{})
g.ApplyInterface(func(models.IFailedOutboundRequest) {}, models.FailedOutboundRequest{})
log.Info().Msg("Extra features applied, starting generation")
g.Execute()

41
cmd/sample-id-gen/main.go Normal file
View file

@ -0,0 +1,41 @@
package main
import (
"flag"
"fmt"
"github.com/google/uuid"
"github.com/nrednav/cuid2"
"github.com/rs/xid"
)
var flagGenerator = flag.String(
"generator",
"xid",
"Which generator to showcase. Options: xid, cuid, uuid. Defaults to xid",
)
// main showcases one of the available id generators by printing ten samples.
func main() {
	flag.Parse()
	// Normalize the requested generator and pick the matching function in one
	// pass; anything unrecognized falls back to xid.
	generator := "xid"
	var gen func() string
	switch *flagGenerator {
	case "uuid":
		generator = "uuid"
		gen = uuid.NewString
	case "cuid":
		generator = "cuid"
		gen = cuid2.Generate
	default:
		gen = func() string { return xid.New().String() }
	}
	fmt.Printf("Generator used: %s\n", generator)
	fmt.Println("Generating 10 ids")
	for range 10 {
		fmt.Println(gen())
	}
}

View file

@ -3,6 +3,7 @@ package config
import (
"fmt"
"os"
"path"
"strings"
"git.mstar.dev/mstar/goutils/other"
@ -73,6 +74,14 @@ type ConfigStorage struct {
MaxReconnectAttempts int `toml:"max_reconnect_attempts"`
}
type ConfigTranscoder struct {
SharedDirectory string `toml:"shared_directory"`
Secret string `toml:"secret"`
ServerAddress string `toml:"server_address"`
ServerPort int `toml:"server_port"`
IgnoreTranscoder bool `toml:"ignore_transcoder"`
}
type ConfigS3 struct {
KeyId string `toml:"key_id"`
Secret string `toml:"secret"`
@ -143,6 +152,7 @@ type Config struct {
Admin ConfigAdmin `toml:"admin"`
Webauthn ConfigWebAuthn `toml:"webauthn"`
Storage ConfigStorage `toml:"storage"`
Transcoder ConfigTranscoder `toml:"transcoder"`
Mail ConfigMail `toml:"mail"`
Self ConfigSelf `toml:"self"`
S3 ConfigS3 `toml:"s3"`
@ -185,13 +195,16 @@ var defaultConfig Config = Config{
Port: 5432,
SslMode: other.IntoPointer("disable"),
TimeZone: other.IntoPointer("Europe/Berlin"),
RedisUrl: nil,
MaxInMemoryCacheSize: 1e6, // 1 Megabyte
MaxInMemoryCacheTTL: 5,
MaxRedisCacheTTL: nil,
EncryptionKey: "Encryption key for sensitive information. DO NOT CHANGE THIS AFTER SETUP",
MaxReconnectAttempts: 3,
},
Transcoder: ConfigTranscoder{
SharedDirectory: "/tmp/linstrom-transcoder",
Secret: "The same secret as configured in the transcoder",
ServerAddress: "127.0.0.1",
ServerPort: 5594,
IgnoreTranscoder: false,
},
Mail: ConfigMail{
Host: "localhost",
Port: 587,
@ -275,6 +288,18 @@ func (sc *ConfigStorage) BuildPostgresDSN() string {
return dsn
}
func (tc *ConfigTranscoder) Address() string {
return fmt.Sprintf("%s:%d", tc.ServerAddress, tc.ServerPort)
}
func (tc *ConfigTranscoder) InDir() string {
return path.Join(tc.SharedDirectory, "input")
}
func (tc *ConfigTranscoder) OutDir() string {
return path.Join(tc.SharedDirectory, "output")
}
func WriteDefaultConfig(toFile string) error {
log.Trace().Caller().Send()
log.Info().Str("config-file", toFile).Msg("Writing default config to file")

View file

@ -1,4 +0,0 @@
export AWS_SECRET_ACCESS_KEY=b6d1c8ec97052b8a40ae953de34f336170d85554fbe7875acce0ff51464724ee
export AWS_ACCESS_KEY_ID=GK458f9d7315fc6c9686c41045
export AWS_DEFAULT_REGION="garage"
export AWS_ENDPOINT_URL="http://localhost:3900"

5
go.mod
View file

@ -3,11 +3,13 @@ module git.mstar.dev/mstar/linstrom
go 1.24.2
require (
git.mstar.dev/mstar/goutils v1.14.2
golang.org/x/sync v0.15.0
git.mstar.dev/mstar/goutils v1.16.1
github.com/BurntSushi/toml v1.5.0
github.com/PeerDB-io/gluabit32 v1.0.2
github.com/cjoudrey/gluahttp v0.0.0-20201111170219-25003d9adfa9
github.com/cosmotek/loguago v1.0.0
github.com/gabriel-vasile/mimetype v1.4.9
github.com/go-acme/lego/v4 v4.23.1
github.com/go-webauthn/webauthn v0.12.3
github.com/google/uuid v1.6.0
@ -79,7 +81,6 @@ require (
go.uber.org/mock v0.5.0 // indirect
golang.org/x/mod v0.24.0 // indirect
golang.org/x/net v0.39.0 // indirect
golang.org/x/sync v0.13.0 // indirect
golang.org/x/text v0.24.0 // indirect
golang.org/x/tools v0.32.0 // indirect
gorm.io/datatypes v1.2.5 // indirect

8
go.sum
View file

@ -1,7 +1,7 @@
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
git.mstar.dev/mstar/goutils v1.14.2 h1:2W9AtsAAGR9OeztPnyVCkxiuZDe7h1DlXzil35wU+vs=
git.mstar.dev/mstar/goutils v1.14.2/go.mod h1:juxY0eZEMnA95fedRp2LVXvUBgEjz66nE8SEdGKcxMA=
git.mstar.dev/mstar/goutils v1.16.1 h1:uVsT+a8Ad0DuYy7rnXAVZ5NjoE6AHit6DGxFn5XiSrU=
git.mstar.dev/mstar/goutils v1.16.1/go.mod h1:juxY0eZEMnA95fedRp2LVXvUBgEjz66nE8SEdGKcxMA=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/PeerDB-io/gluabit32 v1.0.2 h1:AGI1Z7dwDVotakpuOOuyTX4/QGi5HUYsipL/VfodmO4=
@ -32,6 +32,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
github.com/go-acme/lego/v4 v4.23.1 h1:lZ5fGtGESA2L9FB8dNTvrQUq3/X4QOb8ExkKyY7LSV4=
github.com/go-acme/lego/v4 v4.23.1/go.mod h1:7UMVR7oQbIYw6V7mTgGwi4Er7B6Ww0c+c8feiBM0EgI=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
@ -191,6 +193,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

View file

@ -18,6 +18,7 @@ import (
"gorm.io/gorm"
"git.mstar.dev/mstar/linstrom/config"
"git.mstar.dev/mstar/linstrom/media"
"git.mstar.dev/mstar/linstrom/shared"
storagenew "git.mstar.dev/mstar/linstrom/storage-new"
"git.mstar.dev/mstar/linstrom/storage-new/dbgen"
@ -103,6 +104,14 @@ func newServer() {
if err = storagenew.InsertUnknownActorPlaceholder(); err != nil {
log.Fatal().Err(err).Msg("Failed to insert self properly")
}
log.Info().Msg("Connecting to s3 storage and transcoder")
mediaServer, err := media.NewServer()
if err != nil {
log.Fatal().Err(err).Msg("Failed to set up the media server")
}
media.GlobalServer = mediaServer
debugShutdownChan := make(chan *sync.WaitGroup, 1)
interuptChan := make(chan os.Signal, 1)
debugShutdownWaitgroup := sync.WaitGroup{}

98
media/addFile.go Normal file
View file

@ -0,0 +1,98 @@
package media
import (
"context"
"database/sql"
"errors"
"io"
"os"
"path"
"git.mstar.dev/mstar/goutils/other"
"github.com/gabriel-vasile/mimetype"
"github.com/minio/minio-go/v7"
"github.com/rs/zerolog/log"
"git.mstar.dev/mstar/linstrom/config"
"git.mstar.dev/mstar/linstrom/shared"
"git.mstar.dev/mstar/linstrom/storage-new/dbgen"
"git.mstar.dev/mstar/linstrom/storage-new/models"
)
var ErrFileAlreadyExists = errors.New("a file with that name already exists")
// AddFile stores a new media file owned by the given user.
// The upload is first written into the directory shared with the transcoder;
// if a transcoder is connected the file is handed to it, otherwise it is
// uploaded to s3 as is.
// Returns ErrFileAlreadyExists if the user already owns a file with that name,
// otherwise the id of the new media entry.
func (s *Server) AddFile(
	fileReader io.Reader,
	filename, userId string,
	blurred bool,
	altText string,
) (string, error) {
	// Per-user filenames must be unique
	fileCount, err := dbgen.MediaMetadata.Where(dbgen.MediaMetadata.OwnedById.Eq(sql.NullString{Valid: true, String: userId}), dbgen.MediaMetadata.Name.Eq(filename)).
		Count()
	if err != nil {
		return "", err
	}
	if fileCount > 0 {
		return "", ErrFileAlreadyExists
	}
	transcoderInDir := config.GlobalConfig.Transcoder.InDir()
	filePath := path.Join(transcoderInDir, filename)
	file, err := os.Create(filePath)
	if err != nil {
		return "", err
	}
	if _, err = io.Copy(file, fileReader); err != nil {
		// Best-effort close; the copy error is the one worth reporting
		_ = file.Close()
		return "", err
	}
	// BUG FIX: the close error was previously discarded. After a write it must
	// be checked, or an unflushed buffer could hand an incomplete file to the
	// transcoder/uploader.
	if err = file.Close(); err != nil {
		return "", err
	}
	if s.transcoderClient == nil {
		return s.addFileAsIs(filename, userId, filePath, nil, blurred, altText)
	}
	return s.addFileWithTranscoder(filename, userId, filePath, blurred, altText)
}
// addFileAsIs uploads the given file to s3 without transcoding and records its
// metadata in the db. If mtype (short for mimetype, shortened because of
// module naming conflict) is not nil, use that as the file's mimetype.
// Otherwise, the mimetype will be detected manually from the file contents.
// Returns the newly generated media id.
func (s *Server) addFileAsIs(
	filename, userId, filepath string,
	mtype *string,
	blurred bool,
	altText string,
) (string, error) {
	if mtype == nil {
		// Sniff the mimetype from the file itself
		mType, err := mimetype.DetectFile(filepath)
		if err != nil {
			return "", err
		}
		mtype = other.IntoPointer(mType.String())
	}
	// The media id doubles as the s3 object key
	id := shared.NewId()
	s3Result, err := s.client.FPutObject(
		context.TODO(),
		config.GlobalConfig.S3.BucketName,
		id,
		filepath,
		minio.PutObjectOptions{},
	)
	if err != nil {
		return "", err
	}
	log.Debug().Any("result", s3Result).Msg("Upload result")
	fileMetadata := models.MediaMetadata{
		ID:        id,
		OwnedById: sql.NullString{Valid: true, String: userId},
		Remote:    false,
		Location:  s3Result.Key,
		Type:      *mtype,
		Name:      filename,
		AltText:   altText,
		Blurred:   blurred,
	}
	err = dbgen.MediaMetadata.Create(&fileMetadata)
	if err != nil {
		// NOTE(review): the s3 object is not cleaned up if the db insert
		// fails, leaving an orphan until ServiceEnsureFileSynchronisation runs
		return "", err
	}
	return id, nil
}

17
media/fileInfo.go Normal file
View file

@ -0,0 +1,17 @@
package media
import (
"database/sql"
"git.mstar.dev/mstar/linstrom/storage-new/dbgen"
)
// FileExists reports whether the given user already owns a media file with the
// given name.
func (s *Server) FileExists(userid, filename string) (bool, error) {
	mm := dbgen.MediaMetadata
	owner := sql.NullString{String: userid, Valid: true}
	count, err := mm.Where(mm.OwnedById.Eq(owner), mm.Name.Eq(filename)).
		Count()
	if err != nil {
		return false, err
	}
	return count > 0, nil
}

View file

@ -3,29 +3,26 @@ package media
import (
"context"
"errors"
"net/rpc"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/rs/zerolog/log"
"git.mstar.dev/mstar/linstrom/config"
)
type Server struct {
client *minio.Client
client *minio.Client
transcoderClient *rpc.Client
}
/*
TODO: Figure out an api for a microservice for transcoding media, see https://tech.lgbt/@lina/114682780787448797
- Read endpoint from config
- Try to reach transcoder
- If transcoder is alive, use it for transcoding
- If not alive, store files as is
*/
var (
ErrNoBucketAccess = errors.New("can't access configured bucket")
)
var GlobalServer *Server
func NewServer() (*Server, error) {
client, err := minio.New(config.GlobalConfig.S3.Endpoint, &minio.Options{
Creds: credentials.NewStaticV4(
@ -55,24 +52,16 @@ func NewServer() (*Server, error) {
return nil, ErrNoBucketAccess
}
}
return &Server{client: client}, nil
}
// UsernameFilename converts a userId and filename into a proper filepath for s3.
// Reason for this is that the userId for external users is a valid url which needs to be encoded
func UsernameFilename(userId, filename string) string {
return userId + "//" + filename
}
func (s *Server) HasFileScoped(userId, filename string) (bool, error) {
info, err := s.client.StatObject(
context.Background(),
config.GlobalConfig.S3.BucketName,
UsernameFilename(userId, filename),
minio.GetObjectOptions{},
)
if err != nil {
return false, err
if config.GlobalConfig.Transcoder.IgnoreTranscoder {
return &Server{client: client, transcoderClient: nil}, nil
}
return info.IsDeleteMarker, nil
transcoderClient, err := rpc.DialHTTP("tcp", config.GlobalConfig.Transcoder.Address())
if err != nil {
log.Warn().Err(err).
Str("address", config.GlobalConfig.Transcoder.Address()).
Msg("failed to dial transcoder, various media related features won't be available")
transcoderClient = nil
}
return &Server{client: client, transcoderClient: transcoderClient}, nil
}

56
media/readFile.go Normal file
View file

@ -0,0 +1,56 @@
package media
import (
"context"
"database/sql"
"errors"
"io"
"github.com/minio/minio-go/v7"
"gorm.io/gorm"
"git.mstar.dev/mstar/linstrom/config"
"git.mstar.dev/mstar/linstrom/storage-new/dbgen"
)
var ErrFileNotFound = errors.New("file not found")
// ReadFile opens the media file with the given name owned by the given user
// for reading, streaming it from s3. Only local (non-remote) files are served.
// Returns ErrFileNotFound if no matching metadata exists.
// The caller is responsible for closing the returned reader.
func (s *Server) ReadFile(userid, filename string) (io.ReadCloser, error) {
	mm := dbgen.MediaMetadata
	metadata, err := mm.Where(mm.OwnedById.Eq(sql.NullString{Valid: true, String: userid}), mm.Name.Eq(filename), mm.Remote.Is(false)).
		Select(mm.ID, mm.Location).
		First()
	// errors.Is instead of a switch on err: robust against wrapped errors
	if errors.Is(err, gorm.ErrRecordNotFound) {
		return nil, ErrFileNotFound
	}
	if err != nil {
		return nil, err
	}
	return s.client.GetObject(
		context.TODO(),
		config.GlobalConfig.S3.BucketName,
		metadata.Location,
		minio.GetObjectOptions{},
	)
}
// ReadFileId opens the media file with the given id for reading, streaming it
// from s3. Only local (non-remote) files are served.
// Returns ErrFileNotFound if no matching metadata exists.
// The caller is responsible for closing the returned reader.
func (s *Server) ReadFileId(id string) (io.ReadCloser, error) {
	mm := dbgen.MediaMetadata
	metadata, err := mm.Where(mm.ID.Eq(id), mm.Remote.Is(false)).
		Select(mm.Location).
		First()
	// errors.Is instead of a switch on err: robust against wrapped errors
	if errors.Is(err, gorm.ErrRecordNotFound) {
		return nil, ErrFileNotFound
	}
	if err != nil {
		return nil, err
	}
	return s.client.GetObject(
		context.TODO(),
		config.GlobalConfig.S3.BucketName,
		metadata.Location,
		minio.GetObjectOptions{},
	)
}

5
media/removeFile.go Normal file
View file

@ -0,0 +1,5 @@
package media
// RemoveFile deletes the media file with the given name owned by the given user.
// TODO(review): not implemented yet; any caller will panic.
func (s *Server) RemoveFile(userId, filename string) error {
	panic("not implemented")
}

82
media/services.go Normal file
View file

@ -0,0 +1,82 @@
package media
import (
"context"
"slices"
"git.mstar.dev/mstar/goutils/sliceutils"
"github.com/minio/minio-go/v7"
"github.com/rs/zerolog/log"
"git.mstar.dev/mstar/linstrom/config"
"git.mstar.dev/mstar/linstrom/storage-new/dbgen"
"git.mstar.dev/mstar/linstrom/storage-new/models"
)
// ServiceEnsureFileSynchronisation is a service function for ensuring data synchronicity between
// the db's metadata for the files and the actual files in s3.
// All files without matching metadata will be deleted. Same for all metadata without a matching file.
// No attempt at restoring a connection will be made
func (s *Server) ServiceEnsureFileSynchronisation() {
mm := dbgen.MediaMetadata
allFiles, err := mm.
Select(mm.ID, mm.OwnedById, mm.Name, mm.Location).
Where(mm.Location.NotLike("linstrom://%"), mm.Remote.Is(false)).
Find()
if err != nil {
log.Error().Err(err).Msg("Failed to get a list of all known media")
return
}
foundInDb := []string{}
objectMissingInDb := []minio.ObjectInfo{}
// Go over all objects in the bucket. Note down if it has an entry in the db or not
for obj := range s.client.ListObjects(context.TODO(), config.GlobalConfig.S3.BucketName, minio.ListObjectsOptions{
Recursive: true,
}) {
log.Debug().Str("object-key", obj.Key).Msg("Checking object")
if slices.ContainsFunc(allFiles, func(e *models.MediaMetadata) bool {
return e.Location == obj.Key
}) {
foundInDb = append(foundInDb, obj.Key)
} else {
objectMissingInDb = append(objectMissingInDb, obj)
}
}
// Find every db entry not in the list of found objects
entryMissingAnObject := []string{}
for _, dbFile := range allFiles {
if !slices.ContainsFunc(foundInDb, func(e string) bool {
return dbFile.Location == e
}) {
entryMissingAnObject = append(entryMissingAnObject, dbFile.ID)
}
}
// For every object missing in the db, delete it
minioErrChan := s.client.RemoveObjects(
context.TODO(),
config.GlobalConfig.S3.BucketName,
sliceutils.ToChannel(objectMissingInDb),
minio.RemoveObjectsOptions{GovernanceBypass: true},
)
s3Errors := sliceutils.FromChannel(minioErrChan, 0)
s3Errors = sliceutils.Filter(
s3Errors,
func(t minio.RemoveObjectError) bool { return t.Err != nil },
)
for _, s3Err := range s3Errors {
log.Error().
Err(s3Err.Err).
Str("object-name", s3Err.ObjectName).
Msg("Failed to delete object missing in db")
}
// And perform a batch delete
_, err = dbgen.MediaMetadata.Where(dbgen.MediaMetadata.ID.In(entryMissingAnObject...)).Delete()
if err != nil {
log.Error().
Err(err).
Strs("media-ids", entryMissingAnObject).
Msg("Failed to batch delete all media metadata without a matching object in s3")
}
}

45
media/transcoder.go Normal file
View file

@ -0,0 +1,45 @@
package media
import (
"github.com/rs/zerolog/log"
"git.mstar.dev/mstar/linstrom/config"
)
// WARN: These types need to always be in sync with linstrom-transcoder/transcode/transcoder.go
// TODO: Maybe move to a separate repo outside of linstrom-transcoder

// TranscodeArgs is the argument payload for a Transcoder.Transcode rpc call.
type TranscodeArgs struct {
	// Shared secret authenticating this instance to the transcoder
	Secret string
	// Path of the input file inside the shared directory
	Filename string
}

// TranscodeReply is the reply payload of a Transcoder.Transcode rpc call.
// NOTE(review): Error is an interface field; net/rpc's default gob codec can
// generally not decode arbitrary error implementations — confirm the
// transcoder sends a concrete, gob-registered type.
type TranscodeReply struct {
	Error error
	Mimetype string
	// Path of a generated thumbnail in the shared directory, if any
	ThumbnailFilename *string
	// Path of the transcoded output file in the shared directory
	Filename string
}
// addFileWithTranscoder will try to transcode the given file using the helper
// application before uploading. Should the transcoder report a failure, the
// original file is uploaded unchanged instead.
func (s *Server) addFileWithTranscoder(
	filename, userId, filepath string,
	blurred bool,
	altText string,
) (string, error) {
	request := TranscodeArgs{
		Secret:   config.GlobalConfig.Transcoder.Secret,
		Filename: filepath,
	}
	var result TranscodeReply
	if err := s.transcoderClient.Call("Transcoder.Transcode", &request, &result); err != nil {
		return "", err
	}
	if result.Error != nil {
		log.Warn().Err(result.Error).Msg("Transcoder failed, uploading raw file")
		return s.addFileAsIs(filename, userId, filepath, nil, blurred, altText)
	}
	return s.addFileAsIs(filename, userId, result.Filename, &result.Mimetype, blurred, altText)
}

View file

@ -0,0 +1,30 @@
package cleaners
import (
"time"
"github.com/rs/zerolog/log"
"git.mstar.dev/mstar/linstrom/storage-new/dbgen"
)
const maxServerAge = time.Hour * 24 * 30 // One month
// Register the dead-server cleaner with the cleaner framework at startup.
func init() {
	cleanerBuilders = append(cleanerBuilders, buildKillDeadServers)
}
// Marks all servers where the last interaction time is older than maxServerAge
func tickKillDeadServers(now time.Time) {
_, err := dbgen.RemoteServer.Where(dbgen.RemoteServer.LastInteraction.Lt(now.Add(-maxServerAge)), dbgen.RemoteServer.IsSelf.Is(false)).
UpdateColumn(dbgen.RemoteServer.IsDead, true)
if err != nil {
log.Error().
Err(err).
Msg("Failed to mark servers without interaction for over a 30 days as dead")
}
}
// buildKillDeadServers wires tickKillDeadServers into the cleaner framework,
// running it once per hour.
func buildKillDeadServers() (onTick func(time.Time), name string, tickSpeed time.Duration) {
	return tickKillDeadServers, "kill-dead-servers", time.Hour
}

View file

@ -0,0 +1,113 @@
package cleaners
import (
"net/http"
"strings"
"sync"
"time"
"github.com/rs/zerolog/log"
"golang.org/x/sync/errgroup"
"gorm.io/gen"
"git.mstar.dev/mstar/linstrom/storage-new/dbgen"
"git.mstar.dev/mstar/linstrom/storage-new/models"
webshared "git.mstar.dev/mstar/linstrom/web/shared"
)
const maxFailedRequestsBeforeDeath = 10
var (
reqErrStrUnknownHost = "no such host"
)
// Register the failed-request retry job with the cleaner framework at startup.
func init() {
	cleanerBuilders = append(cleanerBuilders, buildRetryRequests)
}
// TickRetryRequests re-attempts delivery of previously failed outbound
// requests, skipping servers already marked as dead. Requests that now
// succeed are deleted; servers that accumulate too many failed attempts are
// marked as dead afterwards.
func TickRetryRequests(now time.Time) {
	batchResults := []*models.FailedOutboundRequest{}
	fo := dbgen.FailedOutboundRequest
	idsToDelete := []uint64{}
	// Guards idsToDelete, which is appended to from multiple goroutines
	var idsDeleteLock sync.Mutex
	err := fo.Preload(fo.TargetServer, fo.ActingUser).
		// Join with server data to exclude dead servers
		LeftJoin(dbgen.RemoteServer, dbgen.RemoteServer.ID.EqCol(fo.TargetServerId)).
		Where(dbgen.RemoteServer.IsDead.Is(false)).
		Order(fo.Id.Asc()).
		FindInBatches(&batchResults, 50, func(tx gen.Dao, batch int) error {
			var g errgroup.Group
			for _, failedRequest := range batchResults {
				g.Go(func() (err error) {
					defer func() {
						// Every attempt counts one up; the success path below
						// compensates by subtracting one beforehand
						failedRequest.NrOfAttempts += 1
						// NOTE(review): this deliberately overwrites err, so a
						// failed request only fails the batch if persisting the
						// attempt also fails — confirm that is intended
						err = dbgen.FailedOutboundRequest.Save(failedRequest)
						if err != nil {
							return
						}
						if failedRequest.NrOfAttempts >= maxFailedRequestsBeforeDeath {
							_, err = dbgen.RemoteServer.Where(dbgen.RemoteServer.ID.Eq(failedRequest.TargetServerId)).
								UpdateColumn(dbgen.RemoteServer.IsDead, true)
						}
					}()
					var res *http.Response
					res, _, err = webshared.RequestSigned(
						"POST",
						failedRequest.Target,
						failedRequest.RawData,
						failedRequest.ActingUser,
					)
					if err != nil {
						// BUG FIX: the deferred function already increments
						// NrOfAttempts; the extra increment that used to be
						// here counted every failed attempt twice
						errString := err.Error()
						// FIXME: Use the actual error types instead of error string
						// Using substring matching is awful and probably not reliable. Using error type is likely more reliable
						if strings.Contains(errString, reqErrStrUnknownHost) {
							failedRequest.LastFailureReason = string(
								models.RequestFailureRequestError,
							)
						} else {
							failedRequest.LastFailureReason = string(models.RequestFailureRequestError)
						}
						return
					}
					if res.StatusCode < 400 {
						idsDeleteLock.Lock()
						idsToDelete = append(idsToDelete, failedRequest.Id)
						idsDeleteLock.Unlock()
						// Defer func will always add one (to make the expected failure case easier)
						// Sub one here to prevent a potential server kill if it was at maxFailedRequestsBeforeDeath-1 failed requests before
						failedRequest.NrOfAttempts -= 1
						return nil
					}
					switch res.StatusCode {
					case http.StatusInternalServerError:
						failedRequest.LastFailureReason = string(models.RequestFailureInternalError)
					case http.StatusForbidden:
						failedRequest.LastFailureReason = string(models.RequestFailureRejected)
					case http.StatusTooManyRequests:
						// TODO: Check Timeout headers and write apropriate message
					}
					return nil
				})
			}
			// BUG FIX: g.Wait() was never called, so this callback returned
			// while the goroutines above still referenced batchResults — which
			// FindInBatches reuses for the next batch, racing the workers
			return g.Wait()
		})
	if err != nil {
		log.Error().Err(err).Msg("Failed to batch-process all failed outbound requests")
	}
	_, err = fo.Where(fo.Id.In(idsToDelete...)).Delete()
	if err != nil {
		log.Error().Err(err).Msg("Failed to batch-delete all successful retries")
	}
	err = dbgen.FailedOutboundRequest.KillServers(maxFailedRequestsBeforeDeath)
	if err != nil {
		log.Error().Err(err).Msg("Failed to kill all servers with too many failed requests")
	}
}
// buildRetryRequests wires TickRetryRequests into the cleaner framework,
// running it once per hour.
func buildRetryRequests() (onTick func(time.Time), name string, tickSpeed time.Duration) {
	return TickRetryRequests, "retry-requests", time.Hour
}

View file

@ -7,6 +7,7 @@ package dbgen
import (
"context"
"database/sql"
"strings"
"git.mstar.dev/mstar/linstrom/storage-new/models"
"gorm.io/gorm"
@ -607,6 +608,34 @@ type IFailedOutboundRequestDo interface {
Returning(value interface{}, columns ...string) IFailedOutboundRequestDo
UnderlyingDB() *gorm.DB
schema.Tabler
KillServers(maxAttempts uint32) (err error)
}
// KillServers marks all servers where a request has more than X failed attempts as dead
//
// WITH servers_to_kill AS (
//
//	SELECT target_server_id
//	FROM failed_outbound_requests
//	WHERE nr_of_attempts > @maxAttempts
//
// )
// UPDATE remote_servers
// SET is_dead = true
// WHERE id IN (SELECT target_server_id FROM servers_to_kill)
//
// NOTE(review): this file is generated by gorm gen from the SQL template
// above — change the template and regenerate rather than editing by hand.
func (f failedOutboundRequestDo) KillServers(maxAttempts uint32) (err error) {
	var params []interface{}
	var generateSQL strings.Builder
	params = append(params, maxAttempts)
	generateSQL.WriteString("WITH servers_to_kill AS ( SELECT target_server_id FROM failed_outbound_requests WHERE nr_of_attempts > ? ) UPDATE remote_servers SET is_dead = true WHERE id IN (SELECT target_server_id FROM servers_to_kill) ")
	var executeSQL *gorm.DB
	executeSQL = f.UnderlyingDB().Exec(generateSQL.String(), params...) // ignore_security_alert
	err = executeSQL.Error
	return
}
func (f failedOutboundRequestDo) Debug() IFailedOutboundRequestDo {

View file

@ -55,6 +55,8 @@ func newRole(db *gorm.DB, opts ...gen.DOOption) role {
_role.CanMentionOthers = field.NewBool(tableName, "can_mention_others")
_role.HasMentionCountLimit = field.NewBool(tableName, "has_mention_count_limit")
_role.MentionLimit = field.NewUint32(tableName, "mention_limit")
_role.MaxIndividualFileSize = field.NewUint64(tableName, "max_individual_file_size")
_role.MaxTotalFileSize = field.NewUint64(tableName, "max_total_file_size")
_role.AutoNsfwMedia = field.NewBool(tableName, "auto_nsfw_media")
_role.AutoCwPosts = field.NewBool(tableName, "auto_cw_posts")
_role.AutoCwPostsText = field.NewString(tableName, "auto_cw_posts_text")
@ -118,6 +120,8 @@ type role struct {
CanMentionOthers field.Bool
HasMentionCountLimit field.Bool
MentionLimit field.Uint32
MaxIndividualFileSize field.Uint64
MaxTotalFileSize field.Uint64
AutoNsfwMedia field.Bool
AutoCwPosts field.Bool
AutoCwPostsText field.String
@ -187,6 +191,8 @@ func (r *role) updateTableName(table string) *role {
r.CanMentionOthers = field.NewBool(table, "can_mention_others")
r.HasMentionCountLimit = field.NewBool(table, "has_mention_count_limit")
r.MentionLimit = field.NewUint32(table, "mention_limit")
r.MaxIndividualFileSize = field.NewUint64(table, "max_individual_file_size")
r.MaxTotalFileSize = field.NewUint64(table, "max_total_file_size")
r.AutoNsfwMedia = field.NewBool(table, "auto_nsfw_media")
r.AutoCwPosts = field.NewBool(table, "auto_cw_posts")
r.AutoCwPostsText = field.NewString(table, "auto_cw_posts_text")
@ -228,7 +234,7 @@ func (r *role) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
}
func (r *role) fillFieldMap() {
r.fieldMap = make(map[string]field.Expr, 53)
r.fieldMap = make(map[string]field.Expr, 55)
r.fieldMap["id"] = r.ID
r.fieldMap["created_at"] = r.CreatedAt
r.fieldMap["updated_at"] = r.UpdatedAt
@ -257,6 +263,8 @@ func (r *role) fillFieldMap() {
r.fieldMap["can_mention_others"] = r.CanMentionOthers
r.fieldMap["has_mention_count_limit"] = r.HasMentionCountLimit
r.fieldMap["mention_limit"] = r.MentionLimit
r.fieldMap["max_individual_file_size"] = r.MaxIndividualFileSize
r.fieldMap["max_total_file_size"] = r.MaxTotalFileSize
r.fieldMap["auto_nsfw_media"] = r.AutoNsfwMedia
r.fieldMap["auto_cw_posts"] = r.AutoCwPosts
r.fieldMap["auto_cw_posts_text"] = r.AutoCwPostsText

View file

@ -39,6 +39,7 @@ func newServerMetadata(db *gorm.DB, opts ...gen.DOOption) serverMetadata {
_serverMetadata.LECSR = field.NewBytes(tableName, "lecsr")
_serverMetadata.LELastUpdate = field.NewField(tableName, "le_last_update")
_serverMetadata.LEUserPrivKey = field.NewBytes(tableName, "le_user_priv_key")
_serverMetadata.LastMigrationVersion = field.NewUint64(tableName, "last_migration_version")
_serverMetadata.fillFieldMap()
@ -48,19 +49,20 @@ func newServerMetadata(db *gorm.DB, opts ...gen.DOOption) serverMetadata {
type serverMetadata struct {
serverMetadataDo
ALL field.Asterisk
Id field.Uint64
CreatedAt field.Time
UpdatedAt field.Time
LEDomain field.String
LECertUrl field.String
LECertStableUrl field.String
LEPrivateKey field.Bytes
LECertificate field.Bytes
LEIssuerCertificate field.Bytes
LECSR field.Bytes
LELastUpdate field.Field
LEUserPrivKey field.Bytes
ALL field.Asterisk
Id field.Uint64
CreatedAt field.Time
UpdatedAt field.Time
LEDomain field.String
LECertUrl field.String
LECertStableUrl field.String
LEPrivateKey field.Bytes
LECertificate field.Bytes
LEIssuerCertificate field.Bytes
LECSR field.Bytes
LELastUpdate field.Field
LEUserPrivKey field.Bytes
LastMigrationVersion field.Uint64
fieldMap map[string]field.Expr
}
@ -89,6 +91,7 @@ func (s *serverMetadata) updateTableName(table string) *serverMetadata {
s.LECSR = field.NewBytes(table, "lecsr")
s.LELastUpdate = field.NewField(table, "le_last_update")
s.LEUserPrivKey = field.NewBytes(table, "le_user_priv_key")
s.LastMigrationVersion = field.NewUint64(table, "last_migration_version")
s.fillFieldMap()
@ -105,7 +108,7 @@ func (s *serverMetadata) GetFieldByName(fieldName string) (field.OrderExpr, bool
}
func (s *serverMetadata) fillFieldMap() {
s.fieldMap = make(map[string]field.Expr, 12)
s.fieldMap = make(map[string]field.Expr, 13)
s.fieldMap["id"] = s.Id
s.fieldMap["created_at"] = s.CreatedAt
s.fieldMap["updated_at"] = s.UpdatedAt
@ -118,6 +121,7 @@ func (s *serverMetadata) fillFieldMap() {
s.fieldMap["lecsr"] = s.LECSR
s.fieldMap["le_last_update"] = s.LELastUpdate
s.fieldMap["le_user_priv_key"] = s.LEUserPrivKey
s.fieldMap["last_migration_version"] = s.LastMigrationVersion
}
func (s serverMetadata) clone(db *gorm.DB) serverMetadata {

View file

@ -594,6 +594,7 @@ type IUserToUserRelationDo interface {
GetFollowingApLinksPagedForId(id string, page int) (result []string, err error)
CountFollowersForId(id string) (result int, err error)
CountFollowingForId(id string) (result int, err error)
GetLocalFollowerIdsOfId(id string) (result []string, err error)
}
// Get all inbox links for accounts following the user with the specified id
@ -720,6 +721,34 @@ func (u userToUserRelationDo) CountFollowingForId(id string) (result int, err er
return
}
// Get the ids of all local accounts following the user with the target id.
// "Local" means the follower's server row is flagged is_self.
// Generated by gorm-gen from the SQL in the IUserToUserRelation interface:
//
// SELECT
//	r.user_id
// FROM
//
//	user_to_user_relations r
//	LEFT JOIN users u ON r.user_id = u.id
//	LEFT JOIN remote_servers s ON u.server_id = s.id
//
// WHERE
//
//	s.is_self = true
//	AND r.target_user_id = @id;
func (u userToUserRelationDo) GetLocalFollowerIdsOfId(id string) (result []string, err error) {
	var params []interface{}
	var generateSQL strings.Builder
	// Single query parameter: the id of the user being followed.
	params = append(params, id)
	generateSQL.WriteString("SELECT r.user_id FROM user_to_user_relations r LEFT JOIN users u ON r.user_id = u.id LEFT JOIN remote_servers s ON u.server_id = s.id WHERE s.is_self = true AND r.target_user_id = ?; ")
	var executeSQL *gorm.DB
	executeSQL = u.UnderlyingDB().Raw(generateSQL.String(), params...).Find(&result) // ignore_security_alert
	err = executeSQL.Error
	return
}
// Debug returns a copy of this DAO with gorm's debug logging enabled.
func (u userToUserRelationDo) Debug() IUserToUserRelationDo {
	debugged := u.DO.Debug()
	return u.withDO(debugged)
}

View file

@ -1,17 +1,16 @@
package storage
import (
"fmt"
"strings"
"git.mstar.dev/mstar/goutils/other"
"git.mstar.dev/mstar/goutils/sliceutils"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
"git.mstar.dev/mstar/linstrom/storage-new/models"
"git.mstar.dev/mstar/linstrom/storage-new/dbgen"
)
const CurrentMigrationVersion = 1
// Auto-migrate all tables and types used
func Migrate(db *gorm.DB) error {
// Shut up gorm's queries during automigrate by setting log level to info during migration
@ -38,7 +37,10 @@ func Migrate(db *gorm.DB) error {
return other.Error("storage", "Failed to create Activitystreams Activity type", err)
}
if err := createCollectionTarget(db); err != nil {
return other.Error("storage", "Failed t ocreate collections target type", err)
return other.Error("storage", "Failed to create collections target type", err)
}
if err := preTypeMigrations(db); err != nil {
return other.Error("storage", "Failed to run pre-type migrations", err)
}
if err := migrateTypes(db); err != nil {
return other.Error("storage", "Failed to automigrate data structs", err)
@ -46,124 +48,17 @@ func Migrate(db *gorm.DB) error {
return nil
}
// Returns the raw error created by gorm, with no wrapping
func migrateTypes(db *gorm.DB) error {
if err := db.AutoMigrate(models.AllTypes...); err != nil {
return err
}
return nil
}
// Ensure the being enum exists for the user
func createBeingType(db *gorm.DB) error {
return migrateEnum(
db,
"being_type",
sliceutils.Map(models.AllBeings, func(t models.BeingType) string { return string(t) }),
)
}
func createAccountRelationType(db *gorm.DB) error {
return migrateEnum(
db,
"relation_type",
sliceutils.Map(
models.AllRelations,
func(t models.RelationType) string { return string(t) },
),
)
}
func createAccountAuthMethodType(db *gorm.DB) error {
return migrateEnum(
db,
"auth_method_type",
sliceutils.Map(
models.AllAuthMethods,
func(t models.AuthenticationMethodType) string { return string(t) },
),
)
}
func createRemoteServerSoftwareType(db *gorm.DB) error {
return migrateEnum(
db,
"server_software_type",
sliceutils.Map(
models.AllServerSoftwareTypes,
func(t models.ServerSoftwareType) string { return string(t) },
),
)
}
func createActitiystreamsObjectType(db *gorm.DB) error {
return migrateEnum(
db,
"activitystreams_object_type",
sliceutils.Map(
models.AllActivitystreamsObjectTypes,
func(t models.ActivitystreamsObjectType) string { return string(t) },
),
)
}
func createActitiystreamsActivityType(db *gorm.DB) error {
return migrateEnum(
db,
"activitystreams_activity_type",
sliceutils.Map(
models.AllActivitystreamsActivityTypes,
func(t models.ActivitystreamsActivityType) string { return string(t) },
),
)
}
// func createActitiystreamsActivityTargetType(db *gorm.DB) error {
// return migrateEnum(
// db,
// "activitystreams_activity_target_type",
// sliceutils.Map(
// models.AllActivitystreamsActivityTargetTypes,
// func(t models.ActivitystreamsActivityTargetType) string { return string(t) },
// ),
// )
// }
func createCollectionTarget(db *gorm.DB) error {
return migrateEnum(
db,
"collection_target_type",
sliceutils.Map(
models.AllCollectionTargetTypes,
func(t models.CollectionTargetType) string { return string(t) },
),
)
}
// Helper function for ensuring the existence of an enum with the given values
func migrateEnum(db *gorm.DB, name string, values []string) error {
if err := db.Exec("DROP TYPE IF EXISTS " + name + " CASCADE;").Error; err != nil {
return other.Error(
"storage",
fmt.Sprintf("Failed to remove old type %s (if it exists)", name),
err,
)
}
queryBuilder := strings.Builder{}
queryBuilder.WriteString("CREATE TYPE ")
queryBuilder.WriteString(name)
queryBuilder.WriteString(" AS ENUM (")
blen := len(values)
for i, btype := range values {
queryBuilder.WriteString("'" + string(btype) + "'")
// Append comma everywhere except last entry
if i+1 < blen {
queryBuilder.WriteString(",")
}
}
queryBuilder.WriteString(");")
if err := db.Exec(queryBuilder.String()).Error; err != nil {
return err
// preTypeMigrations runs hand-written schema migrations that must happen
// before gorm's AutoMigrate touches the tables. It compares the migration
// version stored in the server metadata row with CurrentMigrationVersion.
func preTypeMigrations(db *gorm.DB) error {
	genTmp := dbgen.Use(db)
	meta, err := genTmp.
		ServerMetadata.Select(dbgen.ServerMetadata.LastMigrationVersion).
		First()
	if err != nil {
		// NOTE(review): the error is swallowed here — presumably because on
		// a fresh database the metadata table/row does not exist yet and
		// there is nothing to migrate. TODO confirm, and consider returning
		// err for failures other than missing table/row.
		return nil
	}
	if meta.LastMigrationVersion == CurrentMigrationVersion {
		// Schema is already at the current version, nothing to do.
		return nil
	}
	// Version mismatch, but no custom migration steps are written yet.
	log.Error().Msg("custom schema migrations not implemented yet")
	return nil
}

View file

@ -16,3 +16,17 @@ type FailedOutboundRequest struct {
TargetServer *RemoteServer // The remote server being targeted. Included to determine if a request still has a chance of success
TargetServerId uint // Id of the target remote server
}
// IFailedOutboundRequest declares hand-written queries for the
// FailedOutboundRequest model. gorm-gen parses the SQL in the method
// comment below to generate the implementation, so the comment's format
// (including the @maxAttempts placeholder) must stay intact.
type IFailedOutboundRequest interface {
	// KillServers marks all servers where a request has more than X failed attempts as dead
	//
	// WITH servers_to_kill AS (
	//	SELECT target_server_id
	//	FROM failed_outbound_requests
	//	WHERE nr_of_attempts > @maxAttempts
	// )
	// UPDATE remote_servers
	// SET is_dead = true
	// WHERE id IN (SELECT target_server_id FROM servers_to_kill)
	KillServers(maxAttempts uint32) error
}

View file

@ -34,3 +34,4 @@ type Feed struct {
// Suffix added to feeds created as the default feed for a user
const FeedDefaultSuffix = "-default"
const GlobalFeedName = "global"

View file

@ -22,7 +22,7 @@ type MediaMetadata struct {
// OwnedBy User
OwnedById sql.NullString // Account id this media belongs to
Remote bool // whether the attachment is a remote one
// Where the media is stored. Url
// Where the media is stored. Url for remote files, the object name for local files
Location string
Type string // What media type this is following mime types, eg image/png
// Name of the file

View file

@ -85,6 +85,9 @@ type Role struct {
HasMentionCountLimit *bool // Local & remote
MentionLimit *uint32 // Local & remote
MaxIndividualFileSize *uint64 // Local
MaxTotalFileSize *uint64 // Local
// CanViewBoosts *bool
// CanViewQuotes *bool
// CanViewMedia *bool

View file

@ -42,6 +42,9 @@ var DefaultUserRole = Role{
uint32(math.MaxUint32),
), // Set this to max, even if not used due to *HasMentionCountLimit == false
MaxIndividualFileSize: other.IntoPointer(uint64(10 << 20)), // 10 MB
MaxTotalFileSize: other.IntoPointer(uint64(500 << 20)), // 500 MB
AutoNsfwMedia: other.IntoPointer(false),
AutoCwPosts: other.IntoPointer(false),
AutoCwPostsText: nil,

View file

@ -12,6 +12,7 @@ type ServerMetadata struct {
UpdatedAt time.Time
// ---- Section TLS LetsEncrypt
LEDomain string
LECertUrl string
LECertStableUrl string
@ -21,4 +22,7 @@ type ServerMetadata struct {
LECSR []byte // Encrypted
LELastUpdate sql.NullTime
LEUserPrivKey []byte // Encrypted
// ---- Section Database
LastMigrationVersion uint64
}

View file

@ -71,4 +71,17 @@ type IUserToUserRelation interface {
// r.user_id = @id AND
// r.relation = 'follow'
CountFollowingForId(id string) (int, error)
// Get the ids of all local accounts following the user with the target id
//
// SELECT
// r.user_id
// FROM
// user_to_user_relations r
// LEFT JOIN users u ON r.user_id = u.id
// LEFT JOIN remote_servers s ON u.server_id = s.id
// WHERE
// s.is_self = true
// AND r.target_user_id = @id;
GetLocalFollowerIdsOfId(id string) ([]string, error)
}

View file

@ -43,6 +43,9 @@ func InsertSelf() error {
if err = attachUserToRole(user); err != nil {
return other.Error("storage", "failed to save/update self user to full admin role", err)
}
if err = insertGlobalFeed(user); err != nil {
return other.Error("storage", "failed to ensure that the global feed exists", err)
}
return nil
}
@ -215,3 +218,26 @@ func attachUserToRole(user *models.User) error {
}
return nil
}
// insertGlobalFeed makes sure the instance-wide global feed exists,
// creating it (owned by the server actor) if it is missing.
func insertGlobalFeed(serverActor *models.User) error {
	_, err := dbgen.Feed.Where(dbgen.Feed.Name.Eq(models.GlobalFeedName)).First()
	if err == nil {
		// Feed already exists, nothing to do.
		return nil
	}
	if err != gorm.ErrRecordNotFound {
		// Any other db problem is passed up to the caller.
		return err
	}
	// Not found: create the global feed, owned by the server actor.
	feed := &models.Feed{
		Owner:     *serverActor,
		OwnerId:   serverActor.ID,
		IsDefault: true,
		Name:      models.GlobalFeedName,
		PublicKey: sql.NullString{Valid: false},
	}
	return dbgen.Feed.Create(feed)
}

View file

@ -0,0 +1,123 @@
package storage
import (
"fmt"
"strings"
"git.mstar.dev/mstar/goutils/other"
"git.mstar.dev/mstar/goutils/sliceutils"
"gorm.io/gorm"
"git.mstar.dev/mstar/linstrom/storage-new/models"
)
// migrateTypes auto-migrates every registered model struct.
// Returns the raw error created by gorm, with no wrapping.
func migrateTypes(db *gorm.DB) error {
	// The previous if err != nil { return err } / return nil dance was
	// equivalent to returning the call's result directly.
	return db.AutoMigrate(models.AllTypes...)
}
// Ensure the being enum exists for the user.
func createBeingType(db *gorm.DB) error {
	names := sliceutils.Map(
		models.AllBeings,
		func(b models.BeingType) string { return string(b) },
	)
	return migrateEnum(db, "being_type", names)
}
// createAccountRelationType ensures the relation_type enum exists.
func createAccountRelationType(db *gorm.DB) error {
	names := sliceutils.Map(models.AllRelations, func(r models.RelationType) string {
		return string(r)
	})
	return migrateEnum(db, "relation_type", names)
}
// createAccountAuthMethodType ensures the auth_method_type enum exists.
func createAccountAuthMethodType(db *gorm.DB) error {
	names := sliceutils.Map(models.AllAuthMethods, func(m models.AuthenticationMethodType) string {
		return string(m)
	})
	return migrateEnum(db, "auth_method_type", names)
}
// createRemoteServerSoftwareType ensures the server_software_type enum exists.
func createRemoteServerSoftwareType(db *gorm.DB) error {
	names := sliceutils.Map(
		models.AllServerSoftwareTypes,
		func(s models.ServerSoftwareType) string { return string(s) },
	)
	return migrateEnum(db, "server_software_type", names)
}
// createActitiystreamsObjectType ensures the activitystreams_object_type enum exists.
func createActitiystreamsObjectType(db *gorm.DB) error {
	names := sliceutils.Map(
		models.AllActivitystreamsObjectTypes,
		func(o models.ActivitystreamsObjectType) string { return string(o) },
	)
	return migrateEnum(db, "activitystreams_object_type", names)
}
// createActitiystreamsActivityType ensures the activitystreams_activity_type enum exists.
func createActitiystreamsActivityType(db *gorm.DB) error {
	names := sliceutils.Map(
		models.AllActivitystreamsActivityTypes,
		func(a models.ActivitystreamsActivityType) string { return string(a) },
	)
	return migrateEnum(db, "activitystreams_activity_type", names)
}
// createCollectionTarget ensures the collection_target_type enum exists.
func createCollectionTarget(db *gorm.DB) error {
	names := sliceutils.Map(
		models.AllCollectionTargetTypes,
		func(c models.CollectionTargetType) string { return string(c) },
	)
	return migrateEnum(db, "collection_target_type", names)
}
// migrateEnum (re)creates the postgres enum type with the given name and
// values: any existing type of that name is dropped (CASCADE) first, then
// the type is created fresh with exactly the given values.
//
// NOTE: name and values are interpolated directly into the SQL, so this
// must only ever be called with trusted, hard-coded inputs (the enum
// definitions in the models package) — never user-supplied data.
func migrateEnum(db *gorm.DB, name string, values []string) error {
	if err := db.Exec("DROP TYPE IF EXISTS " + name + " CASCADE;").Error; err != nil {
		return other.Error(
			"storage",
			fmt.Sprintf("Failed to remove old type %s (if it exists)", name),
			err,
		)
	}
	// Quote every value and join them, replacing the manual
	// comma-placement loop with strings.Join.
	quoted := sliceutils.Map(values, func(v string) string { return "'" + v + "'" })
	query := "CREATE TYPE " + name + " AS ENUM (" + strings.Join(quoted, ",") + ");"
	return db.Exec(query).Error
}

View file

@ -1,54 +0,0 @@
[general]
protocol = "https"
domain = "serveo.net"
subdomain = "31b5744cc8c043386fe5c4ec2fb414f6"
private_port = 8080
public_port = 443
[ssl]
handle_ssl = false
[admin]
username = "server-admin"
first_time_setup_otp = "Example otp password"
profiling_password = ""
allow_registration = true
auth_fetch_for_get = false
auth_fetch_for_non_get = true
[webauthn]
display_name = "Linstrom"
[storage]
host = "localhost"
username = "linstrom"
password = "linstrom"
db_name = "linstrom"
port = 5432
ssl_mode = "disable"
time_zone = "Europe/Berlin"
max_in_memory_cache_size = 1000000
max_in_memory_cache_ttl = 5
max_reconnect_attempts = 3
[mail]
host = "localhost"
port = 587
username = "linstrom"
password = "linstrom"
[self]
server_actor_display_name = "Server actor"
server_display_name = "Linstrom"
[s3]
key_id = "GK458f9d7315fc6c9686c41045"
secret = "b6d1c8ec97052b8a40ae953de34f336170d85554fbe7875acce0ff51464724ee"
region = "garage"
endpoint = "http://localhost:3900"
use_ssl = false
bucket_name = "linstrom-bucket"
[experimental]
use_ed25519_keys = false
auth_fetch_for_server_actor = false

72
web/debug/media.go Normal file
View file

@ -0,0 +1,72 @@
package webdebug
import (
"database/sql"
"net/http"
webutils "git.mstar.dev/mstar/goutils/http"
"git.mstar.dev/mstar/goutils/sliceutils"
"github.com/rs/zerolog/hlog"
"git.mstar.dev/mstar/linstrom/media"
"git.mstar.dev/mstar/linstrom/storage-new/dbgen"
"git.mstar.dev/mstar/linstrom/storage-new/models"
)
// uploadMedia is a debug endpoint that stores an uploaded file for a user
// via the global media server.
// Form fields: "user-id", "blurred" (any non-empty value enables it),
// "alt-text", and the file itself under "file".
func uploadMedia(w http.ResponseWriter, r *http.Request) {
	log := hlog.FromRequest(r)
	// Parse error deliberately ignored: FormValue/FormFile below simply
	// come back empty/erroring if parsing failed.
	_ = r.ParseMultipartForm(10 << 20) // 10MB
	userId := r.FormValue("user-id")
	blurred := r.FormValue("blurred") != ""
	altText := r.FormValue("alt-text")
	file, handler, err := r.FormFile("file")
	if err != nil {
		log.Warn().Err(err).Msg("Failed to get file from form")
		_ = webutils.ProblemDetailsStatusOnly(w, http.StatusBadRequest)
		return
	}
	defer func() { _ = file.Close() }()
	_, err = media.GlobalServer.AddFile(file, handler.Filename, userId, blurred, altText)
	if err != nil {
		log.Error().Err(err).Msg("Failed to upload file to storage")
		_ = webutils.ProblemDetailsStatusOnly(w, http.StatusInternalServerError)
	}
	// On success nothing is written; the implicit 200 OK is the reply.
}
// forceMediaSync kicks off a full media synchronisation in the background
// and returns immediately with the implicit 200 OK.
// NOTE(review): the goroutine is fire-and-forget — nothing waits for it or
// reports its outcome, and repeated calls may overlap. Confirm intended.
func forceMediaSync(w http.ResponseWriter, r *http.Request) {
	go media.GlobalServer.ServiceEnsureFileSynchronisation()
}
// getOwnedFiles lists name, id and mime type of every media file owned by
// the user given via the "id" form value, as json.
func getOwnedFiles(w http.ResponseWriter, r *http.Request) {
	type File struct {
		Name, Id, Mime string
	}
	type Outbound struct {
		Files []File
	}
	log := hlog.FromRequest(r)
	ownerId := r.FormValue("id")
	mm := dbgen.MediaMetadata
	owned, err := mm.
		Where(mm.OwnedById.Eq(sql.NullString{Valid: true, String: ownerId})).
		Select(mm.Name, mm.Type, mm.ID).
		Find()
	if err != nil {
		log.Error().Err(err).Str("user-id", ownerId).Msg("Failed to get files of user")
		_ = webutils.ProblemDetailsStatusOnly(w, http.StatusInternalServerError)
		return
	}
	outbound := Outbound{
		Files: sliceutils.Map(owned, func(m *models.MediaMetadata) File {
			return File{Name: m.Name, Id: m.ID, Mime: m.Type}
		}),
	}
	if err = webutils.SendJson(w, outbound); err != nil {
		log.Error().Err(err).Msg("Failed to marshal response to json")
	}
}

View file

@ -6,6 +6,7 @@ import (
"errors"
"io"
"net/http"
"slices"
webutils "git.mstar.dev/mstar/goutils/http"
"git.mstar.dev/mstar/goutils/sliceutils"
@ -23,10 +24,11 @@ import (
func postAs(w http.ResponseWriter, r *http.Request) {
type Inbound struct {
Username string `json:"username"`
Content string `json:"content"`
ContentWarning *string `json:"content_warning"`
ReplyTo *string `json:"reply_to"`
Username string `json:"username"`
Content string `json:"content"`
ContentWarning *string `json:"content_warning"`
ReplyTo *string `json:"reply_to"`
Mentions []string `json:"mentions"`
}
log := hlog.FromRequest(r)
dec := json.NewDecoder(r.Body)
@ -86,7 +88,9 @@ func postAs(w http.ResponseWriter, r *http.Request) {
return
}
}
mentions := webshared.MentionsFromContent(data.Content)
mentions := sliceutils.RemoveDuplicate(
slices.Concat(webshared.MentionsFromContent(data.Content), data.Mentions),
)
dbPings := []*models.NoteToPing{}
for _, mention := range mentions {
accId, err := activitypub.ImportRemoteAccountByHandle(mention)
@ -122,6 +126,11 @@ func postAs(w http.ResponseWriter, r *http.Request) {
return
}
err = tx.NoteToPing.Create(dbPings...)
if err != nil {
log.Error().Err(err).Msg("Failed to create note pings in db")
_ = webutils.ProblemDetailsStatusOnly(w, http.StatusInternalServerError)
return
}
activity := models.Activity{
Id: shared.NewId(),
Type: string(models.ActivityCreate),

View file

@ -24,19 +24,29 @@ type Server struct {
func New(addr string) *Server {
handler := http.NewServeMux()
handler.HandleFunc("GET /non-deleted", getNonDeletedUsers)
handler.HandleFunc("POST /local-user", createLocalUser)
handler.HandleFunc("GET /delete", deleteUser)
handler.HandleFunc("POST /post-as", postAs)
handler.HandleFunc("GET /notes-for", notesFrom)
handler.HandleFunc("GET /import-user", issueUserImport)
handler.HandleFunc("GET /keys-for", returnKeypair)
handler.HandleFunc("GET /import-server", importServerInfo)
handler.HandleFunc("GET /import-user", issueUserImport)
handler.HandleFunc("GET /request-follow", requestFollow)
handler.HandleFunc("POST /send-as", proxyMessageToTarget)
handler.HandleFunc("POST /follow", requestFollow)
handler.HandleFunc("POST /update-user", updateUser)
handler.HandleFunc("GET /non-deleted", getNonDeletedUsers)
handler.HandleFunc("GET /delete", deleteUser)
handler.HandleFunc("GET /notes-for", notesFrom)
handler.HandleFunc("GET /import-server", importServerInfo)
handler.HandleFunc("GET /replies-to/{id}", inReplyTo)
handler.HandleFunc("POST /fetch", requestAs)
handler.HandleFunc("POST /follow", requestFollow)
handler.HandleFunc("POST /like-note", likeNote)
handler.HandleFunc("POST /boost-note", boostNote)
handler.HandleFunc("GET /files-owned-by", getOwnedFiles)
handler.HandleFunc("POST /upload-file", uploadMedia)
handler.HandleFunc("/force-media-sync", forceMediaSync)
web := http.Server{
Addr: addr,
Handler: webutils.ChainMiddlewares(

View file

@ -448,3 +448,53 @@ func requestAs(w http.ResponseWriter, r *http.Request) {
body, _ := io.ReadAll(res.Body)
_, _ = fmt.Fprint(w, string(body))
}
// updateUser is a debug endpoint applying partial updates (display name,
// description, restricted-follow flag) to the user given by UserId.
// Nil fields in the request body are left untouched.
func updateUser(w http.ResponseWriter, r *http.Request) {
	type Inbound struct {
		UserId           string
		Displayname      *string
		Description      *string
		RestrictedFollow *bool
	}
	log := hlog.FromRequest(r)
	var data Inbound
	if err := json.NewDecoder(r.Body).Decode(&data); err != nil {
		_ = webutils.ProblemDetailsStatusOnly(w, http.StatusBadRequest)
		return
	}
	query := dbgen.User.Where(dbgen.User.ID.Eq(data.UserId))
	user, err := query.First()
	if err == gorm.ErrRecordNotFound {
		_ = webutils.ProblemDetailsStatusOnly(w, http.StatusNotFound)
		return
	}
	if err != nil {
		log.Error().Err(err).Msg("Db error while trying to fetch user for updating")
		_ = webutils.ProblemDetailsStatusOnly(w, http.StatusInternalServerError)
		return
	}
	// Apply only the fields that were actually provided.
	dirty := false
	if data.Displayname != nil {
		user.DisplayName = *data.Displayname
		dirty = true
	}
	if data.Description != nil {
		user.Description = *data.Description
		dirty = true
	}
	if data.RestrictedFollow != nil {
		user.RestrictedFollow = *data.RestrictedFollow
		dirty = true
	}
	if !dirty {
		// Nothing changed, skip the write entirely.
		return
	}
	if err = query.Save(user); err != nil {
		log.Error().Err(err).Msg("Failed to update user with new data")
		_ = webutils.ProblemDetailsStatusOnly(w, http.StatusInternalServerError)
	}
}

View file

@ -1,23 +1 @@
package activitypub
import (
"context"
"time"
)
// Announce is boost
type activityAnnounceOut struct {
Context any `json:"@context,omitempty"`
Id string
Type string // Always "Announce"
Actor string // The one doing the boost
Published time.Time
To []string
CC []string
Object string // Link to object being boosted
}
func announceFromStorage(ctx context.Context, id string) (*activityAnnounceOut, error) {
panic("not implemented")
}

View file

@ -1 +1,5 @@
package activitypub
import "net/http"
// activityLike handles GET requests for like activities (routed as
// /activity/like/{id}).
// TODO: unimplemented stub — currently replies with an empty 200 OK.
func activityLike(w http.ResponseWriter, r *http.Request) {}

View file

@ -8,12 +8,14 @@ import (
"io"
"net/http"
"regexp"
"slices"
"strconv"
"strings"
"time"
webutils "git.mstar.dev/mstar/goutils/http"
"git.mstar.dev/mstar/goutils/other"
"git.mstar.dev/mstar/goutils/sliceutils"
"github.com/mitchellh/mapstructure"
"github.com/rs/zerolog/hlog"
"github.com/rs/zerolog/log"
@ -139,6 +141,108 @@ func userInbox(w http.ResponseWriter, r *http.Request) {
}
}
// globalInbox is the shared ActivityPub inbox handler. It validates that
// the body is json carrying an "@context", a string "type" and a string
// "id", then dispatches on the activity type. Messages of unknown type,
// and messages whose handler reports failure, are stored in the
// UnhandledMessage table for later processing.
func globalInbox(w http.ResponseWriter, r *http.Request) {
	log := hlog.FromRequest(r)
	body, err := io.ReadAll(r.Body)
	// NOTE(review): a read error is only logged here, never checked;
	// json.Unmarshal below will reject an empty or partial body anyway.
	log.Trace().
		Err(err).
		Bytes("body", body).
		Any("headers", r.Header).
		Msg("Global inbox message")
	data := map[string]any{}
	err = json.Unmarshal(body, &data)
	if err != nil {
		_ = webutils.ProblemDetails(
			w,
			http.StatusBadRequest,
			"/errors/bad-request-data",
			"Bad activity data",
			other.IntoPointer("Body to inbox needs to be json"),
			nil,
		)
		return
	}
	// Every activity must carry a json-ld context.
	if _, ok := data["@context"]; !ok {
		_ = webutils.ProblemDetails(
			w,
			http.StatusBadRequest,
			"/errors/bad-request-data",
			"Bad activity data",
			other.IntoPointer("Request data needs to contain context"),
			nil,
		)
		return
	}
	// The activity type drives the dispatch below.
	objectType, ok := data["type"].(string)
	if !ok {
		_ = webutils.ProblemDetails(
			w,
			http.StatusBadRequest,
			"/errors/bad-request-data",
			"Bad activity data",
			other.IntoPointer(`Request data needs to contain a field "type" with a string value`),
			nil,
		)
		return
	}
	// The id is only validated for presence here; handlers read it themselves.
	_, ok = data["id"].(string)
	if !ok {
		_ = webutils.ProblemDetails(
			w,
			http.StatusBadRequest,
			"/errors/bad-request-data",
			"Bad activity data",
			other.IntoPointer(`Request data needs to contain a field "id" with a string value`),
			nil,
		)
		return
	}
	log.Debug().Str("object-type", objectType).Msg("Inbox message")
	log.Trace().Bytes("body", body).Msg("Inbox raw message")
	// TODO: Decide how to handle the handler failing for whatever reason
	// Add object to unhandled message table and try again later?
	// Discard it? And how would a handler return that it failed?
	// ok now tracks handler success. It stays true in the default branch so
	// the if !ok block below doesn't store the same message a second time.
	ok = true
	switch objectType {
	case "Like":
		ok = handleLike(w, r, data)
	case "Undo":
		ok = handleUndo(w, r, data)
	case "Accept":
		ok = handleAccept(w, r, data)
	case "Reject":
		ok = handleReject(w, r, data)
	case "Create":
		ok = handleCreate(w, r, data)
	default:
		// Unknown type: persist the raw message for later processing.
		log.Warn().
			Str("object-type", objectType).
			Msg("Unknown message type, storing for later processing")
		err = dbgen.UnhandledMessage.Create(&models.UnhandledMessage{
			ForUserId:   "",
			GlobalInbox: true,
			RawData:     body,
		})
		if err != nil {
			log.Error().Err(err).Msg("Failed to store unhandled message for later")
		}
		_ = webutils.ProblemDetailsStatusOnly(w, 500)
	}
	// Handler reported failure: keep the raw message for a retry later.
	if !ok {
		err = dbgen.UnhandledMessage.Create(&models.UnhandledMessage{
			RawData:     body,
			ForUserId:   "",
			GlobalInbox: true,
		})
		if err != nil {
			log.Error().
				Err(err).
				Bytes("body", body).
				Msg("Failed to store failed global inbound message for later processing")
		}
	}
}
func handleLike(w http.ResponseWriter, r *http.Request, object map[string]any) bool {
log := hlog.FromRequest(r)
activityId := object["id"].(string)
@ -213,7 +317,7 @@ func handleLike(w http.ResponseWriter, r *http.Request, object map[string]any) b
Reactor: *liker,
ReactorId: liker.ID,
Emote: nil,
EmoteId: sql.NullInt64{Valid: false},
EmoteId: sql.NullString{Valid: false},
}
tx := dbgen.Q.Begin()
@ -780,13 +884,94 @@ func handleCreate(w http.ResponseWriter, r *http.Request, object map[string]any)
dbNote.RepliesTo = sql.NullString{Valid: true, String: replyUrl}
}
}
feed, err := dbgen.Feed.Where(dbgen.Feed.OwnerId.Eq(targetUserId), dbgen.Feed.IsDefault.Is(true)).
First()
if err != nil {
log.Error().Err(err).Msg("Failed to get feed for targeted user inbox")
_ = webutils.ProblemDetailsStatusOnly(w, http.StatusInternalServerError)
return false
totalNoteTargets := slices.Concat(objectNote.To, objectNote.CC)
// TODO: Find all feeds this note needs to be added to
// Depends on note targets (cc & to tags)
// Includes specific user IDs, nothing else -> DMs, only the personal feeds of those users
// Includes https://www.w3.org/ns/activitystreams#Public (& creator follower list) ->
// Public feed (unlisted has same combo, but iirc doesn't get sent out by server)
// Includes only creator follower list -> Follower only -> only feeds that explicitly follow creator
u2u := dbgen.UserToUserRelation
targetFeeds := []models.Feed{}
if sliceutils.Contains(totalNoteTargets, "https://www.w3.org/ns/activitystreams#Public") {
// Public post, add to global and following feeds
dbNote.AccessLevel = models.NOTE_TARGET_PUBLIC
followerIds, err := u2u.GetLocalFollowerIdsOfId(actingUser.ID)
if err != nil {
log.Error().
Err(err).
Str("follow-target", actingUser.ID).
Msg("Failed to get ids for followers")
return false
}
userFeeds, err := dbgen.Feed.Where(dbgen.Feed.OwnerId.In(followerIds...)).Find()
if err != nil {
log.Error().
Err(err).
Str("follow-target", actingUser.ID).
Strs("follower-ids", followerIds).
Msg("Failed to get feeds for followers")
return false
}
globalFeed, err := dbgen.Feed.Where(dbgen.Feed.Name.Eq(models.GlobalFeedName)).First()
if err != nil {
log.Error().
Err(err).
Msg("Failed to get global feed")
return false
}
targetFeeds = slices.Concat(
targetFeeds,
sliceutils.Map(userFeeds, func(t *models.Feed) models.Feed { return *t }),
)
targetFeeds = append(targetFeeds, *globalFeed)
} else {
if sliceutils.ContainsFunc(totalNoteTargets, func(x string) bool {
return strings.HasPrefix(x, actingUser.ID)
}) {
// Contains an url starting with the author's id, so assume that it's for followers
dbNote.AccessLevel = models.NOTE_TARGET_FOLLOWERS
followerIds, err := u2u.GetLocalFollowerIdsOfId(actingUser.ID)
if err != nil {
log.Error().
Err(err).
Str("follow-target", actingUser.ID).
Msg("Failed to get ids for followers")
return false
}
userFeeds, err := dbgen.Feed.Where(dbgen.Feed.OwnerId.In(followerIds...)).Find()
if err != nil {
log.Error().
Err(err).
Str("follow-target", actingUser.ID).
Strs("follower-ids", followerIds).
Msg("Failed to get feeds for followers")
return false
}
targetFeeds = sliceutils.Map(userFeeds, func(t *models.Feed) models.Feed { return *t })
} else {
// Neither followers collection url nor public marker, private message
dbNote.AccessLevel = models.NOTE_TARGET_DM
userFeeds, err := dbgen.Feed.
LeftJoin(dbgen.User, dbgen.User.ID.EqCol(dbgen.Feed.OwnerId)).
LeftJoin(dbgen.RemoteServer, dbgen.RemoteServer.ID.EqCol(dbgen.User.ServerId)).
LeftJoin(dbgen.UserRemoteLinks, dbgen.UserRemoteLinks.ID.EqCol(dbgen.User.RemoteInfoId)).
Where(dbgen.RemoteServer.IsSelf.Is(true)).Where(
dbgen.User.ID.In(totalNoteTargets...),
).Or(dbgen.UserRemoteLinks.ApLink.In(totalNoteTargets...)).Find()
if err != nil {
log.Error().
Err(err).
Str("follow-target", actingUser.ID).
Strs("targeted-ids", totalNoteTargets).
Msg("Failed to get feeds for directly messaged users")
return false
}
targetFeeds = sliceutils.Map(userFeeds, func(t *models.Feed) models.Feed { return *t })
}
}
tx := dbgen.Q.Begin()
err = tx.Note.Create(&dbNote)
if err != nil {
@ -801,6 +986,7 @@ func handleCreate(w http.ResponseWriter, r *http.Request, object map[string]any)
ObjectId: dbNote.ID,
ObjectType: uint32(models.ActivitystreamsActivityTargetNote),
}
err = tx.Activity.Create(&createActivity)
if err != nil {
_ = tx.Rollback()
@ -808,17 +994,25 @@ func handleCreate(w http.ResponseWriter, r *http.Request, object map[string]any)
_ = webutils.ProblemDetailsStatusOnly(w, http.StatusInternalServerError)
return false
}
err = tx.NoteToFeed.Create(&models.NoteToFeed{
Reason: string(models.FeedAppearanceReasonFollowUser),
NoteId: dbNote.ID,
FeedId: uint64(feed.ID),
feedRels := sliceutils.Map(targetFeeds, func(f models.Feed) *models.NoteToFeed {
return &models.NoteToFeed{
Reason: string(models.FeedAppearanceReasonFollowUser),
NoteId: dbNote.ID,
FeedId: uint64(f.ID),
}
})
err = tx.NoteToFeed.Create(feedRels...)
if err != nil {
_ = tx.Rollback()
log.Error().Err(err).Any("note", dbNote).Msg("Failed to create note to feed relation in db")
log.Error().
Err(err).
Any("note", dbNote).
Msg("Failed to create note to feed relations in db")
_ = webutils.ProblemDetailsStatusOnly(w, http.StatusInternalServerError)
return false
}
err = tx.Commit()
if err != nil {
log.Error().Err(err).Any("note", dbNote).Msg("Failed to submit note creation in db")

View file

@ -113,8 +113,7 @@ func undoLike(w http.ResponseWriter, r *http.Request, object map[string]any, tar
_ = webutils.ProblemDetailsStatusOnly(w, http.StatusInternalServerError)
return false
}
reactionId := uint(other.Must(strconv.ParseUint(act.ObjectId, 10, 64)))
reaction, err := dbgen.Reaction.Where(dbgen.Reaction.ID.Eq(reactionId)).First()
reaction, err := dbgen.Reaction.Where(dbgen.Reaction.ID.Eq(act.ObjectId)).First()
switch err {
case gorm.ErrRecordNotFound:
return true
@ -140,7 +139,7 @@ func undoLike(w http.ResponseWriter, r *http.Request, object map[string]any, tar
_ = tx.Rollback()
log.Error().
Err(err).
Uint("reaction-id", reaction.ID).
Str("reaction-id", reaction.ID).
Msg("Failed to delete reaction on undo")
_ = webutils.ProblemDetailsStatusOnly(w, http.StatusInternalServerError)
return false

View file

@ -0,0 +1,5 @@
package activitypub
import "net/http"
// objectEmote handles GET requests for a single custom emote object.
// Currently a stub: it writes nothing, so clients receive an implicit
// 200 OK with an empty body. TODO(review): serve the emote's
// ActivityPub representation once the translator exists.
func objectEmote(w http.ResponseWriter, r *http.Request) {}

View file

@ -16,7 +16,10 @@ func BuildActivitypubRouter() http.Handler {
router.HandleFunc("GET /activity/reject/{id}", activityReject)
router.HandleFunc("GET /activity/update/{id}", activityUpdate)
router.HandleFunc("GET /activity/follow/{id}", activityFollow)
router.HandleFunc("GET /activity/like/{id}", activityLike)
router.HandleFunc("GET /note/{id}", objectNote)
router.HandleFunc("GET /note/{id}/reactions", noteReactions)
router.HandleFunc("GET /emote/{id}", objectEmote)
router.HandleFunc("POST /inbox", globalInbox)
return router
}

44
web/public/media.go Normal file
View file

@ -0,0 +1,44 @@
package webpublic
import (
"io"
"net/http"
webutils "git.mstar.dev/mstar/goutils/http"
"github.com/rs/zerolog/hlog"
"git.mstar.dev/mstar/linstrom/media"
"git.mstar.dev/mstar/linstrom/storage-new/dbgen"
)
// downloadMediaHander streams the raw bytes of a stored media file to the
// client. The file id comes from the {id} path segment. It answers 404 when
// the media server doesn't know the id, 500 on any other storage or database
// failure, and otherwise writes the file with its stored mime type as
// Content-Type.
func downloadMediaHander(w http.ResponseWriter, r *http.Request) {
	id := r.PathValue("id")
	logger := hlog.FromRequest(r)
	reader, err := media.GlobalServer.ReadFileId(id)
	// Unknown id is an expected client error, everything else is a server fault.
	if err == media.ErrFileNotFound {
		_ = webutils.ProblemDetailsStatusOnly(w, http.StatusNotFound)
		return
	}
	if err != nil {
		logger.Error().Err(err).Str("file-id", id).Msg("Failed to get file")
		_ = webutils.ProblemDetailsStatusOnly(w, http.StatusInternalServerError)
		return
	}
	defer func() { _ = reader.Close() }()
	// Only the mime type column is needed to label the response.
	mm := dbgen.MediaMetadata
	metadata, err := mm.Where(mm.ID.Eq(id)).Select(mm.Type).First()
	if err != nil {
		logger.Error().
			Err(err).
			Str("file-id", id).
			Msg("Failed to get file metadata after already getting file")
		_ = webutils.ProblemDetailsStatusOnly(w, http.StatusInternalServerError)
		return
	}
	w.Header().Add("Content-Type", metadata.Type)
	if _, err = io.Copy(w, reader); err != nil {
		// Headers are already sent at this point, so we can only log.
		logger.Error().Err(err).Msg("Failed to copy file to request")
	}
}

View file

@ -56,6 +56,7 @@ func New(addr string, duckFs fs.FS) *Server {
handler.HandleFunc("GET /errors/{name}", errorTypeHandler)
handler.HandleFunc("GET /default-image", buildServeDefaultImage(duckFs))
handler.HandleFunc("GET /default-image.webp", buildServeDefaultImage(duckFs))
handler.HandleFunc("GET /media/{id}", downloadMediaHander)
rootHandler := webutils.ChainMiddlewares(
handler,
webutils.BuildLoggingMiddleware(

View file

@ -236,7 +236,7 @@ func postPostRequest(resp *http.Response, reqErr error, dbId uint64) {
// If response status is ok (< 400) delete entry in db to not process it again
if resp.StatusCode < 400 {
update = false
fr.Where(fr.Id.Eq(dbId)).Delete()
_, _ = fr.Where(fr.Id.Eq(dbId)).Delete()
return
}
if resp.StatusCode == 429 {