treewide: replace uint32 uids with opaque strings

Add a new models.UID type (an alias to string). Replace all occurrences
of uint32 being used as message UID or thread UID with models.UID.

Update all workers to only expose models.UID values and deal with the
conversion internally. Only IMAP needs to convert these to uint32. All
other backends already use plain strings as message identifiers, in
which case no conversion is even needed.

The directory tree implementation needed to be heavily refactored in
order to accommodate thread UID not being usable as a list index.

Signed-off-by: Robin Jarry <robin@jarry.cc>
Tested-by: Inwit <inwit@sindominio.net>
Tested-by: Tim Culverhouse <tim@timculverhouse.com>
This commit is contained in:
Robin Jarry
2024-08-14 16:59:11 +02:00
parent 2950d919a5
commit 73dc39c6ee
64 changed files with 543 additions and 651 deletions

View File

@@ -240,7 +240,7 @@ func (acct *AccountView) SelectedMessage() (*models.MessageInfo, error) {
return msg, nil
}
func (acct *AccountView) MarkedMessages() ([]uint32, error) {
func (acct *AccountView) MarkedMessages() ([]models.UID, error) {
if store := acct.Store(); store != nil {
return store.Marker().Marked(), nil
}
@@ -495,7 +495,7 @@ func (acct *AccountView) onMessage(msg types.WorkerMessage) {
acct.setTitle()
}
func (acct *AccountView) updateDirCounts(destination string, uids []uint32) {
func (acct *AccountView) updateDirCounts(destination string, uids []models.UID) {
// Only update the destination destDir if it is initialized
if destDir := acct.dirlist.Directory(destination); destDir != nil {
var recent, unseen int

View File

@@ -3,7 +3,6 @@ package app
import (
"fmt"
"sort"
"strconv"
"strings"
"time"
@@ -23,8 +22,6 @@ type DirectoryTree struct {
listIdx int
list []*types.Thread
treeDirs []string
virtual bool
virtualCb func()
}
@@ -33,7 +30,6 @@ func NewDirectoryTree(dirlist *DirectoryList) DirectoryLister {
dt := &DirectoryTree{
DirectoryList: dirlist,
listIdx: -1,
list: make([]*types.Thread, 0),
virtualCb: func() {},
}
return dt
@@ -48,13 +44,12 @@ func (dt *DirectoryTree) Selected() string {
return dt.DirectoryList.Selected()
}
node := dt.list[dt.listIdx]
sep := dt.DirectoryList.worker.PathSeparator()
elems := strings.Split(dt.treeDirs[getAnyUid(node)], sep)
elems := dt.nodeElems(node)
n := countLevels(node)
if n < 0 || n >= len(elems) {
return ""
}
return strings.Join(elems[:(n+1)], sep)
return strings.Join(elems[:(n+1)], dt.DirectoryList.worker.PathSeparator())
}
func (dt *DirectoryTree) SelectedDirectory() *models.Directory {
@@ -211,27 +206,24 @@ func (dt *DirectoryTree) SelectedMsgStore() (*lib.MessageStore, bool) {
if dt.virtual {
return nil, false
}
if findString(dt.treeDirs, dt.selected) < 0 {
selected := models.UID(dt.selected)
if _, node := dt.getTreeNode(selected); node == nil {
dt.buildTree()
if idx := findString(dt.treeDirs, dt.selected); idx >= 0 {
selIdx, node := dt.getTreeNode(uint32(idx))
if node != nil {
makeVisible(node)
dt.listIdx = selIdx
}
selIdx, node := dt.getTreeNode(selected)
if node != nil {
makeVisible(node)
dt.listIdx = selIdx
}
}
return dt.DirectoryList.SelectedMsgStore()
}
func (dt *DirectoryTree) reindex(name string) {
idx := findString(dt.treeDirs, name)
if idx >= 0 {
selIdx, node := dt.getTreeNode(uint32(idx))
if node != nil {
makeVisible(node)
dt.listIdx = selIdx
}
selIdx, node := dt.getTreeNode(models.UID(name))
if node != nil {
makeVisible(node)
dt.listIdx = selIdx
}
}
@@ -247,7 +239,8 @@ func (dt *DirectoryTree) Open(name string, query string, delay time.Duration, cb
return
}
again := false
if findString(dt.dirs, name) < 0 {
uid := models.UID(name)
if _, node := dt.getTreeNode(uid); node == nil {
again = true
} else {
dt.reindex(name)
@@ -300,13 +293,14 @@ func (dt *DirectoryTree) NextPrev(delta int) {
func (dt *DirectoryTree) selectIndex(i int) {
dt.listIdx = i
if path := dt.getDirectory(dt.list[dt.listIdx]); path != "" {
dt.virtual = false
dt.Select(path)
} else {
node := dt.list[dt.listIdx]
if node.Dummy {
dt.virtual = true
dt.NewContext()
dt.virtualCb()
} else {
dt.virtual = false
dt.Select(dt.getDirectory(node))
}
}
@@ -345,37 +339,43 @@ func (dt *DirectoryTree) countVisible(list []*types.Thread) (n int) {
return
}
func (dt *DirectoryTree) displayText(node *types.Thread) string {
elems := strings.Split(dt.treeDirs[getAnyUid(node)], dt.DirectoryList.worker.PathSeparator())
return fmt.Sprintf("%s%s%s",
threadPrefix(node, false, false),
getFlag(node), elems[countLevels(node)])
func (dt *DirectoryTree) nodeElems(node *types.Thread) []string {
dir := string(node.Uid)
sep := dt.DirectoryList.worker.PathSeparator()
return strings.Split(dir, sep)
}
func (dt *DirectoryTree) getDirectory(node *types.Thread) string {
if uid := node.Uid; int(uid) < len(dt.treeDirs) {
return dt.treeDirs[uid]
func (dt *DirectoryTree) nodeName(node *types.Thread) string {
if elems := dt.nodeElems(node); len(elems) > 0 {
return elems[len(elems)-1]
}
return ""
}
func (dt *DirectoryTree) getTreeNode(uid uint32) (int, *types.Thread) {
var found *types.Thread
var idx int
func (dt *DirectoryTree) displayText(node *types.Thread) string {
return fmt.Sprintf("%s%s%s",
threadPrefix(node, false, false),
getFlag(node), dt.nodeName(node))
}
func (dt *DirectoryTree) getDirectory(node *types.Thread) string {
return string(node.Uid)
}
func (dt *DirectoryTree) getTreeNode(uid models.UID) (int, *types.Thread) {
for i, node := range dt.list {
if node.Uid == uid {
found = node
idx = i
return i, node
}
}
return idx, found
return -1, nil
}
func (dt *DirectoryTree) hiddenDirectories() map[string]bool {
hidden := make(map[string]bool, 0)
for _, node := range dt.list {
if node.Hidden != 0 && node.FirstChild != nil {
elems := strings.Split(dt.treeDirs[getAnyUid(node)], dt.DirectoryList.worker.PathSeparator())
elems := dt.nodeElems(node)
if levels := countLevels(node); levels < len(elems) {
if node.FirstChild != nil && (levels+1) < len(elems) {
levels += 1
@@ -390,8 +390,9 @@ func (dt *DirectoryTree) hiddenDirectories() map[string]bool {
}
func (dt *DirectoryTree) setHiddenDirectories(hiddenDirs map[string]bool) {
log.Tracef("setHiddenDirectories: %#v", hiddenDirs)
for _, node := range dt.list {
elems := strings.Split(dt.treeDirs[getAnyUid(node)], dt.DirectoryList.worker.PathSeparator())
elems := dt.nodeElems(node)
if levels := countLevels(node); levels < len(elems) {
if node.FirstChild != nil && (levels+1) < len(elems) {
levels += 1
@@ -399,6 +400,7 @@ func (dt *DirectoryTree) setHiddenDirectories(hiddenDirs map[string]bool) {
strDir := strings.Join(elems[:levels], dt.DirectoryList.worker.PathSeparator())
if hidden, ok := hiddenDirs[strDir]; hidden && ok {
node.Hidden = 1
log.Tracef("setHiddenDirectories: %q -> %#v", strDir, node)
}
}
}
@@ -407,29 +409,15 @@ func (dt *DirectoryTree) setHiddenDirectories(hiddenDirs map[string]bool) {
func (dt *DirectoryTree) buildTree() {
if len(dt.list) != 0 {
hiddenDirs := dt.hiddenDirectories()
defer func() {
dt.setHiddenDirectories(hiddenDirs)
}()
defer dt.setHiddenDirectories(hiddenDirs)
}
sTree := make([][]string, 0)
for i, dir := range dt.dirs {
elems := strings.Split(dir, dt.DirectoryList.worker.PathSeparator())
if len(elems) == 0 {
continue
}
elems = append(elems, fmt.Sprintf("%d", i))
sTree = append(sTree, elems)
}
dt.treeDirs = make([]string, len(dt.dirs))
copy(dt.treeDirs, dt.dirs)
root := &types.Thread{Uid: 0}
dt.buildTreeNode(root, sTree, 0xFFFFFF, 1)
threads := make([]*types.Thread, 0)
dirs := make([]string, len(dt.dirs))
copy(dirs, dt.dirs)
root := &types.Thread{}
dt.buildTreeNode(root, dirs, 1)
var threads []*types.Thread
for iter := root.FirstChild; iter != nil; iter = iter.NextSibling {
iter.Parent = nil
threads = append(threads, iter)
@@ -437,16 +425,10 @@ func (dt *DirectoryTree) buildTree() {
// folders-sort
if dt.DirectoryList.acctConf.EnableFoldersSort {
toStr := func(t *types.Thread) string {
if elems := strings.Split(dt.treeDirs[getAnyUid(t)], dt.DirectoryList.worker.PathSeparator()); len(elems) > 0 {
return elems[0]
}
return ""
}
sort.Slice(threads, func(i, j int) bool {
foldersSort := dt.DirectoryList.acctConf.FoldersSort
iInFoldersSort := findString(foldersSort, toStr(threads[i]))
jInFoldersSort := findString(foldersSort, toStr(threads[j]))
iInFoldersSort := findString(foldersSort, dt.getDirectory(threads[i]))
jInFoldersSort := findString(foldersSort, dt.getDirectory(threads[j]))
if iInFoldersSort >= 0 && jInFoldersSort >= 0 {
return iInFoldersSort < jInFoldersSort
}
@@ -456,7 +438,7 @@ func (dt *DirectoryTree) buildTree() {
if jInFoldersSort >= 0 {
return false
}
return toStr(threads[i]) < toStr(threads[j])
return dt.getDirectory(threads[i]) < dt.getDirectory(threads[j])
})
}
@@ -472,43 +454,53 @@ func (dt *DirectoryTree) buildTree() {
}
}
func (dt *DirectoryTree) buildTreeNode(node *types.Thread, stree [][]string, defaultUid uint32, depth int) {
m := make(map[string][][]string)
for _, branch := range stree {
if len(branch) > 1 {
next := append(m[branch[0]], branch[1:]) //nolint:gocritic // intentional append to different slice
m[branch[0]] = next
}
}
keys := make([]string, 0)
for key := range m {
keys = append(keys, key)
}
sort.Strings(keys)
path := dt.getDirectory(node)
for _, key := range keys {
next := m[key]
var uid uint32 = defaultUid
for _, testStr := range next {
if len(testStr) == 1 {
if uidI, err := strconv.Atoi(next[0][0]); err == nil {
uid = uint32(uidI)
}
func (dt *DirectoryTree) buildTreeNode(node *types.Thread, dirs []string, depth int) {
dirmap := make(map[string][]string)
for _, dir := range dirs {
base, dir, cut := strings.Cut(
dir, dt.DirectoryList.worker.PathSeparator())
if _, found := dirmap[base]; found {
if cut {
dirmap[base] = append(dirmap[base], dir)
}
} else if cut {
dirmap[base] = append(dirmap[base], dir)
} else {
dirmap[base] = []string{}
}
nextNode := &types.Thread{Uid: uid}
}
bases := make([]string, 0, len(dirmap))
for base, dirs := range dirmap {
bases = append(bases, base)
sort.Strings(dirs)
}
sort.Strings(bases)
basePath := dt.getDirectory(node)
if depth > dt.UiConfig(basePath).DirListCollapse {
node.Hidden = 1
} else {
node.Hidden = 0
}
for _, base := range bases {
path := dt.childPath(basePath, base)
nextNode := &types.Thread{Uid: models.UID(path)}
nextNode.Dummy = findString(dt.dirs, path) == -1
node.AddChild(nextNode)
if dt.UiConfig(path).DirListCollapse != 0 && dt.listIdx < 0 {
if depth > dt.UiConfig(path).DirListCollapse {
node.Hidden = 1
} else {
node.Hidden = 0
}
}
dt.buildTreeNode(nextNode, next, defaultUid, depth+1)
dt.buildTreeNode(nextNode, dirmap[base], depth+1)
}
}
func (dt *DirectoryTree) childPath(base, relpath string) string {
if base == "" {
return relpath
}
return base + dt.DirectoryList.worker.PathSeparator() + relpath
}
func makeVisible(node *types.Thread) {
if node == nil {
return
@@ -519,27 +511,12 @@ func makeVisible(node *types.Thread) {
}
func isVisible(node *types.Thread) bool {
isVisible := true
for iter := node.Parent; iter != nil; iter = iter.Parent {
if iter.Hidden != 0 {
isVisible = false
break
return false
}
}
return isVisible
}
func getAnyUid(node *types.Thread) (uid uint32) {
err := node.Walk(func(t *types.Thread, l int, err error) error {
if t.FirstChild == nil {
uid = t.Uid
}
return nil
})
if err != nil {
log.Warnf("failed to get uid: %v", err)
}
return
return true
}
func countLevels(node *types.Thread) (level int) {
@@ -550,7 +527,7 @@ func countLevels(node *types.Thread) (level int) {
}
func getFlag(node *types.Thread) string {
if node == nil && node.FirstChild == nil {
if node == nil || node.FirstChild == nil {
return ""
}
if node.Hidden != 0 {

View File

@@ -44,7 +44,7 @@ func (ml *MessageList) Invalidate() {
}
type messageRowParams struct {
uid uint32
uid models.UID
needsHeaders bool
err error
uiConfig *config.UIConfig
@@ -61,7 +61,7 @@ func (ml *MessageList) AlignMessage(pos AlignPosition) {
idx := 0
iter := store.UidsIterator()
for i := 0; iter.Next(); i++ {
if store.SelectedUid() == iter.Value().(uint32) {
if store.SelectedUid() == iter.Value().(models.UID) {
idx = i
break
}
@@ -92,7 +92,7 @@ func (ml *MessageList) Draw(ctx *ui.Context) {
ml.UpdateScroller(ml.height, len(store.Uids()))
iter := store.UidsIterator()
for i := 0; iter.Next(); i++ {
if store.SelectedUid() == iter.Value().(uint32) {
if store.SelectedUid() == iter.Value().(models.UID) {
ml.EnsureScroll(i)
break
}
@@ -108,7 +108,7 @@ func (ml *MessageList) Draw(ctx *ui.Context) {
return
}
var needsHeaders []uint32
var needsHeaders []models.UID
data := state.NewDataSetter()
data.SetAccount(acct.acct)
@@ -166,7 +166,7 @@ func (ml *MessageList) Draw(ctx *ui.Context) {
if i < ml.Scroll() {
continue
}
uid := iter.Value().(uint32)
uid := iter.Value().(models.UID)
if showThreads {
threadView.Update(data, uid)
}
@@ -201,7 +201,7 @@ func (ml *MessageList) Draw(ctx *ui.Context) {
}
func addMessage(
store *lib.MessageStore, uid uint32,
store *lib.MessageStore, uid models.UID,
table *ui.Table, data state.DataSetter,
uiConfig *config.UIConfig,
) bool {
@@ -406,14 +406,14 @@ func (ml *MessageList) Select(index int) {
iter := store.UidsIterator()
var uid uint32
var uid models.UID
if index < 0 {
uid = uids[iter.EndIndex()]
} else {
uid = uids[iter.StartIndex()]
for i := 0; iter.Next(); i++ {
if i >= index {
uid = iter.Value().(uint32)
uid = iter.Value().(models.UID)
break
}
}
@@ -579,7 +579,7 @@ func newThreadView(store *lib.MessageStore) *threadView {
}
}
func (t *threadView) Update(data state.DataSetter, uid uint32) {
func (t *threadView) Update(data state.DataSetter, uid models.UID) {
thread, err := t.store.Thread(uid)
info := state.ThreadInfo{}
if thread != nil && err == nil {

View File

@@ -333,7 +333,7 @@ func (mv *MessageViewer) SelectedMessage() (*models.MessageInfo, error) {
return mv.msg.MessageInfo(), nil
}
func (mv *MessageViewer) MarkedMessages() ([]uint32, error) {
func (mv *MessageViewer) MarkedMessages() ([]models.UID, error) {
return mv.acct.MarkedMessages()
}

View File

@@ -26,5 +26,5 @@ type ProvidesMessages interface {
Store() *lib.MessageStore
SelectedAccount() *AccountView
SelectedMessage() (*models.MessageInfo, error)
MarkedMessages() ([]uint32, error)
MarkedMessages() ([]models.UID, error)
}

View File

@@ -35,7 +35,7 @@ func (c Clear) Execute(args []string) error {
}
if c.Selected {
defer store.Select(0)
defer store.Select("")
}
store.ApplyClear()
acct.SetStatus(state.SearchFilterClear())

View File

@@ -13,6 +13,7 @@ import (
"git.sr.ht/~rjarry/aerc/lib"
"git.sr.ht/~rjarry/aerc/lib/log"
"git.sr.ht/~rjarry/aerc/lib/xdg"
"git.sr.ht/~rjarry/aerc/models"
mboxer "git.sr.ht/~rjarry/aerc/worker/mbox"
"git.sr.ht/~rjarry/aerc/worker/types"
)
@@ -61,7 +62,7 @@ func (e ExportMbox) Execute(args []string) error {
app.PushStatus("Exporting to "+e.Filename, 10*time.Second)
// uids of messages to export
var uids []uint32
var uids []models.UID
// check if something is marked - we export that then
msgProvider, ok := app.SelectedTabContent().(app.ProvidesMessages)
@@ -98,7 +99,7 @@ func (e ExportMbox) Execute(args []string) error {
defer file.Close()
var mu sync.Mutex
var ctr uint32
var ctr uint
var retries int
done := make(chan bool)
@@ -159,15 +160,15 @@ func (e ExportMbox) Execute(args []string) error {
return nil
}
func sortMarkedUids(marked []uint32, store *lib.MessageStore) ([]uint32, error) {
lookup := map[uint32]bool{}
func sortMarkedUids(marked []models.UID, store *lib.MessageStore) ([]models.UID, error) {
lookup := map[models.UID]bool{}
for _, uid := range marked {
lookup[uid] = true
}
uids := []uint32{}
uids := []models.UID{}
iter := store.UidsIterator()
for iter.Next() {
uid, ok := iter.Value().(uint32)
uid, ok := iter.Value().(models.UID)
if !ok {
return nil, errors.New("Invalid message UID value")
}
@@ -179,11 +180,11 @@ func sortMarkedUids(marked []uint32, store *lib.MessageStore) ([]uint32, error)
return uids, nil
}
func sortAllUids(store *lib.MessageStore) ([]uint32, error) {
uids := []uint32{}
func sortAllUids(store *lib.MessageStore) ([]models.UID, error) {
uids := []models.UID{}
iter := store.UidsIterator()
for iter.Next() {
uid, ok := iter.Value().(uint32)
uid, ok := iter.Value().(models.UID)
if !ok {
return nil, errors.New("Invalid message UID value")
}

View File

@@ -85,7 +85,7 @@ func (np NextPrevMsg) Execute(args []string) error {
if nextMsg := store.Selected(); nextMsg != nil {
reloadViewer(nextMsg)
} else {
store.FetchHeaders([]uint32{store.SelectedUid()},
store.FetchHeaders([]models.UID{store.SelectedUid()},
func(msg types.WorkerMessage) {
if m, ok := msg.(*types.MessageInfo); ok {
reloadViewer(m.Info)

View File

@@ -191,7 +191,7 @@ func (s SearchFilter) Execute(args []string) error {
store.Sort(store.GetCurrentSortCriteria(), cb)
} else {
acct.SetStatus(state.Search("Searching..."))
cb := func(uids []uint32) {
cb := func(uids []models.UID) {
acct.SetStatus(state.Search(strings.Join(args, " ")))
log.Tracef("Search results: %v", uids)
store.ApplySearch(uids)

View File

@@ -89,7 +89,7 @@ func archive(msgs []*models.MessageInfo, mfs *types.MultiFileStrategy,
if err != nil {
return err
}
var uids []uint32
var uids []models.UID
for _, msg := range msgs {
uids = append(uids, msg.Uid)
}
@@ -98,7 +98,7 @@ func archive(msgs []*models.MessageInfo, mfs *types.MultiFileStrategy,
marker.ClearVisualMark()
next := findNextNonDeleted(uids, store)
var uidMap map[string][]uint32
var uidMap map[string][]models.UID
switch archiveType {
case ARCHIVE_MONTH:
uidMap = groupBy(msgs, func(msg *models.MessageInfo) string {
@@ -120,7 +120,7 @@ func archive(msgs []*models.MessageInfo, mfs *types.MultiFileStrategy,
return dir
})
case ARCHIVE_FLAT:
uidMap = make(map[string][]uint32)
uidMap = make(map[string][]models.UID)
uidMap[archiveDir] = commands.UidsFromMessageInfos(msgs)
}
@@ -164,8 +164,8 @@ func archive(msgs []*models.MessageInfo, mfs *types.MultiFileStrategy,
func groupBy(msgs []*models.MessageInfo,
grouper func(*models.MessageInfo) string,
) map[string][]uint32 {
m := make(map[string][]uint32)
) map[string][]models.UID {
m := make(map[string][]models.UID)
for _, msg := range msgs {
group := grouper(msg)
m[group] = append(m[group], msg.Uid)

View File

@@ -175,7 +175,7 @@ func (c Copy) Execute(args []string) error {
return nil
}
func (c Copy) CallBack(msg types.WorkerMessage, uids []uint32, store *lib.MessageStore) {
func (c Copy) CallBack(msg types.WorkerMessage, uids []models.UID, store *lib.MessageStore) {
dest := c.Folder
if len(c.Account) != 0 {
dest = fmt.Sprintf("%s in %s", c.Folder, c.Account)

View File

@@ -117,7 +117,7 @@ func (d Delete) Execute(args []string) error {
return nil
}
func findNextNonDeleted(deleted []uint32, store *lib.MessageStore) *models.MessageInfo {
func findNextNonDeleted(deleted []models.UID, store *lib.MessageStore) *models.MessageInfo {
var next, previous *models.MessageInfo
stepper := []func(){store.Next, store.Prev}
for _, stepFn := range stepper {
@@ -146,7 +146,7 @@ func findNextNonDeleted(deleted []uint32, store *lib.MessageStore) *models.Messa
return next
}
func contains(uids []uint32, uid uint32) bool {
func contains(uids []models.UID, uid models.UID) bool {
for _, item := range uids {
if item == uid {
return true

View File

@@ -131,7 +131,7 @@ func (f forward) Execute(args []string) error {
fetchFull = mv.MessageView().FetchFull
} else {
fetchFull = func(cb func(io.Reader)) {
store.FetchFull([]uint32{msg.Uid}, func(fm *types.FullMessage) {
store.FetchFull([]models.UID{msg.Uid}, func(fm *types.FullMessage) {
if fm == nil || (fm != nil && fm.Content == nil) {
return
}
@@ -164,7 +164,7 @@ func (f forward) Execute(args []string) error {
composer.AddAttachment(tmpFileName)
composer.OnClose(func(c *app.Composer) {
if c.Sent() {
store.Forwarded([]uint32{msg.Uid}, true, nil)
store.Forwarded([]models.UID{msg.Uid}, true, nil)
}
os.RemoveAll(tmpDir)
})
@@ -216,7 +216,7 @@ func (f forward) Execute(args []string) error {
composer.OnClose(func(c *app.Composer) {
if c.Sent() {
store.Forwarded([]uint32{msg.Uid}, true, nil)
store.Forwarded([]models.UID{msg.Uid}, true, nil)
}
})

View File

@@ -141,7 +141,7 @@ func (i invite) Execute(args []string) error {
composer.OnClose(func(c *app.Composer) {
if c.Sent() {
store.Answered([]uint32{msg.Uid}, true, nil)
store.Answered([]models.UID{msg.Uid}, true, nil)
}
})

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"git.sr.ht/~rjarry/aerc/commands"
"git.sr.ht/~rjarry/aerc/models"
)
type Mark struct {
@@ -28,7 +29,7 @@ func (Mark) Aliases() []string {
func (m Mark) Execute(args []string) error {
h := newHelper()
OnSelectedMessage := func(fn func(uint32)) error {
OnSelectedMessage := func(fn func(models.UID)) error {
if fn == nil {
return fmt.Errorf("no operation selected")
}
@@ -58,7 +59,7 @@ func (m Mark) Execute(args []string) error {
switch args[0] {
case "mark":
var modFunc func(uint32)
var modFunc func(models.UID)
if m.Toggle {
modFunc = marker.ToggleMark
} else {

View File

@@ -119,7 +119,7 @@ func (m Move) Execute(args []string) error {
// something is happening
app.PushStatus("Moving messages...", 10*time.Second)
var appended []uint32
var appended []models.UID
var timeout bool
go func() {
defer log.PanicHandler()
@@ -187,7 +187,7 @@ func (m Move) Execute(args []string) error {
func (m Move) CallBack(
msg types.WorkerMessage,
acct *app.AccountView,
uids []uint32,
uids []models.UID,
next *models.MessageInfo,
marker marker.Marker,
timeout bool,

View File

@@ -15,6 +15,7 @@ import (
"git.sr.ht/~rjarry/aerc/commands"
cryptoutil "git.sr.ht/~rjarry/aerc/lib/crypto/util"
"git.sr.ht/~rjarry/aerc/lib/log"
"git.sr.ht/~rjarry/aerc/models"
mboxer "git.sr.ht/~rjarry/aerc/worker/mbox"
"git.sr.ht/~rjarry/aerc/worker/types"
)
@@ -122,7 +123,7 @@ func (p Pipe) Run(cb func()) error {
app.PushStatus("Fetching messages ...", 10*time.Second)
if p.Full {
var uids []uint32
var uids []models.UID
var title string
h := newHelper()

View File

@@ -97,8 +97,8 @@ func (f FlagMsg) Execute(args []string) error {
}
// UIDs of messages to enable or disable the flag for.
var toEnable []uint32
var toDisable []uint32
var toEnable []models.UID
var toDisable []models.UID
if f.Toggle {
// If toggling, split messages into those that need to

View File

@@ -70,7 +70,7 @@ func (r Recall) Execute(args []string) error {
}
composer.Tab = app.NewTab(composer, subject)
composer.OnClose(func(composer *app.Composer) {
uids := []uint32{msgInfo.Uid}
uids := []models.UID{msgInfo.Uid}
deleteMessage := func() {
store.Delete(

View File

@@ -195,13 +195,13 @@ func (r reply) Execute(args []string) error {
composer.OnClose(func(c *app.Composer) {
switch {
case c.Sent() && c.Archive() != "" && !noStore:
store.Answered([]uint32{msg.Uid}, true, nil)
store.Answered([]models.UID{msg.Uid}, true, nil)
err := archive([]*models.MessageInfo{msg}, nil, c.Archive())
if err != nil {
app.PushStatus("Archive failed", 10*time.Second)
}
case c.Sent() && !noStore:
store.Answered([]uint32{msg.Uid}, true, nil)
store.Answered([]models.UID{msg.Uid}, true, nil)
case mv != nil && r.Close:
view := account.ViewMessage{Peek: true}
//nolint:errcheck // who cares?

View File

@@ -29,7 +29,7 @@ func newHelper() *helper {
}
}
func (h *helper) markedOrSelectedUids() ([]uint32, error) {
func (h *helper) markedOrSelectedUids() ([]models.UID, error) {
return commands.MarkedOrSelected(h.msgProvider)
}

View File

@@ -148,7 +148,7 @@ func listDir(path string, hidden bool) []string {
// MarkedOrSelected returns either all marked messages if any are marked or the
// selected message instead
func MarkedOrSelected(pm app.ProvidesMessages) ([]uint32, error) {
func MarkedOrSelected(pm app.ProvidesMessages) ([]models.UID, error) {
// marked has priority over the selected message
marked, err := pm.MarkedMessages()
if err != nil {
@@ -162,15 +162,15 @@ func MarkedOrSelected(pm app.ProvidesMessages) ([]uint32, error) {
if err != nil {
return nil, err
}
return expandFoldedThreads(pm, []uint32{msg.Uid}), nil
return expandFoldedThreads(pm, []models.UID{msg.Uid}), nil
}
func expandFoldedThreads(pm app.ProvidesMessages, uids []uint32) []uint32 {
func expandFoldedThreads(pm app.ProvidesMessages, uids []models.UID) []models.UID {
store := pm.Store()
if store == nil {
return uids
}
expanded := make([]uint32, len(uids))
expanded := make([]models.UID, len(uids))
copy(expanded, uids)
for _, uid := range uids {
thread, err := store.Thread(uid)
@@ -194,8 +194,8 @@ func expandFoldedThreads(pm app.ProvidesMessages, uids []uint32) []uint32 {
}
// UidsFromMessageInfos extracts a uid slice from a slice of MessageInfos
func UidsFromMessageInfos(msgs []*models.MessageInfo) []uint32 {
uids := make([]uint32, len(msgs))
func UidsFromMessageInfos(msgs []*models.MessageInfo) []models.UID {
uids := make([]models.UID, len(msgs))
i := 0
for _, msg := range msgs {
uids[i] = msg.Uid
@@ -204,9 +204,9 @@ func UidsFromMessageInfos(msgs []*models.MessageInfo) []uint32 {
return uids
}
func MsgInfoFromUids(store *lib.MessageStore, uids []uint32, statusInfo func(string)) ([]*models.MessageInfo, error) {
func MsgInfoFromUids(store *lib.MessageStore, uids []models.UID, statusInfo func(string)) ([]*models.MessageInfo, error) {
infos := make([]*models.MessageInfo, len(uids))
needHeaders := make([]uint32, 0)
needHeaders := make([]models.UID, 0)
for i, uid := range uids {
var ok bool
infos[i], ok = store.Messages[uid]

View File

@@ -19,8 +19,8 @@ func (fm *EmlMessage) NewReader() (io.ReadCloser, error) {
return io.NopCloser(bytes.NewReader(*fm)), nil
}
func (fm *EmlMessage) UID() uint32 {
return 0xFFFFFFF
func (fm *EmlMessage) UID() models.UID {
return ""
}
func (fm *EmlMessage) Labels() ([]string, error) {

View File

@@ -3,6 +3,7 @@ package iterator
import (
"errors"
"git.sr.ht/~rjarry/aerc/models"
"git.sr.ht/~rjarry/aerc/worker/types"
)
@@ -11,7 +12,7 @@ type defaultFactory struct{}
func (df *defaultFactory) NewIterator(a interface{}) Iterator {
switch data := a.(type) {
case []uint32:
case []models.UID:
return &defaultUid{data: data, index: len(data)}
case []*types.Thread:
return &defaultThread{data: data, index: len(data)}
@@ -21,7 +22,7 @@ func (df *defaultFactory) NewIterator(a interface{}) Iterator {
// defaultUid
type defaultUid struct {
data []uint32
data []models.UID
index int
}
@@ -70,7 +71,7 @@ type reverseFactory struct{}
func (rf *reverseFactory) NewIterator(a interface{}) Iterator {
switch data := a.(type) {
case []uint32:
case []models.UID:
return &reverseUid{data: data, index: -1}
case []*types.Thread:
return &reverseThread{data: data, index: -1}
@@ -80,7 +81,7 @@ func (rf *reverseFactory) NewIterator(a interface{}) Iterator {
// reverseUid
type reverseUid struct {
data []uint32
data []models.UID
index int
}

View File

@@ -4,10 +4,11 @@ import (
"testing"
"git.sr.ht/~rjarry/aerc/lib/iterator"
"git.sr.ht/~rjarry/aerc/models"
"git.sr.ht/~rjarry/aerc/worker/types"
)
func toThreads(uids []uint32) []*types.Thread {
func toThreads(uids []models.UID) []*types.Thread {
threads := make([]*types.Thread, len(uids))
for i, u := range uids {
threads[i] = &types.Thread{Uid: u}
@@ -16,8 +17,8 @@ func toThreads(uids []uint32) []*types.Thread {
}
func TestIterator_DefaultFactory(t *testing.T) {
input := []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9}
want := []uint32{9, 8, 7, 6, 5, 4, 3, 2, 1}
input := []models.UID{"1", "2", "3", "4", "5", "6", "7", "8", "9"}
want := []models.UID{"9", "8", "7", "6", "5", "4", "3", "2", "1"}
factory := iterator.NewFactory(false)
if factory == nil {
@@ -30,8 +31,8 @@ func TestIterator_DefaultFactory(t *testing.T) {
}
func TestIterator_ReverseFactory(t *testing.T) {
input := []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9}
want := []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9}
input := []models.UID{"1", "2", "3", "4", "5", "6", "7", "8", "9"}
want := []models.UID{"1", "2", "3", "4", "5", "6", "7", "8", "9"}
factory := iterator.NewFactory(true)
if factory == nil {
@@ -45,13 +46,13 @@ func TestIterator_ReverseFactory(t *testing.T) {
}
func checkUids(t *testing.T, factory iterator.Factory,
input []uint32, want []uint32, start, end int,
input []models.UID, want []models.UID, start, end int,
) {
label := "uids"
got := make([]uint32, 0)
got := make([]models.UID, 0)
iter := factory.NewIterator(input)
for iter.Next() {
got = append(got, iter.Value().(uint32))
got = append(got, iter.Value().(models.UID))
}
if len(got) != len(want) {
t.Errorf(label + "number of elements not correct")

View File

@@ -1,13 +1,15 @@
package marker
import "git.sr.ht/~rjarry/aerc/models"
// Marker provides the interface for the marking behavior of messages
type Marker interface {
Mark(uint32)
Unmark(uint32)
ToggleMark(uint32)
Mark(models.UID)
Unmark(models.UID)
ToggleMark(models.UID)
Remark()
Marked() []uint32
IsMarked(uint32) bool
Marked() []models.UID
IsMarked(models.UID) bool
IsVisualMark() bool
ToggleVisualMark(bool)
UpdateVisualMark()
@@ -16,30 +18,30 @@ type Marker interface {
// UIDProvider provides the underlying uids and the selected message index
type UIDProvider interface {
Uids() []uint32
Uids() []models.UID
SelectedIndex() int
}
type controller struct {
uidProvider UIDProvider
marked map[uint32]struct{}
lastMarked map[uint32]struct{}
visualStartUID uint32
marked map[models.UID]struct{}
lastMarked map[models.UID]struct{}
visualStartUID models.UID
visualMarkMode bool
visualBase map[uint32]struct{}
visualBase map[models.UID]struct{}
}
// New returns a new Marker
func New(up UIDProvider) Marker {
return &controller{
uidProvider: up,
marked: make(map[uint32]struct{}),
lastMarked: make(map[uint32]struct{}),
marked: make(map[models.UID]struct{}),
lastMarked: make(map[models.UID]struct{}),
}
}
// Mark markes the uid as marked
func (mc *controller) Mark(uid uint32) {
func (mc *controller) Mark(uid models.UID) {
if mc.visualMarkMode {
// visual mode has override, bogus input from user
return
@@ -48,7 +50,7 @@ func (mc *controller) Mark(uid uint32) {
}
// Unmark unmarks the uid
func (mc *controller) Unmark(uid uint32) {
func (mc *controller) Unmark(uid models.UID) {
if mc.visualMarkMode {
// user probably wanted to clear the visual marking
mc.ClearVisualMark()
@@ -63,7 +65,7 @@ func (mc *controller) Remark() {
}
// ToggleMark toggles the marked state for the given uid
func (mc *controller) ToggleMark(uid uint32) {
func (mc *controller) ToggleMark(uid models.UID) {
if mc.visualMarkMode {
// visual mode has override, bogus input from user
return
@@ -78,7 +80,7 @@ func (mc *controller) ToggleMark(uid uint32) {
// resetMark removes the marking from all messages
func (mc *controller) resetMark() {
mc.lastMarked = mc.marked
mc.marked = make(map[uint32]struct{})
mc.marked = make(map[models.UID]struct{})
}
// removeStaleUID removes uids that are no longer presents in the UIDProvider
@@ -98,15 +100,15 @@ func (mc *controller) removeStaleUID() {
}
// IsMarked checks whether the given uid has been marked
func (mc *controller) IsMarked(uid uint32) bool {
func (mc *controller) IsMarked(uid models.UID) bool {
_, marked := mc.marked[uid]
return marked
}
// Marked returns the uids of all marked messages
func (mc *controller) Marked() []uint32 {
func (mc *controller) Marked() []models.UID {
mc.removeStaleUID()
marked := make([]uint32, len(mc.marked))
marked := make([]models.UID, len(mc.marked))
i := 0
for uid := range mc.marked {
marked[i] = uid
@@ -132,7 +134,7 @@ func (mc *controller) ToggleVisualMark(clear bool) {
if idx := mc.uidProvider.SelectedIndex(); idx >= 0 && idx < len(uids) {
mc.visualStartUID = uids[idx]
mc.marked[mc.visualStartUID] = struct{}{}
mc.visualBase = make(map[uint32]struct{})
mc.visualBase = make(map[models.UID]struct{})
for key, value := range mc.marked {
mc.visualBase[key] = value
}
@@ -144,7 +146,7 @@ func (mc *controller) ToggleVisualMark(clear bool) {
func (mc *controller) ClearVisualMark() {
mc.resetMark()
mc.visualMarkMode = false
mc.visualStartUID = 0
mc.visualStartUID = ""
}
// UpdateVisualMark updates the index with the currently selected message
@@ -167,13 +169,13 @@ func (mc *controller) UpdateVisualMark() {
uids := mc.uidProvider.Uids()
var visUids []uint32
var visUids []models.UID
if selectedIdx > startIdx {
visUids = uids[startIdx : selectedIdx+1]
} else {
visUids = uids[selectedIdx : startIdx+1]
}
mc.marked = make(map[uint32]struct{})
mc.marked = make(map[models.UID]struct{})
for uid := range mc.visualBase {
mc.marked[uid] = struct{}{}
}

View File

@@ -4,16 +4,17 @@ import (
"testing"
"git.sr.ht/~rjarry/aerc/lib/marker"
"git.sr.ht/~rjarry/aerc/models"
)
// mockUidProvider implements the UidProvider interface and mocks the message
// store for testing
type mockUidProvider struct {
uids []uint32
uids []models.UID
idx int
}
func (mock *mockUidProvider) Uids() []uint32 {
func (mock *mockUidProvider) Uids() []models.UID {
return mock.uids
}
@@ -23,7 +24,7 @@ func (mock *mockUidProvider) SelectedIndex() int {
func createMarker() (marker.Marker, *mockUidProvider) {
uidProvider := &mockUidProvider{
uids: []uint32{1, 2, 3, 4},
uids: []models.UID{"1", "2", "3", "4"},
idx: 1,
}
m := marker.New(uidProvider)
@@ -32,7 +33,7 @@ func createMarker() (marker.Marker, *mockUidProvider) {
func TestMarker_MarkUnmark(t *testing.T) {
m, _ := createMarker()
uid := uint32(4)
uid := models.UID("4")
m.Mark(uid)
if !m.IsMarked(uid) {
@@ -47,7 +48,7 @@ func TestMarker_MarkUnmark(t *testing.T) {
func TestMarker_ToggleMark(t *testing.T) {
m, _ := createMarker()
uid := uint32(4)
uid := models.UID("4")
if m.IsMarked(uid) {
t.Errorf("ToggleMark: uid should not be marked")
@@ -66,9 +67,9 @@ func TestMarker_ToggleMark(t *testing.T) {
func TestMarker_Marked(t *testing.T) {
m, _ := createMarker()
expected := map[uint32]struct{}{
uint32(1): {},
uint32(4): {},
expected := map[models.UID]struct{}{
"1": {},
"4": {},
}
for uid := range expected {
m.Mark(uid)
@@ -81,7 +82,7 @@ func TestMarker_Marked(t *testing.T) {
for _, uid := range got {
if _, ok := expected[uid]; !ok {
t.Errorf("Marked: received uid %d as marked but it should not be", uid)
t.Errorf("Marked: received uid %q as marked but it should not be", uid)
}
}
}
@@ -93,15 +94,15 @@ func TestMarker_VisualMode(t *testing.T) {
m.ToggleVisualMark(false)
// marking should now fail silently because we're in visual mode
m.Mark(1)
if m.IsMarked(1) {
m.Mark("1")
if m.IsMarked("1") {
t.Errorf("marking in visual mode should not work")
}
// move selection index to last item
up.idx = len(up.uids) - 1
m.UpdateVisualMark()
expectedMarked := []uint32{2, 3, 4}
expectedMarked := []models.UID{"2", "3", "4"}
for _, uidMarked := range expectedMarked {
if !m.IsMarked(uidMarked) {
@@ -128,7 +129,7 @@ func TestMarker_VisualMode(t *testing.T) {
func TestMarker_MarkOutOfBound(t *testing.T) {
m, _ := createMarker()
outOfBoundUid := uint32(100)
outOfBoundUid := models.UID("100")
m.Mark(outOfBoundUid)
for _, markedUid := range m.Marked() {

View File

@@ -75,7 +75,7 @@ func NewMessageStoreView(messageInfo *models.MessageInfo, setSeen bool,
cb := func(msv MessageView, err error) {
if msv != nil && setSeen && err == nil &&
!messageInfo.Flags.Has(models.SeenFlag) {
store.Flag([]uint32{messageInfo.Uid}, models.SeenFlag, true, nil)
store.Flag([]models.UID{messageInfo.Uid}, models.SeenFlag, true, nil)
}
innerCb(msv, err)
}
@@ -147,7 +147,7 @@ func (msv *MessageStoreView) MessageDetails() *models.MessageDetails {
func (msv *MessageStoreView) FetchFull(cb func(io.Reader)) {
if msv.message == nil && msv.messageStore != nil {
msv.messageStore.FetchFull([]uint32{msv.messageInfo.Uid},
msv.messageStore.FetchFull([]models.UID{msv.messageInfo.Uid},
func(fm *types.FullMessage) {
cb(fm.Content.Reader)
})

View File

@@ -20,8 +20,8 @@ import (
type MessageStore struct {
sync.Mutex
Name string
Deleted map[uint32]interface{}
Messages map[uint32]*models.MessageInfo
Deleted map[models.UID]interface{}
Messages map[models.UID]*models.MessageInfo
Sorting bool
ui func() *config.UIConfig
@@ -30,21 +30,21 @@ type MessageStore struct {
ctx context.Context
// Ordered list of known UIDs
uids []uint32
uids []models.UID
threads []*types.Thread
// Visible UIDs
scrollOffset int
scrollLen int
selectedUid uint32
bodyCallbacks map[uint32][]func(*types.FullMessage)
selectedUid models.UID
bodyCallbacks map[models.UID][]func(*types.FullMessage)
// marking
marker marker.Marker
// Search/filter results
results []uint32
results []models.UID
resultIndex int
filter *types.SearchCriteria
@@ -60,11 +60,11 @@ type MessageStore struct {
onUpdate func(store *MessageStore) // TODO: multiple onUpdate handlers
onFilterChange func(store *MessageStore)
onUpdateDirs func()
pendingBodies map[uint32]interface{}
pendingHeaders map[uint32]interface{}
pendingBodies map[models.UID]interface{}
pendingHeaders map[models.UID]interface{}
worker *types.Worker
needsFlags []uint32
needsFlags []models.UID
fetchFlagsDebounce *time.Timer
fetchFlagsDelay time.Duration
@@ -85,7 +85,7 @@ type MessageStore struct {
onSelect func(*models.MessageInfo)
}
const MagicUid = 0xFFFFFFFF
const MagicUid = models.UID("")
func NewMessageStore(worker *types.Worker, name string,
ui func() *config.UIConfig,
@@ -97,8 +97,8 @@ func NewMessageStore(worker *types.Worker, name string,
) *MessageStore {
return &MessageStore{
Name: name,
Deleted: make(map[uint32]interface{}),
Messages: make(map[uint32]*models.MessageInfo),
Deleted: make(map[models.UID]interface{}),
Messages: make(map[models.UID]*models.MessageInfo),
ui: ui,
@@ -108,13 +108,12 @@ func NewMessageStore(worker *types.Worker, name string,
// default window height until account is drawn once
scrollLen: 25,
bodyCallbacks: make(map[uint32][]func(*types.FullMessage)),
pendingBodies: make(map[uint32]interface{}),
pendingHeaders: make(map[uint32]interface{}),
bodyCallbacks: make(map[models.UID][]func(*types.FullMessage)),
pendingBodies: make(map[models.UID]interface{}),
pendingHeaders: make(map[models.UID]interface{}),
worker: worker,
needsFlags: []uint32{},
needsFlags: []models.UID{},
fetchFlagsDelay: 50 * time.Millisecond,
triggerNewEmail: triggerNewEmail,
@@ -158,12 +157,12 @@ func (store *MessageStore) UpdateScroll(offset, length int) {
store.scrollLen = length
}
func (store *MessageStore) FetchHeaders(uids []uint32,
func (store *MessageStore) FetchHeaders(uids []models.UID,
cb func(types.WorkerMessage),
) {
// TODO: this could be optimized by pre-allocating toFetch and trimming it
// at the end. In practice we expect to get most messages back in one frame.
var toFetch []uint32
var toFetch []models.UID
for _, uid := range uids {
if _, ok := store.pendingHeaders[uid]; !ok {
toFetch = append(toFetch, uid)
@@ -189,10 +188,10 @@ func (store *MessageStore) FetchHeaders(uids []uint32,
}
}
func (store *MessageStore) FetchFull(uids []uint32, cb func(*types.FullMessage)) {
func (store *MessageStore) FetchFull(uids []models.UID, cb func(*types.FullMessage)) {
// TODO: this could be optimized by pre-allocating toFetch and trimming it
// at the end. In practice we expect to get most messages back in one frame.
var toFetch []uint32
var toFetch []models.UID
for _, uid := range uids {
if _, ok := store.pendingBodies[uid]; !ok {
toFetch = append(toFetch, uid)
@@ -220,7 +219,7 @@ func (store *MessageStore) FetchFull(uids []uint32, cb func(*types.FullMessage))
}
}
func (store *MessageStore) FetchBodyPart(uid uint32, part []int, cb func(io.Reader)) {
func (store *MessageStore) FetchBodyPart(uid models.UID, part []int, cb func(io.Reader)) {
store.worker.PostAction(&types.FetchMessageBodyPart{
Uid: uid,
Part: part,
@@ -253,7 +252,7 @@ func merge(to *models.MessageInfo, from *models.MessageInfo) {
}
func (store *MessageStore) Update(msg types.WorkerMessage) {
var newUids []uint32
var newUids []models.UID
update := false
updateThreads := false
directoryChange := false
@@ -265,7 +264,7 @@ func (store *MessageStore) Update(msg types.WorkerMessage) {
store.Sort(store.sortCriteria, nil)
update = true
case *types.DirectoryContents:
newMap := make(map[uint32]*models.MessageInfo, len(msg.Uids))
newMap := make(map[models.UID]*models.MessageInfo, len(msg.Uids))
for i, uid := range msg.Uids {
if msg, ok := store.Messages[uid]; ok {
newMap[uid] = msg
@@ -291,7 +290,7 @@ func (store *MessageStore) Update(msg types.WorkerMessage) {
store.uids = store.builder.Uids()
store.threads = msg.Threads
newMap := make(map[uint32]*models.MessageInfo, len(store.uids))
newMap := make(map[models.UID]*models.MessageInfo, len(store.uids))
for i, uid := range store.uids {
if msg, ok := store.Messages[uid]; ok {
newMap[uid] = msg
@@ -351,13 +350,13 @@ func (store *MessageStore) Update(msg types.WorkerMessage) {
break
}
toDelete := make(map[uint32]interface{})
toDelete := make(map[models.UID]interface{})
for _, uid := range msg.Uids {
toDelete[uid] = nil
delete(store.Messages, uid)
delete(store.Deleted, uid)
}
uids := make([]uint32, 0, len(store.uids)-len(msg.Uids))
uids := make([]models.UID, 0, len(store.uids)-len(msg.Uids))
for _, uid := range store.uids {
if _, deleted := toDelete[uid]; deleted {
continue
@@ -369,7 +368,7 @@ func (store *MessageStore) Update(msg types.WorkerMessage) {
store.Select(MagicUid)
}
var newResults []uint32
var newResults []models.UID
for _, res := range store.results {
if _, deleted := toDelete[res]; !deleted {
newResults = append(newResults, res)
@@ -528,7 +527,7 @@ func (store *MessageStore) runThreadBuilderNow() {
}
// Thread returns the thread for the given UId
func (store *MessageStore) Thread(uid uint32) (*types.Thread, error) {
func (store *MessageStore) Thread(uid models.UID) (*types.Thread, error) {
if store.builder == nil {
return nil, errors.New("no threads found")
}
@@ -540,15 +539,15 @@ func (store *MessageStore) SelectedThread() (*types.Thread, error) {
return store.Thread(store.SelectedUid())
}
func (store *MessageStore) Fold(uid uint32, toggle bool) error {
func (store *MessageStore) Fold(uid models.UID, toggle bool) error {
return store.doThreadFolding(uid, true, toggle)
}
func (store *MessageStore) Unfold(uid uint32, toggle bool) error {
func (store *MessageStore) Unfold(uid models.UID, toggle bool) error {
return store.doThreadFolding(uid, false, toggle)
}
func (store *MessageStore) doThreadFolding(uid uint32, hide bool, toggle bool) error {
func (store *MessageStore) doThreadFolding(uid models.UID, hide bool, toggle bool) error {
thread, err := store.Thread(uid)
if err != nil {
return err
@@ -596,7 +595,7 @@ func (store *MessageStore) doThreadFolding(uid uint32, hide bool, toggle bool) e
return nil
}
func (store *MessageStore) Delete(uids []uint32, mfs *types.MultiFileStrategy,
func (store *MessageStore) Delete(uids []models.UID, mfs *types.MultiFileStrategy,
cb func(msg types.WorkerMessage),
) {
for _, uid := range uids {
@@ -618,13 +617,13 @@ func (store *MessageStore) Delete(uids []uint32, mfs *types.MultiFileStrategy,
})
}
func (store *MessageStore) revertDeleted(uids []uint32) {
func (store *MessageStore) revertDeleted(uids []models.UID) {
for _, uid := range uids {
delete(store.Deleted, uid)
}
}
func (store *MessageStore) Copy(uids []uint32, dest string, createDest bool,
func (store *MessageStore) Copy(uids []models.UID, dest string, createDest bool,
mfs *types.MultiFileStrategy, cb func(msg types.WorkerMessage),
) {
if createDest {
@@ -646,7 +645,7 @@ func (store *MessageStore) Copy(uids []uint32, dest string, createDest bool,
})
}
func (store *MessageStore) Move(uids []uint32, dest string, createDest bool,
func (store *MessageStore) Move(uids []models.UID, dest string, createDest bool,
mfs *types.MultiFileStrategy, cb func(msg types.WorkerMessage),
) {
for _, uid := range uids {
@@ -699,7 +698,7 @@ func (store *MessageStore) Append(dest string, flags models.Flags, date time.Tim
})
}
func (store *MessageStore) Flag(uids []uint32, flags models.Flags,
func (store *MessageStore) Flag(uids []models.UID, flags models.Flags,
enable bool, cb func(msg types.WorkerMessage),
) {
store.worker.PostAction(&types.FlagMessages{
@@ -729,7 +728,7 @@ func (store *MessageStore) Flag(uids []uint32, flags models.Flags,
})
}
func (store *MessageStore) Answered(uids []uint32, answered bool,
func (store *MessageStore) Answered(uids []models.UID, answered bool,
cb func(msg types.WorkerMessage),
) {
store.worker.PostAction(&types.AnsweredMessages{
@@ -738,7 +737,7 @@ func (store *MessageStore) Answered(uids []uint32, answered bool,
}, cb)
}
func (store *MessageStore) Forwarded(uids []uint32, forwarded bool,
func (store *MessageStore) Forwarded(uids []models.UID, forwarded bool,
cb func(msg types.WorkerMessage),
) {
store.worker.PostAction(&types.ForwardedMessages{
@@ -747,7 +746,7 @@ func (store *MessageStore) Forwarded(uids []uint32, forwarded bool,
}, cb)
}
func (store *MessageStore) Uids() []uint32 {
func (store *MessageStore) Uids() []models.UID {
if store.ThreadedView() && store.builder != nil {
if uids := store.builder.Uids(); len(uids) > 0 {
return uids
@@ -764,7 +763,7 @@ func (store *MessageStore) Selected() *models.MessageInfo {
return store.Messages[store.selectedUid]
}
func (store *MessageStore) SelectedUid() uint32 {
func (store *MessageStore) SelectedUid() models.UID {
if store.selectedUid == MagicUid && len(store.Uids()) > 0 {
iter := store.UidsIterator()
idx := iter.StartIndex()
@@ -776,14 +775,14 @@ func (store *MessageStore) SelectedUid() uint32 {
return store.selectedUid
}
func (store *MessageStore) Select(uid uint32) {
func (store *MessageStore) Select(uid models.UID) {
store.selectPriv(uid, false)
if store.onSelect != nil {
store.onSelect(store.Selected())
}
}
func (store *MessageStore) selectPriv(uid uint32, lockHeld bool) {
func (store *MessageStore) selectPriv(uid models.UID, lockHeld bool) {
if !lockHeld {
store.threadsMutex.Lock()
}
@@ -844,14 +843,14 @@ func (store *MessageStore) Prev() {
store.NextPrev(-1)
}
func (store *MessageStore) Search(terms *types.SearchCriteria, cb func([]uint32)) {
func (store *MessageStore) Search(terms *types.SearchCriteria, cb func([]models.UID)) {
store.worker.PostAction(&types.SearchDirectory{
Context: store.ctx,
Criteria: terms,
}, func(msg types.WorkerMessage) {
if msg, ok := msg.(*types.SearchResults); ok {
allowedUids := store.Uids()
uids := make([]uint32, 0, len(msg.Uids))
uids := make([]models.UID, 0, len(msg.Uids))
for _, uid := range msg.Uids {
for _, uidCheck := range allowedUids {
if uid == uidCheck {
@@ -866,14 +865,14 @@ func (store *MessageStore) Search(terms *types.SearchCriteria, cb func([]uint32)
})
}
func (store *MessageStore) ApplySearch(results []uint32) {
func (store *MessageStore) ApplySearch(results []models.UID) {
store.results = results
store.resultIndex = -1
store.NextResult()
}
// IsResult returns true if uid is a search result
func (store *MessageStore) IsResult(uid uint32) bool {
func (store *MessageStore) IsResult(uid models.UID) bool {
for _, hit := range store.results {
if hit == uid {
return true
@@ -935,7 +934,7 @@ func (store *MessageStore) PrevResult() {
store.nextPrevResult(-1)
}
func (store *MessageStore) ModifyLabels(uids []uint32, add, remove []string,
func (store *MessageStore) ModifyLabels(uids []models.UID, add, remove []string,
cb func(msg types.WorkerMessage),
) {
store.worker.PostAction(&types.ModifyLabels{
@@ -999,7 +998,7 @@ func (store *MessageStore) Marker() marker.Marker {
}
// FindIndexByUid returns the index in store.Uids() or -1 if not found
func (store *MessageStore) FindIndexByUid(uid uint32) int {
func (store *MessageStore) FindIndexByUid(uid models.UID) int {
for idx, u := range store.Uids() {
if u == uid {
return idx
@@ -1029,7 +1028,7 @@ func (store *MessageStore) fetchFlags() {
Context: store.ctx,
Uids: store.needsFlags,
}, nil)
store.needsFlags = []uint32{}
store.needsFlags = []models.UID{}
store.Unlock()
})
}

View File

@@ -291,7 +291,7 @@ type RawMessage interface {
NewReader() (io.ReadCloser, error)
ModelFlags() (models.Flags, error)
Labels() ([]string, error)
UID() uint32
UID() models.UID
}
// MessageInfo populates a models.MessageInfo struct for the message.

View File

@@ -185,7 +185,7 @@ func (m *mockRawMessage) NewReader() (io.ReadCloser, error) {
}
func (m *mockRawMessage) ModelFlags() (models.Flags, error) { return 0, nil }
func (m *mockRawMessage) Labels() ([]string, error) { return nil, nil }
func (m *mockRawMessage) UID() uint32 { return 0 }
func (m *mockRawMessage) UID() models.UID { return "" }
func die(err error) {
if err != nil {

View File

@@ -6,6 +6,7 @@ import (
"sort"
"strings"
"git.sr.ht/~rjarry/aerc/models"
"git.sr.ht/~rjarry/aerc/worker/types"
)
@@ -61,9 +62,9 @@ func parseSortField(arg string) (types.SortField, error) {
// Sorts toSort by sortBy so that toSort becomes a permutation following the
// order of sortBy.
// toSort should be a subset of sortBy
func SortBy(toSort []uint32, sortBy []uint32) {
func SortBy(toSort []models.UID, sortBy []models.UID) {
// build a map from sortBy
uidMap := make(map[uint32]int)
uidMap := make(map[models.UID]int)
for i, uid := range sortBy {
uidMap[uid] = i
}

View File

@@ -15,41 +15,41 @@ import (
type ThreadBuilder struct {
sync.Mutex
threadBlocks map[uint32]jwz.Threadable
threadedUids []uint32
threadMap map[uint32]*types.Thread
threadBlocks map[models.UID]jwz.Threadable
threadedUids []models.UID
threadMap map[models.UID]*types.Thread
iterFactory iterator.Factory
bySubject bool
}
func NewThreadBuilder(i iterator.Factory, bySubject bool) *ThreadBuilder {
tb := &ThreadBuilder{
threadBlocks: make(map[uint32]jwz.Threadable),
threadBlocks: make(map[models.UID]jwz.Threadable),
iterFactory: i,
threadMap: make(map[uint32]*types.Thread),
threadMap: make(map[models.UID]*types.Thread),
bySubject: bySubject,
}
return tb
}
func (builder *ThreadBuilder) ThreadForUid(uid uint32) (*types.Thread, error) {
func (builder *ThreadBuilder) ThreadForUid(uid models.UID) (*types.Thread, error) {
builder.Lock()
defer builder.Unlock()
t, ok := builder.threadMap[uid]
var err error
if !ok {
err = fmt.Errorf("no thread found for uid '%d'", uid)
err = fmt.Errorf("no thread found for uid '%s'", uid)
}
return t, err
}
// Uids returns the uids in threading order
func (builder *ThreadBuilder) Uids() []uint32 {
func (builder *ThreadBuilder) Uids() []models.UID {
builder.Lock()
defer builder.Unlock()
if builder.threadedUids == nil {
return []uint32{}
return []models.UID{}
}
return builder.threadedUids
}
@@ -68,7 +68,7 @@ func (builder *ThreadBuilder) Update(msg *models.MessageInfo) {
}
// Threads returns a slice of threads for the given list of uids
func (builder *ThreadBuilder) Threads(uids []uint32, inverse bool, sort bool,
func (builder *ThreadBuilder) Threads(uids []models.UID, inverse bool, sort bool,
) []*types.Thread {
builder.Lock()
defer builder.Unlock()
@@ -91,7 +91,7 @@ func (builder *ThreadBuilder) Threads(uids []uint32, inverse bool, sort bool,
return threads
}
func (builder *ThreadBuilder) generateStructure(uids []uint32) jwz.Threadable {
func (builder *ThreadBuilder) generateStructure(uids []models.UID) jwz.Threadable {
jwzThreads := make([]jwz.Threadable, 0, len(builder.threadBlocks))
for _, uid := range uids {
if thr, ok := builder.threadBlocks[uid]; ok {
@@ -108,7 +108,7 @@ func (builder *ThreadBuilder) generateStructure(uids []uint32) jwz.Threadable {
}
func (builder *ThreadBuilder) buildAercThreads(structure jwz.Threadable,
uids []uint32, sort bool,
uids []models.UID, sort bool,
) []*types.Thread {
threads := make([]*types.Thread, 0, len(builder.threadBlocks))
@@ -121,7 +121,7 @@ func (builder *ThreadBuilder) buildAercThreads(structure jwz.Threadable,
// prepare bigger function
var bigger func(l, r *types.Thread) bool
if sort {
sortMap := make(map[uint32]int)
sortMap := make(map[models.UID]int)
for i, uid := range uids {
sortMap[uid] = i
}
@@ -148,7 +148,7 @@ func (builder *ThreadBuilder) buildAercThreads(structure jwz.Threadable,
}
// build thread tree
root := &types.Thread{Uid: 0}
root := &types.Thread{}
builder.buildTree(structure, root, bigger, true)
// copy top-level threads to thread slice
@@ -197,16 +197,16 @@ func (builder *ThreadBuilder) newThread(c jwz.Threadable, parent *types.Thread,
return nil
}
func (builder *ThreadBuilder) sortThreads(threads []*types.Thread, orderedUids []uint32) {
func (builder *ThreadBuilder) sortThreads(threads []*types.Thread, orderedUids []models.UID) {
types.SortThreadsBy(threads, orderedUids)
}
// RebuildUids rebuilds the uids from the given slice of threads
func (builder *ThreadBuilder) RebuildUids(threads []*types.Thread, inverse bool) {
uids := make([]uint32, 0, len(threads))
uids := make([]models.UID, 0, len(threads))
iterT := builder.iterFactory.NewIterator(threads)
for iterT.Next() {
var threaduids []uint32
var threaduids []models.UID
_ = iterT.Value().(*types.Thread).Walk(
func(t *types.Thread, level int, currentErr error) error {
stored, ok := builder.threadMap[t.Uid]
@@ -231,10 +231,10 @@ func (builder *ThreadBuilder) RebuildUids(threads []*types.Thread, inverse bool)
}
}
result := make([]uint32, 0, len(uids))
result := make([]models.UID, 0, len(uids))
iterU := builder.iterFactory.NewIterator(uids)
for iterU.Next() {
result = append(result, iterU.Value().(uint32))
result = append(result, iterU.Value().(models.UID))
}
builder.threadedUids = result
}
@@ -310,9 +310,9 @@ func cleanRefs(m, irp string, refs []string) []string {
return cleanRefs
}
func (t *threadable) UID() uint32 {
func (t *threadable) UID() models.UID {
if t.MsgInfo == nil {
return 0
return ""
}
return t.MsgInfo.Uid
}

View File

@@ -1,62 +0,0 @@
// Package uidstore provides a concurrency-safe two-way mapping between UIDs
// used by the UI and arbitrary string keys as used by different mail backends.
//
// Multiple Store instances can safely be created and the UIDs that they
// generate will be globally unique.
package uidstore
import (
"sync"
"sync/atomic"
)
var nextUID uint32 = 1
// Store holds a mapping between application keys and globally-unique UIDs.
type Store struct {
keyByUID map[uint32]string
uidByKey map[string]uint32
m sync.Mutex
}
// NewStore creates a new, empty Store.
func NewStore() *Store {
return &Store{
keyByUID: make(map[uint32]string),
uidByKey: make(map[string]uint32),
}
}
// GetOrInsert returns the UID for the provided key. If the key was already
// present in the store, the same UID value is returned. Otherwise, the key is
// inserted and the newly generated UID is returned.
func (s *Store) GetOrInsert(key string) uint32 {
s.m.Lock()
defer s.m.Unlock()
if uid, ok := s.uidByKey[key]; ok {
return uid
}
uid := atomic.AddUint32(&nextUID, 1)
s.keyByUID[uid] = key
s.uidByKey[key] = uid
return uid
}
// GetKey returns the key for the provided UID, if available.
func (s *Store) GetKey(uid uint32) (string, bool) {
s.m.Lock()
defer s.m.Unlock()
key, ok := s.keyByUID[uid]
return key, ok
}
// RemoveUID removes the specified UID from the store.
func (s *Store) RemoveUID(uid uint32) {
s.m.Lock()
defer s.m.Unlock()
key, ok := s.keyByUID[uid]
if ok {
delete(s.uidByKey, key)
}
delete(s.keyByUID, uid)
}

View File

@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"io"
"strconv"
"strings"
"time"
@@ -107,6 +108,33 @@ func (c *Capabilities) Has(s string) bool {
return false
}
type UID string
func UidToUint32(uid UID) uint32 {
u, _ := strconv.ParseUint(string(uid), 10, 32)
return uint32(u)
}
func Uint32ToUid(u uint32) UID {
return UID(strconv.FormatUint(uint64(u), 10))
}
func UidToUint32List(uids []UID) []uint32 {
ulist := make([]uint32, 0, len(uids))
for _, uid := range uids {
ulist = append(ulist, UidToUint32(uid))
}
return ulist
}
func Uint32ToUidList(ulist []uint32) []UID {
uids := make([]UID, 0, len(ulist))
for _, u := range ulist {
uids = append(uids, Uint32ToUid(u))
}
return uids
}
// A MessageInfo holds information about the structure of a message
type MessageInfo struct {
BodyStructure *BodyStructure
@@ -118,7 +146,7 @@ type MessageInfo struct {
RFC822Headers *mail.Header
Refs []string
Size uint32
Uid uint32
Uid UID
Error error
}
@@ -169,13 +197,13 @@ func (mi *MessageInfo) References() ([]string, error) {
// A MessageBodyPart can be displayed in the message viewer
type MessageBodyPart struct {
Reader io.Reader
Uid uint32
Uid UID
}
// A FullMessage is the entire message
type FullMessage struct {
Reader io.Reader
Uid uint32
Uid UID
}
type BodyStructure struct {

View File

@@ -24,7 +24,7 @@ type CachedHeader struct {
BodyStructure models.BodyStructure
Envelope models.Envelope
InternalDate time.Time
Uid uint32
Uid models.UID
Size uint32
Header []byte
Created time.Time
@@ -34,7 +34,7 @@ var (
// cacheTag should be updated when changing the cache
// structure; this will ensure that the user's cache is cleared and
// reloaded when the underlying cache structure changes
cacheTag = []byte("0002")
cacheTag = []byte("0003")
cacheTagKey = []byte("cache.tag")
)
@@ -112,9 +112,9 @@ func (w *IMAPWorker) cacheHeader(mi *models.MessageInfo) {
}
}
func (w *IMAPWorker) getCachedHeaders(msg *types.FetchMessageHeaders) []uint32 {
func (w *IMAPWorker) getCachedHeaders(msg *types.FetchMessageHeaders) []models.UID {
w.worker.Tracef("Retrieving headers from cache: %v", msg.Uids)
var need []uint32
var need []models.UID
for _, uid := range msg.Uids {
key := w.headerKey(uid)
data, err := w.cache.Get(key, nil)
@@ -157,8 +157,8 @@ func (w *IMAPWorker) getCachedHeaders(msg *types.FetchMessageHeaders) []uint32 {
return need
}
func (w *IMAPWorker) headerKey(uid uint32) []byte {
key := fmt.Sprintf("header.%s.%d.%d",
func (w *IMAPWorker) headerKey(uid models.UID) []byte {
key := fmt.Sprintf("header.%s.%d.%s",
w.selected.Name, w.selected.UidValidity, uid)
return []byte(key)
}

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"git.sr.ht/~rjarry/aerc/lib/log"
"git.sr.ht/~rjarry/aerc/models"
"github.com/emersion/go-imap"
"github.com/emersion/go-imap/client"
"github.com/emersion/go-imap/commands"
@@ -19,7 +20,7 @@ func NewHandler(c *client.Client) *handler {
return &handler{client: c}
}
func (h handler) FetchEntireThreads(requested []uint32) ([]uint32, error) {
func (h handler) FetchEntireThreads(requested []models.UID) ([]models.UID, error) {
threadIds, err := h.fetchThreadIds(requested)
if err != nil {
return nil,
@@ -33,7 +34,7 @@ func (h handler) FetchEntireThreads(requested []uint32) ([]uint32, error) {
return uids, nil
}
func (h handler) fetchThreadIds(uids []uint32) ([]string, error) {
func (h handler) fetchThreadIds(uids []models.UID) ([]string, error) {
messages := make(chan *imap.Message)
done := make(chan error)
@@ -58,7 +59,9 @@ func (h handler) fetchThreadIds(uids []uint32) ([]string, error) {
}()
var set imap.SeqSet
set.AddNum(uids...)
for _, uid := range uids {
set.AddNum(models.UidToUint32(uid))
}
err := h.client.UidFetch(&set, items, messages)
<-done
@@ -69,18 +72,18 @@ func (h handler) fetchThreadIds(uids []uint32) ([]string, error) {
return thrid, err
}
func (h handler) searchUids(thrid []string) ([]uint32, error) {
func (h handler) searchUids(thrid []string) ([]models.UID, error) {
if len(thrid) == 0 {
return nil, errors.New("no thread IDs provided")
}
return h.runSearch(NewThreadIDSearch(thrid))
}
func (h handler) RawSearch(rawSearch string) ([]uint32, error) {
func (h handler) RawSearch(rawSearch string) ([]models.UID, error) {
return h.runSearch(NewRawSearch(rawSearch))
}
func (h handler) runSearch(cmd imap.Commander) ([]uint32, error) {
func (h handler) runSearch(cmd imap.Commander) ([]models.UID, error) {
if h.client.State() != imap.SelectedState {
return nil, errors.New("no mailbox selected")
}
@@ -90,5 +93,9 @@ func (h handler) runSearch(cmd imap.Commander) ([]uint32, error) {
if err != nil {
return nil, fmt.Errorf("imap execute failed: %w", err)
}
return res.Ids, status.Err()
var uids []models.UID
for _, i := range res.Ids {
uids = append(uids, models.Uint32ToUid(i))
}
return uids, status.Err()
}

View File

@@ -82,7 +82,7 @@ func (imapw *IMAPWorker) handleFetchMessageHeaders(
RFC822Headers: header,
Refs: parse.MsgIDList(header, "references"),
Size: _msg.Size,
Uid: _msg.Uid,
Uid: models.Uint32ToUid(_msg.Uid),
}
imapw.worker.PostMessage(&types.MessageInfo{
Message: types.RespondTo(msg),
@@ -126,7 +126,7 @@ func (imapw *IMAPWorker) handleFetchMessageBodyPart(
partHeaderSection.FetchItem(),
partBodySection.FetchItem(),
}
imapw.handleFetchMessages(msg, []uint32{msg.Uid}, items,
imapw.handleFetchMessages(msg, []models.UID{msg.Uid}, items,
func(_msg *imap.Message) error {
if len(_msg.Body) == 0 {
// ignore duplicate messages with only flag updates
@@ -154,7 +154,7 @@ func (imapw *IMAPWorker) handleFetchMessageBodyPart(
Message: types.RespondTo(msg),
Part: &models.MessageBodyPart{
Reader: part.Body,
Uid: _msg.Uid,
Uid: models.Uint32ToUid(_msg.Uid),
},
}, nil)
// Update flags (to mark message as read)
@@ -162,7 +162,7 @@ func (imapw *IMAPWorker) handleFetchMessageBodyPart(
Message: types.RespondTo(msg),
Info: &models.MessageInfo{
Flags: translateImapFlags(_msg.Flags),
Uid: _msg.Uid,
Uid: models.Uint32ToUid(_msg.Uid),
},
}, nil)
return nil
@@ -196,7 +196,7 @@ func (imapw *IMAPWorker) handleFetchFullMessages(
Message: types.RespondTo(msg),
Content: &models.FullMessage{
Reader: bufio.NewReader(r),
Uid: _msg.Uid,
Uid: models.Uint32ToUid(_msg.Uid),
},
}, nil)
// Update flags (to mark message as read)
@@ -204,7 +204,7 @@ func (imapw *IMAPWorker) handleFetchFullMessages(
Message: types.RespondTo(msg),
Info: &models.MessageInfo{
Flags: translateImapFlags(_msg.Flags),
Uid: _msg.Uid,
Uid: models.Uint32ToUid(_msg.Uid),
},
}, nil)
return nil
@@ -228,7 +228,7 @@ func (imapw *IMAPWorker) handleFetchMessageFlags(msg *types.FetchMessageFlags) {
Message: types.RespondTo(msg),
Info: &models.MessageInfo{
Flags: translateImapFlags(_msg.Flags),
Uid: _msg.Uid,
Uid: models.Uint32ToUid(_msg.Uid),
},
}, nil)
return nil
@@ -236,13 +236,13 @@ func (imapw *IMAPWorker) handleFetchMessageFlags(msg *types.FetchMessageFlags) {
}
func (imapw *IMAPWorker) handleFetchMessages(
msg types.WorkerMessage, uids []uint32, items []imap.FetchItem,
msg types.WorkerMessage, uids []models.UID, items []imap.FetchItem,
procFunc func(*imap.Message) error,
) {
messages := make(chan *imap.Message)
done := make(chan struct{})
missingUids := make(map[uint32]bool)
missingUids := make(map[models.UID]bool)
for _, uid := range uids {
missingUids[uid] = true
}
@@ -251,14 +251,14 @@ func (imapw *IMAPWorker) handleFetchMessages(
defer log.PanicHandler()
for _msg := range messages {
delete(missingUids, _msg.Uid)
delete(missingUids, models.Uint32ToUid(_msg.Uid))
err := procFunc(_msg)
if err != nil {
log.Errorf("failed to process message <%d>: %v", _msg.Uid, err)
imapw.worker.PostMessage(&types.MessageInfo{
Message: types.RespondTo(msg),
Info: &models.MessageInfo{
Uid: _msg.Uid,
Uid: models.Uint32ToUid(_msg.Uid),
Error: err,
},
}, nil)

View File

@@ -81,7 +81,7 @@ func (imapw *IMAPWorker) handleAnsweredMessages(msg *types.AnsweredMessages) {
Message: types.RespondTo(msg),
Info: &models.MessageInfo{
Flags: translateImapFlags(_msg.Flags),
Uid: _msg.Uid,
Uid: models.Uint32ToUid(_msg.Uid),
},
}, nil)
return nil
@@ -100,7 +100,7 @@ func (imapw *IMAPWorker) handleFlagMessages(msg *types.FlagMessages) {
Message: types.RespondTo(msg),
Info: &models.MessageInfo{
Flags: translateImapFlags(_msg.Flags),
Uid: _msg.Uid,
Uid: models.Uint32ToUid(_msg.Uid),
},
}, nil)
return nil
@@ -108,7 +108,7 @@ func (imapw *IMAPWorker) handleFlagMessages(msg *types.FlagMessages) {
}
func (imapw *IMAPWorker) handleStoreOps(
msg types.WorkerMessage, uids []uint32, item imap.StoreItem, flag interface{},
msg types.WorkerMessage, uids []models.UID, item imap.StoreItem, flag interface{},
procFunc func(*imap.Message) error,
) {
messages := make(chan *imap.Message)

View File

@@ -14,12 +14,12 @@ func init() {
imap.CharsetReader = charset.Reader
}
func toSeqSet(uids []uint32) *imap.SeqSet {
var set imap.SeqSet
func toSeqSet(uids []models.UID) *imap.SeqSet {
set := new(imap.SeqSet)
for _, uid := range uids {
set.AddNum(uid)
set.AddNum(models.UidToUint32(uid))
}
return &set
return set
}
func translateBodyStructure(bs *imap.BodyStructure) *models.BodyStructure {

View File

@@ -139,6 +139,6 @@ func (imapw *IMAPWorker) handleSearchDirectory(msg *types.SearchDirectory) {
imapw.worker.PostMessage(&types.SearchResults{
Message: types.RespondTo(msg),
Uids: uids,
Uids: models.Uint32ToUidList(uids),
}, nil)
}

View File

@@ -5,6 +5,7 @@ import (
sortthread "github.com/emersion/go-imap-sortthread"
"git.sr.ht/~rjarry/aerc/models"
"git.sr.ht/~rjarry/aerc/worker/types"
)
@@ -85,9 +86,10 @@ func (imapw *IMAPWorker) handleFetchDirectoryContents(
// Only initialize if we are not filtering
imapw.seqMap.Initialize(uids)
}
imapw.worker.PostMessage(&types.DirectoryContents{
Message: types.RespondTo(msg),
Uids: uids,
Uids: models.Uint32ToUidList(uids),
}, nil)
imapw.worker.PostMessage(&types.Done{Message: types.RespondTo(msg)}, nil)
}
@@ -146,7 +148,7 @@ func (imapw *IMAPWorker) handleDirectoryThreaded(
var uids []uint32
for i := len(aercThreads) - 1; i >= 0; i-- {
aercThreads[i].Walk(func(t *types.Thread, level int, currentErr error) error { //nolint:errcheck // error indicates skipped threads
uids = append(uids, t.Uid)
uids = append(uids, models.UidToUint32(t.Uid))
return nil
})
}
@@ -175,7 +177,7 @@ func convertThreads(threads []*sortthread.Thread, parent *types.Thread) ([]*type
for i := 0; i < len(threads); i++ {
t := threads[i]
conv[i] = &types.Thread{
Uid: t.Id,
Uid: models.Uint32ToUid(t.Id),
}
// Set the first child node

View File

@@ -289,7 +289,7 @@ func (w *IMAPWorker) handleImapUpdate(update client.Update) {
Envelope: translateEnvelope(msg.Envelope),
Flags: translateImapFlags(msg.Flags),
InternalDate: msg.InternalDate,
Uid: msg.Uid,
Uid: models.Uint32ToUid(msg.Uid),
},
}, nil)
case *client.ExpungeUpdate:
@@ -297,7 +297,7 @@ func (w *IMAPWorker) handleImapUpdate(update client.Update) {
w.worker.Errorf("ExpungeUpdate unknown seqnum: %d", update.SeqNum)
} else {
w.worker.PostMessage(&types.MessagesDeleted{
Uids: []uint32{uid},
Uids: []models.UID{models.Uint32ToUid(uid)},
}, nil)
}
}

View File

@@ -186,9 +186,9 @@ func (w *JMAPWorker) handleFetchDirectoryContents(msg *types.FetchDirectoryConte
}
}
uids := make([]uint32, 0, len(contents.MessageIDs))
uids := make([]models.UID, 0, len(contents.MessageIDs))
for _, id := range contents.MessageIDs {
uids = append(uids, w.uidStore.GetOrInsert(string(id)))
uids = append(uids, models.UID(id))
}
w.w.PostMessage(&types.DirectoryContents{
Message: types.RespondTo(msg),
@@ -214,9 +214,9 @@ func (w *JMAPWorker) handleSearchDirectory(msg *types.SearchDirectory) error {
for _, inv := range resp.Responses {
switch r := inv.Args.(type) {
case *email.QueryResponse:
var uids []uint32
var uids []models.UID
for _, id := range r.IDs {
uids = append(uids, w.uidStore.GetOrInsert(string(id)))
uids = append(uids, models.UID(id))
}
w.w.PostMessage(&types.SearchResults{
Message: types.RespondTo(msg),

View File

@@ -38,11 +38,7 @@ func (w *JMAPWorker) handleFetchMessageHeaders(msg *types.FetchMessageHeaders) e
emailIdsToFetch := make([]jmap.ID, 0, len(msg.Uids))
currentEmails := make([]*email.Email, 0, len(msg.Uids))
for _, uid := range msg.Uids {
id, ok := w.uidStore.GetKey(uid)
if !ok {
return fmt.Errorf("bug: no jmap id for message uid: %v", uid)
}
jid := jmap.ID(id)
jid := jmap.ID(uid)
m, err := w.cache.GetEmail(jid)
if err != nil {
// Message wasn't in cache; fetch it
@@ -103,13 +99,9 @@ func (w *JMAPWorker) handleFetchMessageHeaders(msg *types.FetchMessageHeaders) e
}
func (w *JMAPWorker) handleFetchMessageBodyPart(msg *types.FetchMessageBodyPart) error {
id, ok := w.uidStore.GetKey(msg.Uid)
if !ok {
return fmt.Errorf("bug: unknown message uid %d", msg.Uid)
}
mail, err := w.cache.GetEmail(jmap.ID(id))
mail, err := w.cache.GetEmail(jmap.ID(msg.Uid))
if err != nil {
return fmt.Errorf("bug: unknown message id %s: %w", id, err)
return fmt.Errorf("bug: unknown message id %s: %w", msg.Uid, err)
}
part := mail.BodyStructure
@@ -159,13 +151,9 @@ func (w *JMAPWorker) handleFetchMessageBodyPart(msg *types.FetchMessageBodyPart)
func (w *JMAPWorker) handleFetchFullMessages(msg *types.FetchFullMessages) error {
for _, uid := range msg.Uids {
id, ok := w.uidStore.GetKey(uid)
if !ok {
return fmt.Errorf("bug: unknown message uid %d", uid)
}
mail, err := w.cache.GetEmail(jmap.ID(id))
mail, err := w.cache.GetEmail(jmap.ID(uid))
if err != nil {
return fmt.Errorf("bug: unknown message id %s: %w", id, err)
return fmt.Errorf("bug: unknown message id %s: %w", uid, err)
}
buf, err := w.cache.GetBlob(mail.BlobID)
if err != nil {

View File

@@ -37,7 +37,7 @@ func (w *JMAPWorker) translateMsgInfo(m *email.Email) *models.MessageInfo {
return &models.MessageInfo{
Envelope: env,
Flags: keywordsToFlags(m.Keywords),
Uid: w.uidStore.GetOrInsert(string(m.ID)),
Uid: models.UID(m.ID),
BodyStructure: translateBodyStructure(m.BodyStructure),
RFC822Headers: translateJMAPHeader(m.Headers),
Refs: m.References,

View File

@@ -247,10 +247,9 @@ func (w *JMAPWorker) refresh(newState jmap.TypeState) error {
}
if w.selectedMbox == mboxId {
uids := make([]uint32, 0, len(ids))
uids := make([]models.UID, 0, len(ids))
for _, id := range ids {
uid := w.uidStore.GetOrInsert(string(id))
uids = append(uids, uid)
uids = append(uids, models.UID(id))
}
w.w.PostMessage(&types.DirectoryContents{
Uids: uids,

View File

@@ -10,15 +10,11 @@ import (
"git.sr.ht/~rockorager/go-jmap/mail/mailbox"
)
func (w *JMAPWorker) updateFlags(uids []uint32, flags models.Flags, enable bool) error {
func (w *JMAPWorker) updateFlags(uids []models.UID, flags models.Flags, enable bool) error {
var req jmap.Request
patches := make(map[jmap.ID]jmap.Patch)
for _, uid := range uids {
id, ok := w.uidStore.GetKey(uid)
if !ok {
return fmt.Errorf("bug: unknown uid %d", uid)
}
patch := jmap.Patch{}
for kw := range flagsToKeywords(flags) {
path := fmt.Sprintf("keywords/%s", kw)
@@ -28,7 +24,7 @@ func (w *JMAPWorker) updateFlags(uids []uint32, flags models.Flags, enable bool)
patch[path] = nil
}
}
patches[jmap.ID(id)] = patch
patches[jmap.ID(uid)] = patch
}
req.Invoke(&email.Set{
@@ -44,7 +40,7 @@ func (w *JMAPWorker) updateFlags(uids []uint32, flags models.Flags, enable bool)
return checkNotUpdated(resp)
}
func (w *JMAPWorker) moveCopy(uids []uint32, destDir string, deleteSrc bool) error {
func (w *JMAPWorker) moveCopy(uids []models.UID, destDir string, deleteSrc bool) error {
var req jmap.Request
var destMbox jmap.ID
var destroy []jmap.ID
@@ -62,13 +58,9 @@ func (w *JMAPWorker) moveCopy(uids []uint32, destDir string, deleteSrc bool) err
for _, uid := range uids {
dest := destMbox
id, ok := w.uidStore.GetKey(uid)
if !ok {
return fmt.Errorf("bug: unknown uid %d", uid)
}
mail, err := w.cache.GetEmail(jmap.ID(id))
mail, err := w.cache.GetEmail(jmap.ID(uid))
if err != nil {
return fmt.Errorf("bug: unknown message id %s: %w", id, err)
return fmt.Errorf("bug: unknown message id %s: %w", uid, err)
}
patch := w.moveCopyPatch(mail, dest, deleteSrc)
@@ -76,7 +68,7 @@ func (w *JMAPWorker) moveCopy(uids []uint32, destDir string, deleteSrc bool) err
destroy = append(destroy, mail.ID)
w.w.Debugf("destroying <%s>", mail.MessageID[0])
} else {
patches[jmap.ID(id)] = patch
patches[jmap.ID(uid)] = patch
}
}
@@ -161,11 +153,7 @@ func (w *JMAPWorker) handleModifyLabels(msg *types.ModifyLabels) error {
patches := make(map[jmap.ID]jmap.Patch)
for _, uid := range msg.Uids {
id, ok := w.uidStore.GetKey(uid)
if !ok {
return fmt.Errorf("bug: unknown uid %d", uid)
}
patches[jmap.ID(id)] = patch
patches[jmap.ID(uid)] = patch
}
req.Invoke(&email.Set{

View File

@@ -6,7 +6,6 @@ import (
"time"
"git.sr.ht/~rjarry/aerc/config"
"git.sr.ht/~rjarry/aerc/lib/uidstore"
"git.sr.ht/~rjarry/aerc/models"
"git.sr.ht/~rjarry/aerc/worker/handlers"
"git.sr.ht/~rjarry/aerc/worker/jmap/cache"
@@ -47,7 +46,6 @@ type JMAPWorker struct {
mbox2dir map[jmap.ID]string
roles map[mailbox.Role]jmap.ID
identities map[string]*identity.Identity
uidStore *uidstore.Store
changes chan jmap.TypeState
stop chan struct{}
@@ -56,7 +54,6 @@ type JMAPWorker struct {
func NewJMAPWorker(worker *types.Worker) (types.Backend, error) {
return &JMAPWorker{
w: worker,
uidStore: uidstore.NewStore(),
roles: make(map[mailbox.Role]jmap.ID),
dir2mbox: make(map[string]jmap.ID),
mbox2dir: make(map[jmap.ID]string),

View File

@@ -13,11 +13,11 @@ import (
"git.sr.ht/~rjarry/go-opt"
)
func Search(messages []rfc822.RawMessage, criteria *types.SearchCriteria) ([]uint32, error) {
func Search(messages []rfc822.RawMessage, criteria *types.SearchCriteria) ([]models.UID, error) {
criteria.PrepareHeader()
requiredParts := GetRequiredParts(criteria)
matchedUids := []uint32{}
var matchedUids []models.UID
for _, m := range messages {
success, err := SearchMessage(m, criteria, requiredParts)
if err != nil {

View File

@@ -11,7 +11,7 @@ import (
func Sort(messageInfos []*models.MessageInfo,
criteria []*types.SortCriterion,
) ([]uint32, error) {
) ([]models.UID, error) {
// loop through in reverse to ensure we sort by non-primary fields first
for i := len(criteria) - 1; i >= 0; i-- {
criterion := criteria[i]
@@ -56,7 +56,7 @@ func Sort(messageInfos []*models.MessageInfo,
})
}
}
var uids []uint32
var uids []models.UID
// copy in reverse as msgList displays backwards
for i := len(messageInfos) - 1; i >= 0; i-- {
uids = append(uids, messageInfos[i].Uid)

View File

@@ -9,7 +9,7 @@ import (
"github.com/emersion/go-maildir"
"git.sr.ht/~rjarry/aerc/lib/log"
"git.sr.ht/~rjarry/aerc/lib/uidstore"
"git.sr.ht/~rjarry/aerc/models"
"git.sr.ht/~rjarry/aerc/worker/lib"
)
@@ -17,8 +17,7 @@ import (
// the Maildir spec
type Container struct {
Store *lib.MaildirStore
uids *uidstore.Store
recentUIDS map[uint32]struct{} // used to set the recent flag
recentUIDS map[models.UID]struct{} // used to set the recent flag
}
// NewContainer creates a new container at the specified directory
@@ -28,8 +27,8 @@ func NewContainer(dir string, maildirpp bool) (*Container, error) {
return nil, err
}
return &Container{
Store: store, uids: uidstore.NewStore(),
recentUIDS: make(map[uint32]struct{}),
Store: store,
recentUIDS: make(map[models.UID]struct{}),
}, nil
}
@@ -40,8 +39,7 @@ func (c *Container) SyncNewMail(dir maildir.Dir) error {
return err
}
for _, key := range keys {
uid := c.uids.GetOrInsert(key)
c.recentUIDS[uid] = struct{}{}
c.recentUIDS[models.UID(key)] = struct{}{}
}
return nil
}
@@ -57,18 +55,18 @@ func (c *Container) OpenDirectory(name string) (maildir.Dir, error) {
}
// IsRecent returns if a uid has the Recent flag set
func (c *Container) IsRecent(uid uint32) bool {
func (c *Container) IsRecent(uid models.UID) bool {
_, ok := c.recentUIDS[uid]
return ok
}
// ClearRecentFlag removes the Recent flag from the message with the given uid
func (c *Container) ClearRecentFlag(uid uint32) {
func (c *Container) ClearRecentFlag(uid models.UID) {
delete(c.recentUIDS, uid)
}
// UIDs fetches the unique message identifiers for the maildir
func (c *Container) UIDs(d maildir.Dir) ([]uint32, error) {
func (c *Container) UIDs(d maildir.Dir) ([]models.UID, error) {
keys, err := d.Keys()
if err != nil && len(keys) == 0 {
return nil, fmt.Errorf("could not get keys for %s: %w", d, err)
@@ -77,39 +75,26 @@ func (c *Container) UIDs(d maildir.Dir) ([]uint32, error) {
log.Errorf("could not get all keys for %s: %s", d, err.Error())
}
sort.Strings(keys)
var uids []uint32
var uids []models.UID
for _, key := range keys {
uids = append(uids, c.uids.GetOrInsert(key))
uids = append(uids, models.UID(key))
}
return uids, err
}
// Message returns a Message struct for the given UID and maildir
func (c *Container) Message(d maildir.Dir, uid uint32) (*Message, error) {
if key, ok := c.uids.GetKey(uid); ok {
return &Message{
dir: d,
uid: uid,
key: key,
}, nil
}
return nil, fmt.Errorf("could not find message with uid %d in maildir %s",
uid, d)
}
func (c *Container) MessageFromKey(d maildir.Dir, key string) *Message {
uid := c.uids.GetOrInsert(key)
func (c *Container) Message(d maildir.Dir, uid models.UID) (*Message, error) {
return &Message{
dir: d,
uid: uid,
key: key,
}
key: string(uid),
}, nil
}
// DeleteAll deletes a set of messages by UID and returns the subset of UIDs
// which were successfully deleted, stopping upon the first error.
func (c *Container) DeleteAll(d maildir.Dir, uids []uint32) ([]uint32, error) {
var success []uint32
func (c *Container) DeleteAll(d maildir.Dir, uids []models.UID) ([]models.UID, error) {
var success []models.UID
for _, uid := range uids {
msg, err := c.Message(d, uid)
if err != nil {
@@ -124,46 +109,38 @@ func (c *Container) DeleteAll(d maildir.Dir, uids []uint32) ([]uint32, error) {
}
func (c *Container) CopyAll(
dest maildir.Dir, src maildir.Dir, uids []uint32,
dest maildir.Dir, src maildir.Dir, uids []models.UID,
) error {
for _, uid := range uids {
if err := c.copyMessage(dest, src, uid); err != nil {
return fmt.Errorf("could not copy message %d: %w", uid, err)
return fmt.Errorf("could not copy message %s: %w", uid, err)
}
}
return nil
}
func (c *Container) copyMessage(
dest maildir.Dir, src maildir.Dir, uid uint32,
dest maildir.Dir, src maildir.Dir, uid models.UID,
) error {
key, ok := c.uids.GetKey(uid)
if !ok {
return fmt.Errorf("could not find key for message id %d", uid)
}
_, err := src.Copy(dest, key)
_, err := src.Copy(dest, string(uid))
return err
}
func (c *Container) MoveAll(dest maildir.Dir, src maildir.Dir, uids []uint32) ([]uint32, error) {
var success []uint32
func (c *Container) MoveAll(dest maildir.Dir, src maildir.Dir, uids []models.UID) ([]models.UID, error) {
var success []models.UID
for _, uid := range uids {
if err := c.moveMessage(dest, src, uid); err != nil {
return success, fmt.Errorf("could not move message %d: %w", uid, err)
return success, fmt.Errorf("could not move message %s: %w", uid, err)
}
success = append(success, uid)
}
return success, nil
}
func (c *Container) moveMessage(dest maildir.Dir, src maildir.Dir, uid uint32) error {
key, ok := c.uids.GetKey(uid)
if !ok {
return fmt.Errorf("could not find key for message id %d", uid)
}
path, err := src.Filename(key)
func (c *Container) moveMessage(dest maildir.Dir, src maildir.Dir, uid models.UID) error {
path, err := src.Filename(string(uid))
if err != nil {
return fmt.Errorf("could not find path for message id %d", uid)
return fmt.Errorf("could not find path for message id %s: %w", uid, err)
}
// Remove encoded UID information from the key to prevent sync issues
name := lib.StripUIDFromMessageFilename(filepath.Base(path))

View File

@@ -15,7 +15,7 @@ import (
// A Message is an individual email inside of a maildir.Dir.
type Message struct {
dir maildir.Dir
uid uint32
uid models.UID
key string
}
@@ -135,7 +135,7 @@ func (m Message) NewBodyPartReader(requestedParts []int) (io.Reader, error) {
return rfc822.FetchEntityPartReader(msg, requestedParts)
}
func (m Message) UID() uint32 {
func (m Message) UID() models.UID {
return m.uid
}

View File

@@ -6,11 +6,12 @@ import (
"sync"
"git.sr.ht/~rjarry/aerc/lib/log"
"git.sr.ht/~rjarry/aerc/models"
"git.sr.ht/~rjarry/aerc/worker/lib"
"git.sr.ht/~rjarry/aerc/worker/types"
)
func (w *Worker) search(ctx context.Context, criteria *types.SearchCriteria) ([]uint32, error) {
func (w *Worker) search(ctx context.Context, criteria *types.SearchCriteria) ([]models.UID, error) {
criteria.PrepareHeader()
requiredParts := lib.GetRequiredParts(criteria)
w.worker.Debugf("Required parts bitmask for search: %b", requiredParts)
@@ -20,7 +21,7 @@ func (w *Worker) search(ctx context.Context, criteria *types.SearchCriteria) ([]
return nil, err
}
matchedUids := []uint32{}
var matchedUids []models.UID
mu := sync.Mutex{}
wg := sync.WaitGroup{}
// Hard limit at 2x CPU cores
@@ -33,7 +34,7 @@ func (w *Worker) search(ctx context.Context, criteria *types.SearchCriteria) ([]
default:
limit <- struct{}{}
wg.Add(1)
go func(key uint32) {
go func(key models.UID) {
defer log.PanicHandler()
defer wg.Done()
success, err := w.searchKey(key, criteria, requiredParts)
@@ -55,7 +56,7 @@ func (w *Worker) search(ctx context.Context, criteria *types.SearchCriteria) ([]
}
// Execute the search criteria for the given key, returns true if search succeeded
func (w *Worker) searchKey(key uint32, criteria *types.SearchCriteria,
func (w *Worker) searchKey(key models.UID, criteria *types.SearchCriteria,
parts lib.MsgParts,
) (bool, error) {
message, err := w.c.Message(*w.selected, key)

View File

@@ -460,7 +460,7 @@ func (w *Worker) handleFetchDirectoryContents(
msg *types.FetchDirectoryContents,
) error {
var (
uids []uint32
uids []models.UID
err error
)
if msg.Filter != nil {
@@ -494,7 +494,7 @@ func (w *Worker) handleFetchDirectoryContents(
return nil
}
func (w *Worker) sort(ctx context.Context, uids []uint32, criteria []*types.SortCriterion) ([]uint32, error) {
func (w *Worker) sort(ctx context.Context, uids []models.UID, criteria []*types.SortCriterion) ([]models.UID, error) {
if len(criteria) == 0 {
// At least sort by uid, parallel searching can create random
// order
@@ -516,7 +516,7 @@ func (w *Worker) sort(ctx context.Context, uids []uint32, criteria []*types.Sort
default:
limit <- struct{}{}
wg.Add(1)
go func(uid uint32) {
go func(uid models.UID) {
defer log.PanicHandler()
defer wg.Done()
info, err := w.msgHeadersFromUid(uid)
@@ -546,7 +546,7 @@ func (w *Worker) handleFetchDirectoryThreaded(
msg *types.FetchDirectoryThreaded,
) error {
var (
uids []uint32
uids []models.UID
err error
)
if msg.Filter != nil {
@@ -574,7 +574,7 @@ func (w *Worker) handleFetchDirectoryThreaded(
return nil
}
func (w *Worker) threads(ctx context.Context, uids []uint32,
func (w *Worker) threads(ctx context.Context, uids []models.UID,
criteria []*types.SortCriterion,
) ([]*types.Thread, error) {
builder := aercLib.NewThreadBuilder(iterator.NewFactory(false), false)
@@ -590,7 +590,7 @@ func (w *Worker) threads(ctx context.Context, uids []uint32,
default:
limit <- struct{}{}
wg.Add(1)
go func(uid uint32) {
go func(uid models.UID) {
defer log.PanicHandler()
defer wg.Done()
info, err := w.msgHeadersFromUid(uid)
@@ -903,7 +903,7 @@ func (w *Worker) handleSearchDirectory(msg *types.SearchDirectory) error {
return nil
}
func (w *Worker) msgInfoFromUid(uid uint32) (*models.MessageInfo, error) {
func (w *Worker) msgInfoFromUid(uid models.UID) (*models.MessageInfo, error) {
m, err := w.c.Message(*w.selected, uid)
if err != nil {
return nil, err
@@ -923,7 +923,7 @@ func (w *Worker) msgInfoFromUid(uid uint32) (*models.MessageInfo, error) {
return info, nil
}
func (w *Worker) msgHeadersFromUid(uid uint32) (*models.MessageInfo, error) {
func (w *Worker) msgHeadersFromUid(uid models.UID) (*models.MessageInfo, error) {
m, err := w.c.Message(*w.selected, uid)
if err != nil {
return nil, err

View File

@@ -12,7 +12,6 @@ import (
func Read(r io.Reader) ([]rfc822.RawMessage, error) {
mbr := mbox.NewReader(r)
uid := uint32(0)
messages := make([]rfc822.RawMessage, 0)
for {
msg, err := mbr.NextMessage()
@@ -28,10 +27,10 @@ func Read(r io.Reader) ([]rfc822.RawMessage, error) {
}
messages = append(messages, &message{
uid: uid, flags: models.SeenFlag, content: content,
uid: uidFromContents(content),
flags: models.SeenFlag,
content: content,
})
uid++
}
return messages, nil
}

View File

@@ -2,6 +2,8 @@ package mboxer
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
@@ -49,7 +51,7 @@ func (md *mailboxContainer) DirectoryInfo(file string) *models.DirectoryInfo {
}
}
func (md *mailboxContainer) Copy(dest, src string, uids []uint32) error {
func (md *mailboxContainer) Copy(dest, src string, uids []models.UID) error {
srcmbox, ok := md.Mailbox(src)
if !ok {
return fmt.Errorf("source %s not found", src)
@@ -69,15 +71,15 @@ func (md *mailboxContainer) Copy(dest, src string, uids []uint32) error {
if found {
msg, err := srcmbox.Message(uidSrc)
if err != nil {
return fmt.Errorf("could not get message with uid %d from folder %s", uidSrc, src)
return fmt.Errorf("could not get message with uid %s from folder %s", uidSrc, src)
}
r, err := msg.NewReader()
if err != nil {
return fmt.Errorf("could not get reader for message with uid %d", uidSrc)
return fmt.Errorf("could not get reader for message with uid %s", uidSrc)
}
flags, err := msg.ModelFlags()
if err != nil {
return fmt.Errorf("could not get flags for message with uid %d", uidSrc)
return fmt.Errorf("could not get flags for message with uid %s", uidSrc)
}
err = destmbox.Append(r, flags)
if err != nil {
@@ -94,24 +96,24 @@ type container struct {
messages []rfc822.RawMessage
}
func (f *container) Uids() []uint32 {
uids := make([]uint32, len(f.messages))
func (f *container) Uids() []models.UID {
uids := make([]models.UID, len(f.messages))
for i, m := range f.messages {
uids[i] = m.UID()
}
return uids
}
func (f *container) Message(uid uint32) (rfc822.RawMessage, error) {
func (f *container) Message(uid models.UID) (rfc822.RawMessage, error) {
for _, m := range f.messages {
if uid == m.UID() {
return m, nil
}
}
return &message{}, fmt.Errorf("uid [%d] not found", uid)
return &message{}, fmt.Errorf("uid [%s] not found", uid)
}
func (f *container) Delete(uids []uint32) (deleted []uint32) {
func (f *container) Delete(uids []models.UID) (deleted []models.UID) {
newMessages := make([]rfc822.RawMessage, 0)
for _, m := range f.messages {
del := false
@@ -131,32 +133,28 @@ func (f *container) Delete(uids []uint32) (deleted []uint32) {
return
}
func (f *container) newUid() (next uint32) {
for _, m := range f.messages {
if uid := m.UID(); uid > next {
next = uid
}
}
next++
return
}
func (f *container) Append(r io.Reader, flags models.Flags) error {
data, err := io.ReadAll(r)
if err != nil {
return err
}
f.messages = append(f.messages, &message{
uid: f.newUid(),
uid: uidFromContents(data),
flags: flags,
content: data,
})
return nil
}
func uidFromContents(data []byte) models.UID {
sum := sha256.New()
sum.Write(data)
return models.UID(hex.EncodeToString(sum.Sum(nil)))
}
// message implements the lib.RawMessage interface
type message struct {
uid uint32
uid models.UID
flags models.Flags
content []byte
}
@@ -173,7 +171,7 @@ func (m *message) Labels() ([]string, error) {
return nil, nil
}
func (m *message) UID() uint32 {
func (m *message) UID() models.UID {
return m.uid
}

View File

@@ -405,7 +405,7 @@ func (w *mboxWorker) PathSeparator() string {
return "/"
}
func filterUids(folder *container, uids []uint32, criteria *types.SearchCriteria) ([]uint32, error) {
func filterUids(folder *container, uids []models.UID, criteria *types.SearchCriteria) ([]models.UID, error) {
log.Debugf("Search with parsed criteria: %#v", criteria)
m := make([]rfc822.RawMessage, 0, len(uids))
for _, uid := range uids {
@@ -419,9 +419,9 @@ func filterUids(folder *container, uids []uint32, criteria *types.SearchCriteria
return lib.Search(m, criteria)
}
func sortUids(folder *container, uids []uint32,
func sortUids(folder *container, uids []models.UID,
criteria []*types.SortCriterion,
) ([]uint32, error) {
) ([]models.UID, error) {
var infos []*models.MessageInfo
needSize := false
for _, item := range criteria {

View File

@@ -10,7 +10,7 @@ import (
"git.sr.ht/~rjarry/aerc/lib/log"
"git.sr.ht/~rjarry/aerc/lib/notmuch"
"git.sr.ht/~rjarry/aerc/lib/uidstore"
"git.sr.ht/~rjarry/aerc/models"
"git.sr.ht/~rjarry/aerc/worker/types"
)
@@ -18,7 +18,6 @@ type DB struct {
path string
excludedTags []string
db *notmuch.Database
uidStore *uidstore.Store
}
func NewDB(path string, excludedTags []string) *DB {
@@ -28,7 +27,6 @@ func NewDB(path string, excludedTags []string) *DB {
db := &DB{
path: path,
excludedTags: excludedTags,
uidStore: uidstore.NewStore(),
db: nm,
}
return db
@@ -312,14 +310,6 @@ func (db *DB) MsgModifyTags(key string, add, remove []string) error {
return msg.SyncTagsToMaildirFlags()
}
func (db *DB) UidFromKey(key string) uint32 {
return db.uidStore.GetOrInsert(key)
}
func (db *DB) KeyFromUid(uid uint32) (string, bool) {
return db.uidStore.GetKey(uid)
}
func (db *DB) makeThread(parent *types.Thread, msgs *notmuch.Messages, threadContext bool) []*types.Thread {
var siblings []*types.Thread
for msgs.Next() {
@@ -338,7 +328,7 @@ func (db *DB) makeThread(parent *types.Thread, msgs *notmuch.Messages, threadCon
continue
}
node := &types.Thread{
Uid: db.uidStore.GetOrInsert(msgID),
Uid: models.UID(msgID),
Parent: parent,
}
switch threadContext {

View File

@@ -21,7 +21,7 @@ import (
)
type Message struct {
uid uint32
uid models.UID
key string
db *notmuch.DB
}
@@ -152,7 +152,7 @@ func (m *Message) ModelFlags() (models.Flags, error) {
return flags, nil
}
func (m *Message) UID() uint32 {
func (m *Message) UID() models.UID {
return m.uid
}

View File

@@ -457,27 +457,21 @@ func (w *worker) handleFetchMessageHeaders(
return nil
}
func (w *worker) uidsFromQuery(ctx context.Context, query string) ([]uint32, error) {
func (w *worker) uidsFromQuery(ctx context.Context, query string) ([]models.UID, error) {
msgIDs, err := w.db.MsgIDsFromQuery(ctx, query)
if err != nil {
return nil, err
}
var uids []uint32
var uids []models.UID
for _, id := range msgIDs {
uid := w.db.UidFromKey(id)
uids = append(uids, uid)
uids = append(uids, models.UID(id))
}
return uids, nil
}
func (w *worker) msgFromUid(uid uint32) (*Message, error) {
key, ok := w.db.KeyFromUid(uid)
if !ok {
return nil, fmt.Errorf("Invalid uid: %v", uid)
}
func (w *worker) msgFromUid(uid models.UID) (*Message, error) {
msg := &Message{
key: key,
key: string(uid),
uid: uid,
db: w.db,
}
@@ -613,7 +607,7 @@ func (w *worker) handleModifyLabels(msg *types.ModifyLabels) error {
for _, uid := range msg.Uids {
m, err := w.msgFromUid(uid)
if err != nil {
return fmt.Errorf("could not get message from uid %d: %w", uid, err)
return fmt.Errorf("could not get message from uid %s: %w", uid, err)
}
err = m.ModifyTags(msg.Add, msg.Remove)
if err != nil {
@@ -699,7 +693,7 @@ func (w *worker) emitDirectoryThreaded(parent types.WorkerMessage) error {
return nil
}
func (w *worker) emitMessageInfoError(msg types.WorkerMessage, uid uint32, err error) {
func (w *worker) emitMessageInfoError(msg types.WorkerMessage, uid models.UID, err error) {
w.w.PostMessage(&types.MessageInfo{
Info: &models.MessageInfo{
Envelope: &models.Envelope{},
@@ -743,9 +737,9 @@ func (w *worker) emitLabelList() {
w.w.PostMessage(&types.LabelList{Labels: tags}, nil)
}
func (w *worker) sort(uids []uint32,
func (w *worker) sort(uids []models.UID,
criteria []*types.SortCriterion,
) ([]uint32, error) {
) ([]models.UID, error) {
if len(criteria) == 0 {
return uids, nil
}
@@ -796,7 +790,7 @@ func (w *worker) handleDeleteMessages(msg *types.DeleteMessages) error {
return errUnsupported
}
var deleted []uint32
var deleted []models.UID
folders, _ := w.store.FolderMap()
curDir := folders[w.currentQueryName]
@@ -874,7 +868,7 @@ func (w *worker) handleMoveMessages(msg *types.MoveMessages) error {
return errUnsupported
}
var moved []uint32
var moved []models.UID
folders, _ := w.store.FolderMap()

View File

@@ -146,29 +146,29 @@ type RemoveDirectory struct {
type FetchMessageHeaders struct {
Message
Context context.Context
Uids []uint32
Uids []models.UID
}
type FetchFullMessages struct {
Message
Uids []uint32
Uids []models.UID
}
type FetchMessageBodyPart struct {
Message
Uid uint32
Uid models.UID
Part []int
}
type FetchMessageFlags struct {
Message
Context context.Context
Uids []uint32
Uids []models.UID
}
type DeleteMessages struct {
Message
Uids []uint32
Uids []models.UID
MultiFileStrategy *MultiFileStrategy
}
@@ -177,32 +177,32 @@ type FlagMessages struct {
Message
Enable bool
Flags models.Flags
Uids []uint32
Uids []models.UID
}
type AnsweredMessages struct {
Message
Answered bool
Uids []uint32
Uids []models.UID
}
type ForwardedMessages struct {
Message
Forwarded bool
Uids []uint32
Uids []models.UID
}
type CopyMessages struct {
Message
Destination string
Uids []uint32
Uids []models.UID
MultiFileStrategy *MultiFileStrategy
}
type MoveMessages struct {
Message
Destination string
Uids []uint32
Uids []models.UID
MultiFileStrategy *MultiFileStrategy
}
@@ -244,12 +244,12 @@ type DirectoryInfo struct {
type DirectoryContents struct {
Message
Uids []uint32
Uids []models.UID
}
type SearchResults struct {
Message
Uids []uint32
Uids []models.UID
}
type MessageInfo struct {
@@ -270,24 +270,24 @@ type MessageBodyPart struct {
type MessagesDeleted struct {
Message
Uids []uint32
Uids []models.UID
}
type MessagesCopied struct {
Message
Destination string
Uids []uint32
Uids []models.UID
}
type MessagesMoved struct {
Message
Destination string
Uids []uint32
Uids []models.UID
}
type ModifyLabels struct {
Message
Uids []uint32
Uids []models.UID
Add []string
Remove []string
}

View File

@@ -6,10 +6,11 @@ import (
"sort"
"git.sr.ht/~rjarry/aerc/lib/log"
"git.sr.ht/~rjarry/aerc/models"
)
type Thread struct {
Uid uint32
Uid models.UID
Parent *Thread
PrevSibling *Thread
NextSibling *Thread
@@ -77,11 +78,11 @@ func (t *Thread) Root() *Thread {
}
// Uids returns all associated uids for the given thread and its children
func (t *Thread) Uids() []uint32 {
func (t *Thread) Uids() []models.UID {
if t == nil {
return nil
}
uids := make([]uint32, 0)
uids := make([]models.UID, 0)
err := t.Walk(func(node *Thread, _ int, _ error) error {
uids = append(uids, node.Uid)
return nil
@@ -96,20 +97,20 @@ func (t *Thread) String() string {
if t == nil {
return "<nil>"
}
parent := -1
var parent models.UID
if t.Parent != nil {
parent = int(t.Parent.Uid)
parent = t.Parent.Uid
}
next := -1
var next models.UID
if t.NextSibling != nil {
next = int(t.NextSibling.Uid)
next = t.NextSibling.Uid
}
child := -1
var child models.UID
if t.FirstChild != nil {
child = int(t.FirstChild.Uid)
child = t.FirstChild.Uid
}
return fmt.Sprintf(
"[%d] (parent:%v, next:%v, child:%v)",
"[%s] (parent:%s, next:%s, child:%s)",
t.Uid, parent, next, child,
)
}
@@ -141,9 +142,9 @@ type NewThreadWalkFn func(t *Thread, level int, currentErr error) error
// Implement interface to be able to sort threads by newest (max UID)
type ByUID []*Thread
func getMaxUID(thread *Thread) uint32 {
func getMaxUID(thread *Thread) models.UID {
// TODO: should we make this part of the Thread type to avoid recomputation?
var Uid uint32
var Uid models.UID
_ = thread.Walk(func(t *Thread, _ int, currentErr error) error {
if t.Deleted || t.Hidden > 0 {
@@ -171,9 +172,9 @@ func (s ByUID) Less(i, j int) bool {
return maxUID_i < maxUID_j
}
func SortThreadsBy(toSort []*Thread, sortBy []uint32) {
func SortThreadsBy(toSort []*Thread, sortBy []models.UID) {
// build a map from sortBy
uidMap := make(map[uint32]int)
uidMap := make(map[models.UID]int)
for i, uid := range sortBy {
uidMap[uid] = i
}

View File

@@ -4,16 +4,16 @@ import (
"fmt"
"strings"
"testing"
"git.sr.ht/~rjarry/aerc/models"
)
func genFakeTree() *Thread {
tree := &Thread{
Uid: 0,
}
tree := new(Thread)
var prevChild *Thread
for i := 1; i < 3; i++ {
for i := uint32(1); i < uint32(3); i++ {
child := &Thread{
Uid: uint32(i * 10),
Uid: models.Uint32ToUid(i * 10),
Parent: tree,
PrevSibling: prevChild,
}
@@ -26,9 +26,9 @@ func genFakeTree() *Thread {
}
prevChild = child
var prevSecond *Thread
for j := 1; j < 3; j++ {
for j := uint32(1); j < uint32(3); j++ {
second := &Thread{
Uid: child.Uid + uint32(j),
Uid: models.Uint32ToUid(models.UidToUint32(child.Uid) + j),
Parent: child,
PrevSibling: prevSecond,
}
@@ -41,13 +41,13 @@ func genFakeTree() *Thread {
}
prevSecond = second
var prevThird *Thread
limit := 3
limit := uint32(3)
if j == 2 {
limit = 8
}
for k := 1; k < limit; k++ {
for k := uint32(1); k < limit; k++ {
third := &Thread{
Uid: second.Uid*10 + uint32(k),
							Uid:         models.Uint32ToUid(models.UidToUint32(second.Uid)*10 + k),
Parent: second,
PrevSibling: prevThird,
}
@@ -107,7 +107,7 @@ func TestNewWalk(t *testing.T) {
func uidSeq(tree *Thread) string {
var seq []string
tree.Walk(func(t *Thread, _ int, _ error) error {
seq = append(seq, fmt.Sprintf("%d", t.Uid))
seq = append(seq, string(t.Uid))
return nil
})
return strings.Join(seq, ".")
@@ -116,25 +116,25 @@ func uidSeq(tree *Thread) string {
func TestThread_AddChild(t *testing.T) {
tests := []struct {
name string
seq []int
seq []models.UID
want string
}{
{
name: "ascending",
seq: []int{1, 2, 3, 4, 5, 6},
want: "0.1.2.3.4.5.6",
seq: []models.UID{"1", "2", "3", "4", "5", "6"},
want: ".1.2.3.4.5.6",
},
{
name: "descending",
seq: []int{6, 5, 4, 3, 2, 1},
want: "0.6.5.4.3.2.1",
seq: []models.UID{"6", "5", "4", "3", "2", "1"},
want: ".6.5.4.3.2.1",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
tree := &Thread{Uid: 0}
tree := new(Thread)
for _, i := range test.seq {
tree.AddChild(&Thread{Uid: uint32(i)})
tree.AddChild(&Thread{Uid: i})
}
if got := uidSeq(tree); got != test.want {
t.Errorf("got: %s, but wanted: %s", got,
@@ -147,30 +147,30 @@ func TestThread_AddChild(t *testing.T) {
func TestThread_OrderedInsert(t *testing.T) {
tests := []struct {
name string
seq []int
seq []models.UID
want string
}{
{
name: "ascending",
seq: []int{1, 2, 3, 4, 5, 6},
want: "0.1.2.3.4.5.6",
seq: []models.UID{"1", "2", "3", "4", "5", "6"},
want: ".1.2.3.4.5.6",
},
{
name: "descending",
seq: []int{6, 5, 4, 3, 2, 1},
want: "0.1.2.3.4.5.6",
seq: []models.UID{"6", "5", "4", "3", "2", "1"},
want: ".1.2.3.4.5.6",
},
{
name: "mixed",
seq: []int{2, 1, 6, 3, 4, 5},
want: "0.1.2.3.4.5.6",
seq: []models.UID{"2", "1", "6", "3", "4", "5"},
want: ".1.2.3.4.5.6",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
tree := &Thread{Uid: 0}
tree := new(Thread)
for _, i := range test.seq {
tree.OrderedInsert(&Thread{Uid: uint32(i)})
tree.OrderedInsert(&Thread{Uid: i})
}
if got := uidSeq(tree); got != test.want {
t.Errorf("got: %s, but wanted: %s", got,
@@ -183,32 +183,32 @@ func TestThread_OrderedInsert(t *testing.T) {
func TestThread_InsertCmd(t *testing.T) {
tests := []struct {
name string
seq []int
seq []models.UID
want string
}{
{
name: "ascending",
seq: []int{1, 2, 3, 4, 5, 6},
want: "0.6.4.2.1.3.5",
seq: []models.UID{"1", "2", "3", "4", "5", "6"},
want: ".6.4.2.1.3.5",
},
{
name: "descending",
seq: []int{6, 5, 4, 3, 2, 1},
want: "0.6.4.2.1.3.5",
seq: []models.UID{"6", "5", "4", "3", "2", "1"},
want: ".6.4.2.1.3.5",
},
{
name: "mixed",
seq: []int{2, 1, 6, 3, 4, 5},
want: "0.6.4.2.1.3.5",
seq: []models.UID{"2", "1", "6", "3", "4", "5"},
want: ".6.4.2.1.3.5",
},
}
sortMap := map[uint32]int{
uint32(6): 1,
uint32(4): 2,
uint32(2): 3,
uint32(1): 4,
uint32(3): 5,
uint32(5): 6,
sortMap := map[models.UID]int{
"6": 1,
"4": 2,
"2": 3,
"1": 4,
"3": 5,
"5": 6,
}
// bigger compares the new child with the next node and returns true if
@@ -219,9 +219,9 @@ func TestThread_InsertCmd(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
tree := &Thread{Uid: 0}
tree := new(Thread)
for _, i := range test.seq {
tree.InsertCmp(&Thread{Uid: uint32(i)}, bigger)
tree.InsertCmp(&Thread{Uid: i}, bigger)
}
if got := uidSeq(tree); got != test.want {
t.Errorf("got: %s, but wanted: %s", got,