# -*- coding: utf-8 -*-
# memorystore.py
# Copyright (C) 2014 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
In-memory transient store for a LEAPIMAPServer.
"""
import contextlib
import logging
import threading
import weakref

from collections import defaultdict
from copy import copy

from twisted.internet import defer
from twisted.internet.task import LoopingCall
from twisted.python import log
from zope.interface import implements

from leap.common.check import leap_assert_type
from leap.mail import size
from leap.mail.decorators import deferred
from leap.mail.utils import empty
from leap.mail.messageflow import MessageProducer
from leap.mail.imap import interfaces
from leap.mail.imap.fields import fields
from leap.mail.imap.messageparts import MessagePartType, MessagePartDoc
from leap.mail.imap.messageparts import RecentFlagsDoc
from leap.mail.imap.messageparts import MessageWrapper
from leap.mail.imap.messageparts import ReferenciableDict

logger = logging.getLogger(__name__)


# The default period to do writebacks to the permanent
# soledad storage, in seconds.
SOLEDAD_WRITE_PERIOD = 10


@contextlib.contextmanager
def set_bool_flag(obj, att):
    """
    Set a boolean flag to True while we're doing our thing.
    Just to let the world know.
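
    A minimal usage sketch (illustrative; `obj.busy` is a hypothetical
    attribute name)::

        with set_bool_flag(obj, 'busy'):
            do_work()  # obj.busy is True while we work
        # obj.busy is back to False here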
    """
    setattr(obj, att, True)
    try:
        yield True
    except RuntimeError as exc:
        logger.exception(exc)
    finally:
        setattr(obj, att, False)


class MemoryStore(object):
    """
    An in-memory store where we can write the different parts into which
    we split messages, buffering them until they are written to the
    permanent storage.

    It uses MessageWrapper instances to represent the message parts, which
    are indexed by mailbox name and UID.

    It can also be passed a permanent storage as a parameter (any
    implementor of IMessageStore, in this case a SoledadStore). In that
    case, the messages stored in memory are periodically dumped to it. The
    period of the writes to the permanent storage is controlled by the
    write_period parameter in the constructor.
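
    A minimal usage sketch (illustrative; `wrapper` stands for a
    MessageWrapper built elsewhere)::

        memstore = MemoryStore(permanent_store=None)
        memstore.create_message('INBOX', 1, wrapper, notify_on_disk=False)
        msg = memstore.get_message('INBOX', 1)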
    """
    implements(interfaces.IMessageStore,
               interfaces.IMessageStoreWriter)

    # TODO We will want to index by chash when we transition to local-only
    # UIDs.

    WRITING_FLAG = "_writing"
    _last_uid_lock = threading.Lock()

    def __init__(self, permanent_store=None,
                 write_period=SOLEDAD_WRITE_PERIOD):
        """
        Initialize a MemoryStore.

        :param permanent_store: a IMessageStore implementor to dump
                                messages to.
        :type permanent_store: IMessageStore
        :param write_period: the interval to dump messages to disk, in seconds.
        :type write_period: int
        """
        self._permanent_store = permanent_store
        self._write_period = write_period

        # Internal Storage: messages
        self._msg_store = {}

        # Internal Storage: payload-hash
        """
        {'phash': weakref.proxy(dict)}
        """
        self._phash_store = {}

        # Internal Storage: content-hash:fdoc
        """
        chash-fdoc-store keeps references to
        the flag-documents indexed by content-hash.

        {'chash': {'mbox-a': weakref.proxy(dict),
                   'mbox-b': weakref.proxy(dict)}
        }
        """
        self._chash_fdoc_store = {}

        # Internal Storage: recent-flags store
        """
        recent-flags store keeps one dict per mailbox,
        with the document-id of the u1db document
        and the set of the UIDs that have the recent flag.

        {'mbox-a': {'doc_id': 'deadbeef',
                    'set': {1,2,3,4}
                    }
        }
        """
        # TODO this will have to transition to content-hash
        # indexes after we move to local-only UIDs.

        self._rflags_store = defaultdict(
            lambda: {'doc_id': None, 'set': set([])})

        """
        last-uid store keeps the count of the highest UID
        per mailbox.

        {'mbox-a': 42,
         'mbox-b': 23}
        """
        self._last_uid = {}

        # New and dirty flags, to set MessageWrapper State.
        self._new = set([])
        self._new_deferreds = {}
        self._dirty = set([])
        self._rflags_dirty = set([])
        self._dirty_deferreds = {}

        # Flag for signaling we're busy writing to the disk storage.
        setattr(self, self.WRITING_FLAG, False)

        if self._permanent_store is not None:
            # this producer spits its messages to the permanent store
            # consumer using a queue. We will use that to put
            # our messages to be written.
            self.producer = MessageProducer(permanent_store,
                                            period=0.1)
            # looping call for dumping to SoledadStore
            self._write_loop = LoopingCall(self.write_messages,
                                           permanent_store)

            # We can start the write loop right now, why wait?
            self._start_write_loop()

    def _start_write_loop(self):
        """
        Start loop for writing to disk database.
        """
        if not self._write_loop.running:
            self._write_loop.start(self._write_period, now=True)

    def _stop_write_loop(self):
        """
        Stop loop for writing to disk database.
        """
        if self._write_loop.running:
            self._write_loop.stop()

    # IMessageStore

    # XXX this would work well for whole message operations.
    # We would have to add a put_flags operation to modify only
    # the flags doc (and set the dirty flag accordingly)

    def create_message(self, mbox, uid, message, notify_on_disk=True):
        """
        Create the passed message into this MemoryStore.

        By default we consider that any message is a new message.

        :param mbox: the mailbox
        :type mbox: basestring
        :param uid: the UID for the message
        :type uid: int
        :param message: the message to be added
        :type message: MessageWrapper
        :param notify_on_disk: whether the returned deferred should wait
                               until the message is written to disk
        :type notify_on_disk: bool

        :return: a Deferred. If notify_on_disk is True, it will be fired
                 when the message has been written to the db on disk.
                 Otherwise it will fire immediately.
        :rtype: Deferred
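
        Usage sketch (illustrative; `wrapper` stands for a MessageWrapper
        built elsewhere)::

            d = memstore.create_message('INBOX', 42, wrapper)
            d.addCallback(lambda result: log.msg("written: %s" % result))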
        """
        print "adding new doc to memstore %s (%s)" % (mbox, uid)
        key = mbox, uid

        self._add_message(mbox, uid, message, notify_on_disk)

        d = defer.Deferred()
        d.addCallback(lambda result: log.msg("message save: %s" % result))
        self._new.add(key)

        # We store this deferred so we can keep track of the pending
        # operations internally.
        self._new_deferreds[key] = d

        if notify_on_disk:
            # Caller wants to be notified when the message is on disk
            # so we pass the deferred that will be fired when the message
            # has been written.
            return d
        else:
            # Caller does not care, just fire and forget, so we pass
            # a deferred that will immediately have its callback triggered.
            return defer.succeed('fire-and-forget:%s' % str(key))

    def put_message(self, mbox, uid, message, notify_on_disk=True):
        """
        Put an existing message.

        :param mbox: the mailbox
        :type mbox: basestring
        :param uid: the UID for the message
        :type uid: int
        :param message: the message to be added
        :type message: MessageWrapper
        :param notify_on_disk: whether the returned deferred should wait
                               until the message is written to disk
        :type notify_on_disk: bool

        :return: a Deferred. If notify_on_disk is True, it will be fired
                 when the message has been written to the db on disk.
                 Otherwise it will fire immediately.
        :rtype: Deferred
        """
        key = mbox, uid
        d = defer.Deferred()
        d.addCallback(lambda result: log.msg("message PUT save: %s" % result))

        self._dirty.add(key)
        self._dirty_deferreds[key] = d
        self._add_message(mbox, uid, message, notify_on_disk)
        #print "dirty ", self._dirty
        #print "new ", self._new
        return d

    def _add_message(self, mbox, uid, message, notify_on_disk=True):
        # XXX have to differentiate between notify_new and notify_dirty
        # TODO defaultdict the hell outa here...
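
        # For reference, the per-message entry in self._msg_store ends up
        # shaped roughly like this (keys are the MessagePartType keys bound
        # to FDOC/HDOC/CDOCS/DOCS_ID below; values are illustrative):
        #
        #   {('INBOX', 42): {FDOC: ReferenciableDict({...}),
        #                    HDOC: ReferenciableDict({...}),
        #                    CDOCS: {1: ReferenciableDict({...})},
        #                    DOCS_ID: {...}}}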

        key = mbox, uid
        msg_dict = message.as_dict()

        FDOC = MessagePartType.fdoc.key
        HDOC = MessagePartType.hdoc.key
        CDOCS = MessagePartType.cdocs.key
        DOCS_ID = MessagePartType.docs_id.key

        try:
            store = self._msg_store[key]
        except KeyError:
            self._msg_store[key] = {FDOC: {},
                                    HDOC: {},
                                    CDOCS: {},
                                    DOCS_ID: {}}
            store = self._msg_store[key]

        fdoc = msg_dict.get(FDOC, None)
        if fdoc:
            if not store.get(FDOC, None):
                store[FDOC] = ReferenciableDict({})
            store[FDOC].update(fdoc)

            # content-hash indexing
            chash = fdoc.get(fields.CONTENT_HASH_KEY)
            chash_fdoc_store = self._chash_fdoc_store
            if chash not in chash_fdoc_store:
                chash_fdoc_store[chash] = {}

            chash_fdoc_store[chash][mbox] = weakref.proxy(
                store[FDOC])

        hdoc = msg_dict.get(HDOC, None)
        if hdoc:
            if not store.get(HDOC, None):
                store[HDOC] = ReferenciableDict({})
            store[HDOC].update(hdoc)

        docs_id = msg_dict.get(DOCS_ID, None)
        if docs_id:
            if not store.get(DOCS_ID, None):
                store[DOCS_ID] = {}
            store[DOCS_ID].update(docs_id)

        cdocs = message.cdocs
        for cdoc_key in cdocs.keys():
            if not store.get(CDOCS, None):
                store[CDOCS] = {}

            cdoc = cdocs[cdoc_key]
            # first we make it weakly referenceable
            referenciable_cdoc = ReferenciableDict(cdoc)
            store[CDOCS][cdoc_key] = referenciable_cdoc
            phash = cdoc.get(fields.PAYLOAD_HASH_KEY, None)
            if not phash:
                continue
            self._phash_store[phash] = weakref.proxy(referenciable_cdoc)

        def prune(seq, store):
            for key in seq:
                if key in store and empty(store.get(key)):
                    store.pop(key)
        prune((FDOC, HDOC, CDOCS, DOCS_ID), store)

        #print "after adding: "
        #import pprint; pprint.pprint(self._msg_store[key])

    def get_docid_for_fdoc(self, mbox, uid):
        """
        Get Soledad document id for the flags-doc for a given mbox and uid.
        """
        fdoc = self._permanent_store.get_flags_doc(mbox, uid)
        if not fdoc:
            return None
        return fdoc.doc_id

    def get_message(self, mbox, uid):
        """
        Get a MessageWrapper for the given mbox and uid combination.

        :return: MessageWrapper or None
        """
        key = mbox, uid
        msg_dict = self._msg_store.get(key, None)
        if msg_dict:
            new, dirty = self._get_new_dirty_state(key)
            return MessageWrapper(from_dict=msg_dict,
                                  new=new,
                                  dirty=dirty,
                                  memstore=weakref.proxy(self))
        else:
            return None

    def remove_message(self, mbox, uid):
        """
        Remove a Message from this MemoryStore.
        """
        # XXX For the moment we are only removing the flags and headers
        # docs. The rest we leave there polluting your hard disk,
        # until we think about a good way of deorphaning.

        # XXX implement elijah's idea of using a PUT document as a
        # token to ensure consistency in the removal.

        try:
            key = mbox, uid
            self._new.discard(key)
            self._dirty.discard(key)
            self._msg_store.pop(key, None)
        except Exception as exc:
            logger.exception(exc)

    # IMessageStoreWriter

    def write_messages(self, store):
        """
        Write the message documents in this MemoryStore to a different store.
        """
        # For now, we pass if the queue is not empty, to avoid duplicate
        # queuing.
        # It would be better to use a flag to know when we've already
        # enqueued an item.

        # XXX this could return the deferred for all the enqueued operations

        if not self.producer.is_queue_empty():
            return

        print "Writing messages to Soledad..."
        with set_bool_flag(self, self.WRITING_FLAG):
            for rflags_doc_wrapper in self.all_rdocs_iter():
                self.producer.push(rflags_doc_wrapper)
            for msg_wrapper in self.all_new_dirty_msg_iter():
                self.producer.push(msg_wrapper)

    # MemoryStore specific methods.

    def get_uids(self, mbox):
        """
        Get all uids for a given mbox.
        """
        all_keys = self._msg_store.keys()
        return [uid for m, uid in all_keys if m == mbox]

    # last_uid

    def get_last_uid(self, mbox):
        """
        Get the highest UID for a given mbox.
        It is the maximum of the highest uid in the message store for the
        mailbox and the value in the soledad integer cache.
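
        For example, if the memory store holds UIDs 1, 2 and 5 for the
        mailbox while the soledad cache says 7, this returns 7.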
        """
        uids = self.get_uids(mbox)
        last_mem_uid = max(uids) if uids else 0
        last_soledad_uid = self.get_last_soledad_uid(mbox)
        return max(last_mem_uid, last_soledad_uid)

    def get_last_soledad_uid(self, mbox):
        """
        Get last uid for a given mbox from the soledad integer cache.
        """
        return self._last_uid.get(mbox, 0)

    def set_last_soledad_uid(self, mbox, value):
        """
        Set last uid for a given mbox in the soledad integer cache.
        SoledadMailbox should prime this value during initialization.
        Other methods (during message adding) SHOULD call
        `increment_last_soledad_uid` instead.
        """
        leap_assert_type(value, int)
        print "setting last soledad uid for ", mbox, "to", value
        # if we already have a value here, don't do anything
        with self._last_uid_lock:
            if not self._last_uid.get(mbox, None):
                self._last_uid[mbox] = value

    def increment_last_soledad_uid(self, mbox):
        """
        Increment by one the soledad integer cache for the last_uid for
        this mbox, and fire a defer-to-thread to update the soledad value.
        The caller should lock the call to this method.
        """
        with self._last_uid_lock:
            self._last_uid[mbox] += 1
            value = self._last_uid[mbox]
            self.write_last_uid(mbox, value)
            return value

    @deferred
    def write_last_uid(self, mbox, value):
        """
        Write the last uid for the given mbox to the permanent store.
        """
        leap_assert_type(value, int)
        if self._permanent_store:
            self._permanent_store.write_last_uid(mbox, value)

    # Counting sheeps...

    def count_new_mbox(self, mbox):
        """
        Count the new messages for a given mailbox.
        """
        return len([(m, uid) for m, uid in self._new if m == mbox])

    def count_new(self):
        """
        Count all the new messages in the MemoryStore.
        """
        return len(self._new)

    def get_cdoc_from_phash(self, phash):
        """
        Return a content-document by its payload-hash.
        """
        doc = self._phash_store.get(phash, None)

        # XXX return None for consistency?

        # XXX have to keep a mapping between phash and its linkage
        # info, to know whether this payload has already been saved or not.
        # We will be able to get this from the linkage-docs,
        # not yet implemented.
        new = True
        dirty = False
        return MessagePartDoc(
            new=new, dirty=dirty, store="mem",
            part=MessagePartType.cdoc,
            content=doc,
            doc_id=None)

    def get_fdoc_from_chash(self, chash, mbox):
        """
        Return a flags-document by its content-hash and a given mailbox.

        :return: MessagePartDoc, or None.
        """
        docs_dict = self._chash_fdoc_store.get(chash, None)
        fdoc = docs_dict.get(mbox, None) if docs_dict else None

        # a couple of special cases.
        # 1. We might have a doc with empty content...
        if empty(fdoc):
            return None

        # 2. ...Or the message could exist, but be flagged for deletion.
        # We want to create a new one in this case.
        # Hmmm what if the deletion is un-done?? We would end up with a
        # duplicate...
        if fdoc and fields.DELETED_FLAG in fdoc[fields.FLAGS_KEY]:
            return None

        # XXX get flags
        new = True
        dirty = False
        return MessagePartDoc(
            new=new, dirty=dirty, store="mem",
            part=MessagePartType.fdoc,
            content=fdoc,
            doc_id=None)

    def all_msg_iter(self):
        """
        Return generator that iterates through all messages in the store.
        """
        return (self.get_message(*key)
                for key in sorted(self._msg_store.keys()))

    def all_new_dirty_msg_iter(self):
        """
        Return generator that iterates through all new and dirty messages.
        """
        return (self.get_message(*key)
                for key in sorted(self._msg_store.keys())
                if key in self._new or key in self._dirty)

    def all_msg_dict_for_mbox(self, mbox):
        """
        Return all the message dicts for a given mbox.
        """
        return [self._msg_store[(mb, uid)]
                for mb, uid in self._msg_store if mb == mbox]

    def all_deleted_uid_iter(self, mbox):
        """
        Return a list of the UIDs for all messages with the deleted flag
        in a given mailbox.
        """
        all_deleted = [
            msg['fdoc']['uid'] for msg in self.all_msg_dict_for_mbox(mbox)
            if msg.get('fdoc', None)
            and fields.DELETED_FLAG in msg['fdoc']['flags']]
        return all_deleted

    # new, dirty flags

    def _get_new_dirty_state(self, key):
        """
        Return `new` and `dirty` flags for a given message.
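
        E.g., for a key present in `self._new` but not in `self._dirty`,
        this returns [True, False].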
        """
        # XXX should return *first* the news, and *then* the dirty...
        return map(lambda _set: key in _set, (self._new, self._dirty))

    def set_new(self, key):
        """
        Add the key value to the `new` set.
        """
        self._new.add(key)

    def unset_new(self, key):
        """
        Remove the key value from the `new` set.
        """
        #print "Unsetting NEW for: %s" % str(key)
        self._new.discard(key)
        deferreds = self._new_deferreds
        d = deferreds.get(key, None)
        if d:
            # XXX use a namedtuple for passing the result
            # when we check it in the other side.
            d.callback('%s, ok' % str(key))
            deferreds.pop(key)

    def set_dirty(self, key):
        """
        Add the key value to the `dirty` set.
        """
        self._dirty.add(key)

    def unset_dirty(self, key):
        """
        Remove the key value from the `dirty` set.
        """
        #print "Unsetting DIRTY for: %s" % str(key)
        self._dirty.discard(key)
        deferreds = self._dirty_deferreds
        d = deferreds.get(key, None)
        if d:
            # XXX use a namedtuple for passing the result
            # when we check it in the other side.
            d.callback('%s, ok' % str(key))
            deferreds.pop(key)

    # Recent Flags

    # TODO --- nice but unused
    def set_recent_flag(self, mbox, uid):
        """
        Set the `Recent` flag for a given mailbox and UID.
        """
        self._rflags_dirty.add(mbox)
        self._rflags_store[mbox]['set'].add(uid)

    # TODO --- nice but unused
    def unset_recent_flag(self, mbox, uid):
        """
        Unset the `Recent` flag for a given mailbox and UID.
        """
        self._rflags_store[mbox]['set'].discard(uid)

    def set_recent_flags(self, mbox, value):
        """
        Set the value for the set of the recent flags.
        Used from the property in the MessageCollection.
        """
        self._rflags_dirty.add(mbox)
        self._rflags_store[mbox]['set'] = set(value)

    def load_recent_flags(self, mbox, flags_doc):
        """
        Load the passed flags document in the recent flags store, for a given
        mailbox.

        :param flags_doc: A dictionary containing the `doc_id` of the Soledad
                          flags-document for this mailbox, and the `set`
                          of uids marked with that flag.
        """
        self._rflags_store[mbox] = flags_doc

    def get_recent_flags(self, mbox):
        """
        Get the set of UIDs with the `Recent` flag for this mailbox.

        :return: set, or None
        """
        rflag_for_mbox = self._rflags_store.get(mbox, None)
        if not rflag_for_mbox:
            return None
        return self._rflags_store[mbox]['set']

    def all_rdocs_iter(self):
        """
        Return a generator that iterates through all in-memory recent-flags
        dicts, each wrapped in a RecentFlagsDoc namedtuple.
        Used for saving to disk. Note that consuming an item empties the
        corresponding in-memory recent-flags set, so each UID is reported
        at most once.

        :rtype: generator
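
        Illustrative shape of a yielded item (values are made up)::

            RecentFlagsDoc(doc_id='<u1db doc id>',
                           content={fields.TYPE_KEY: fields.TYPE_RECENT_VAL,
                                    fields.MBOX_KEY: 'INBOX',
                                    fields.RECENTFLAGS_KEY: [1, 2, 3]})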
        """
        # XXX use enums
        DOC_ID = "doc_id"
        SET = "set"

        rflags_store = self._rflags_store

        def get_rdoc(mbox, rdict):
            mbox_rflag_set = rdict[SET]
            recent_set = copy(mbox_rflag_set)
            # zero it!
            mbox_rflag_set.clear()
            return RecentFlagsDoc(
                doc_id=rflags_store[mbox][DOC_ID],
                content={
                    fields.TYPE_KEY: fields.TYPE_RECENT_VAL,
                    fields.MBOX_KEY: mbox,
                    fields.RECENTFLAGS_KEY: list(recent_set)
                })

        return (get_rdoc(mbox, rdict) for mbox, rdict in rflags_store.items()
                if not empty(rdict[SET]))

    # Methods that mirror the IMailbox interface

    def remove_all_deleted(self, mbox):
        """
        Remove all messages flagged \\Deleted from this Memory Store only.
        Called from `expunge`
        """
        mem_deleted = self.all_deleted_uid_iter(mbox)
        for uid in mem_deleted:
            self.remove_message(mbox, uid)
        return mem_deleted

    def expunge(self, mbox):
        """
        Remove all messages flagged \\Deleted, from the Memory Store
        and from the permanent store also.
        """
        # TODO expunge should add itself as a callback to the ongoing
        # writes.
        soledad_store = self._permanent_store

        try:
            # 1. Stop the writing call
            self._stop_write_loop()
            # 2. Enqueue a last write.
            #self.write_messages(soledad_store)
            # 3. Should wait on the writebacks to finish ???
            # FIXME wait for this, and add all the rest of the method
            # as a callback!!!
        except Exception as exc:
            logger.exception(exc)

        # Now, we...:
        all_deleted = set([])

        try:
            # 1. Delete all messages marked as deleted in soledad.

            # XXX this could be deferred for faster operation.
            if soledad_store:
                sol_deleted = soledad_store.remove_all_deleted(mbox)
            else:
                sol_deleted = []

            # 2. Delete all messages marked as deleted in memory.
            mem_deleted = self.remove_all_deleted(mbox)

            all_deleted = set(mem_deleted).union(set(sol_deleted))
            print "deleted ", all_deleted
        except Exception as exc:
            logger.exception(exc)
        finally:
            self._start_write_loop()
        return all_deleted

    # Dump-to-disk controls.

    @property
    def is_writing(self):
        """
        Property that returns whether the store is currently writing its
        internal state to a permanent storage.

        Used to evaluate whether the CHECK command can report that the field
        is clear to proceed, or whether it needs to wait for the write
        operations to complete instead.

        :rtype: bool
        """
        # FIXME this should return a deferred !!!
        # XXX ----- can fire when all new + dirty deferreds
        # are done (gatherResults)
        return getattr(self, self.WRITING_FLAG)

    def put_part(self, part_type, value):
        """
        Put the passed part into this IMessageStore.
        `part_type` should be one of: fdoc, hdoc, cdoc
        """
        # XXX turn that into a enum

    # Memory management.

    def get_size(self):
        """
        Return the size of the internal storage.
        Used for calculating the limit beyond which we should flush the store.
        """
        return size.get_size(self._msg_store)