Diffstat (limited to 'Mailman/Queue/Switchboard.py')
-rw-r--r--  Mailman/Queue/Switchboard.py  30
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/Mailman/Queue/Switchboard.py b/Mailman/Queue/Switchboard.py
index 84e8a5e3..895d6df2 100644
--- a/Mailman/Queue/Switchboard.py
+++ b/Mailman/Queue/Switchboard.py
@@ -63,6 +63,11 @@ SAVE_MSGS_AS_PICKLES = True
 # Small increment to add to time in case two entries have the same time. This
 # prevents skipping one of two entries with the same time until the next pass.
 DELTA = .0001
+# We count the number of times a file has been dequeued. This can be more
+# than one if the file has been moved to .bak and recovered. In order to
+# prevent loops and a message flood, when the count reaches this value, we
+# move the file to the shunt queue as a .psv.
+MAX_BAK_COUNT = 3
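
The new MAX_BAK_COUNT constant caps how many times a single queue entry may be recovered from its .bak copy and dequeued again before it is set aside. The following is a minimal standalone sketch of that bookkeeping, not Mailman's code; the helper name bump_and_check and the bare dict standing in for the pickled metadata are illustrative only.

# Sketch only: the _bak_count key mirrors the one the patch stores in the
# queue entry's metadata pickle; bump_and_check is a hypothetical helper.
MAX_BAK_COUNT = 3

def bump_and_check(data):
    # Bump the per-entry dequeue count and report whether the entry has now
    # been dequeued MAX_BAK_COUNT or more times and should be shunted (.psv).
    data['_bak_count'] = data.setdefault('_bak_count', 0) + 1
    return data['_bak_count'] >= MAX_BAK_COUNT

data = {}
assert [bump_and_check(data) for _ in range(4)] == [False, False, True, True]

Because the incremented count is written back into the queue file itself (see the second hunk), the cap survives exactly the kind of crash it is meant to defend against; an in-memory counter would be lost on every restart.
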
@@ -147,19 +152,34 @@ class Switchboard:
         filename = os.path.join(self.__whichq, filebase + '.pck')
         backfile = os.path.join(self.__whichq, filebase + '.bak')
         # Read the message object and metadata.
-        fp = open(filename)
+        fp = open(filename, 'rb+')
         # Move the file to the backup file name for processing. If this
         # process crashes uncleanly the .bak file will be used to re-instate
-        # the .pck file in order to try again. XXX what if something caused
-        # Python to constantly crash? Is it possible that we'd end up mail
-        # bombing recipients or crushing the archiver? How would we defend
-        # against that?
+        # the .pck file in order to try again. In the metadata's _bak_count
+        # key, which we rewrite in place, we count how many times we recover
+        # and dequeue this file. When the count reaches MAX_BAK_COUNT, we
+        # move the .bak file to a .psv file in the shunt queue.
         os.rename(filename, backfile)
         try:
             msg = cPickle.load(fp)
+            data_pos = fp.tell()
             data = cPickle.load(fp)
+            data['_bak_count'] = data.setdefault('_bak_count', 0) + 1
+            fp.seek(data_pos)
+            if data.get('_parsemsg'):
+                protocol = 0
+            else:
+                protocol = 1
+            cPickle.dump(data, fp, protocol)
+            fp.truncate()
+            fp.flush()
+            os.fsync(fp.fileno())
         finally:
             fp.close()
+        if data['_bak_count'] >= MAX_BAK_COUNT:
+            syslog('error', '.bak file max count, preserving file: %s',
+                   filebase)
+            self.finish(filebase, preserve=True)
         if data.get('_parsemsg'):
             msg = email.message_from_string(msg, Message.Message)
         return msg, data
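
For context, the shunt and recovery steps this hunk relies on are not shown in the diff. The sketch below fills in that surrounding lifecycle purely as an assumption drawn from the comments and from the self.finish(filebase, preserve=True) call: a .bak file is normally deleted on successful completion, re-instated as a .pck after an unclean shutdown, and preserved as a .psv in the shunt queue once the cap is hit. The module-level functions and the explicit whichq/shuntq parameters are hypothetical simplifications, not Switchboard's actual API.

# Assumed lifecycle sketch, not Mailman's implementation.
import os

def finish(whichq, shuntq, filebase, preserve=False):
    # Called after a dequeued entry has been handled (or given up on).
    backfile = os.path.join(whichq, filebase + '.bak')
    if preserve:
        # Over the cap: park the entry as a .psv in the shunt queue where a
        # human can inspect it; it will not be dequeued again automatically.
        os.rename(backfile, os.path.join(shuntq, filebase + '.psv'))
    else:
        # Normal completion: the backup copy is no longer needed.
        os.unlink(backfile)

def recover_backup_files(whichq):
    # After an unclean shutdown, re-instate any leftover .bak files as .pck
    # files so the next pass picks them up; each such recovery is what makes
    # _bak_count grow past 1 when the file is dequeued again.
    for name in os.listdir(whichq):
        if name.endswith('.bak'):
            base = name[:-len('.bak')]
            os.rename(os.path.join(whichq, name),
                      os.path.join(whichq, base + '.pck'))

Rewriting and fsync'ing the metadata before the message is handled means the count on disk already reflects the current attempt, so even an immediate crash after dequeue cannot produce an unbounded retry loop or a mail flood.
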