From 7d2756dd120800899f30b74ca68787e1044bed7c Mon Sep 17 00:00:00 2001 From: Tomas Touceda Date: Thu, 25 Apr 2013 12:12:02 -0300 Subject: Reorder files, normalize repo and add sample config --- COPYLEFT | 13 -- DESIGN.md | 238 --------------------- NOTES.md | 59 ----- bootstrap | 59 ----- doc/DESIGN.md | 238 +++++++++++++++++++++ doc/NOTES.md | 59 +++++ gpg-key-generator | 209 ------------------ gpg-keys/generated-2013-02-15_19-15-001.pub | Bin 2779 -> 0 bytes gpg-keys/generated-2013-02-15_19-15-001.sec | Bin 5453 -> 0 bytes gpg-keys/generated-2013-02-15_19-15-002.pub | Bin 2779 -> 0 bytes gpg-keys/generated-2013-02-15_19-15-002.sec | Bin 5453 -> 0 bytes gpg-keys/generated-2013-02-15_19-15-003.pub | Bin 2779 -> 0 bytes gpg-keys/generated-2013-02-15_19-15-003.sec | Bin 5453 -> 0 bytes gpg-keys/gpg-batch-key-script | 45 ---- pkg/utils/bootstrap | 59 +++++ pkg/utils/gpg-key-generator | 209 ++++++++++++++++++ .../gpg-keys/generated-2013-02-15_19-15-001.pub | Bin 0 -> 2779 bytes .../gpg-keys/generated-2013-02-15_19-15-001.sec | Bin 0 -> 5453 bytes .../gpg-keys/generated-2013-02-15_19-15-002.pub | Bin 0 -> 2779 bytes .../gpg-keys/generated-2013-02-15_19-15-002.sec | Bin 0 -> 5453 bytes .../gpg-keys/generated-2013-02-15_19-15-003.pub | Bin 0 -> 2779 bytes .../gpg-keys/generated-2013-02-15_19-15-003.sec | Bin 0 -> 5453 bytes pkg/utils/gpg-keys/gpg-batch-key-script | 45 ++++ pkg/utils/test_bootstrap | 39 ++++ sample-config/mx.conf | 9 + test_bootstrap | 39 ---- 26 files changed, 658 insertions(+), 662 deletions(-) delete mode 100644 COPYLEFT delete mode 100644 DESIGN.md delete mode 100644 NOTES.md delete mode 100755 bootstrap create mode 100644 doc/DESIGN.md create mode 100644 doc/NOTES.md delete mode 100755 gpg-key-generator delete mode 100644 gpg-keys/generated-2013-02-15_19-15-001.pub delete mode 100644 gpg-keys/generated-2013-02-15_19-15-001.sec delete mode 100644 gpg-keys/generated-2013-02-15_19-15-002.pub delete mode 100644 gpg-keys/generated-2013-02-15_19-15-002.sec delete mode 100644 gpg-keys/generated-2013-02-15_19-15-003.pub delete mode 100644 gpg-keys/generated-2013-02-15_19-15-003.sec delete mode 100644 gpg-keys/gpg-batch-key-script create mode 100755 pkg/utils/bootstrap create mode 100755 pkg/utils/gpg-key-generator create mode 100644 pkg/utils/gpg-keys/generated-2013-02-15_19-15-001.pub create mode 100644 pkg/utils/gpg-keys/generated-2013-02-15_19-15-001.sec create mode 100644 pkg/utils/gpg-keys/generated-2013-02-15_19-15-002.pub create mode 100644 pkg/utils/gpg-keys/generated-2013-02-15_19-15-002.sec create mode 100644 pkg/utils/gpg-keys/generated-2013-02-15_19-15-003.pub create mode 100644 pkg/utils/gpg-keys/generated-2013-02-15_19-15-003.sec create mode 100644 pkg/utils/gpg-keys/gpg-batch-key-script create mode 100755 pkg/utils/test_bootstrap create mode 100644 sample-config/mx.conf delete mode 100755 test_bootstrap diff --git a/COPYLEFT b/COPYLEFT deleted file mode 100644 index d8e7331..0000000 --- a/COPYLEFT +++ /dev/null @@ -1,13 +0,0 @@ - - This file is part of leap_mx, an encrypting mail exchange program. - Copyright (C) 2013 Isis Lovecruft - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published - by the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - GNU Affero General Public License for more details. diff --git a/DESIGN.md b/DESIGN.md deleted file mode 100644 index 2d9fe82..0000000 --- a/DESIGN.md +++ /dev/null @@ -1,238 +0,0 @@ -# design # - -## overview # ----------------------- -This page pertains to the incoming mail exchange servers of the provider. - -General overview of how incoming email will work: - - 1. Incoming message is received by provider's MX servers. - 2. The MTA (postfix in our case) does a ton of checks on the message before we - even check to see if the recipient is valid (this comes from experience - running the riseup mail infrastructure, where the vast majority of messages - can be rejected early in the SMTP reception and thus save a ton of processing - time on the server). - 3. Postfix then queries the database to check if the recipient is valid, if - they are over quota, if their account is enabled, and to resolve any aliases - for the account. - 4. The message is then delivered to an on-disk message spool. - 5. A daemon watches for new files in this spool. Each message is encrypted to - the user's public key, and stored in the user's incoming message queue (stored - in couchdb), and removed from disk. - 6. When the user next logs in with their client, the user's message queue is - emptied by the client. - 7. Each message is decrypted by the client, and then stored in the user's - "inbox" as an unread message. - 8. This local inbox uses soledad for storage - 9. Soledad, in the background, will then re-encrypt this email (now a soledad - document), and sync to the cloud. - -## postfix pipeline ## ---------------------------- -incoming mx servers will run postfix, configured in a particular way: - - 1. postscreen: before accepting an incoming message, checks RBLs, checks RFC - validity, checks for spam pipelining. - (pass) proceed to next step. - (fail) return SMTP error, which bounces email. - 2. more SMTP checks: valid hostnames, etc. - (pass) accepted, proceed to next step. - (fail) return SMTP error, which bounces email. - 3. check_recipient_access -- look up each recipient and ensure they are - allowed to receive messages. - (pass) empty result, proceed to next step. - (fail) return SMTP error code and error comment, bounce message. - 4. milter processessing (spamassassin & clamav) - (pass) continue - (fail) bounce message, flag as spam, or silently kill. - 5. virtual_alias_maps -- map user defined aliases and forwards - (local address) continue if new address is for this mx - (remote address) continue. normally, postfix would relay to the remote domain, but we don't want that. - 6. deliver message to spool - (write) save the message to disk on the mx. - 7. postfix's job is done, mail_receiver picks up email from spool directory - -Questions: - - * what is the best way to have postfix write a message to a spool directory? - There is a built-in facility for saving to a maildir, so we could just - specify a common maildir for everyone. alternately, we could pipe to a - simple command that was responsible for safely saving the file to disk. a - third possibility would be to have a local long running daemon that spoke - lmtp that postfix forward the message on to for delivery. - * if virtual_alias_maps comes after check_recipient_access, then a user with - aliases set but who is over quota will not be able to forward email. i think - this is fine. - * if we are going to support forwarding, we should ensure that the message - gets encrypted before getting forwarded. 
so, postfix should not do any - forwarding. instead, this should be the job of mail_receiver. - -Considerations: - - 1. high load should fill queue, not crash pipeline: It is important that the - pipeline be able to handle massive bursts of email, as often happens with - email. This means map lookups need to be very fast, and when there is a high - load of email postfix should not be waiting on the mail receiver but must be - able to pass the message off quickly and have the slower mail receiver churn - through the backlog as best it can. - 2. don't lose messages: It is important to not lose any messages when there is - a problem. So, generally, a copy of an email should always exist in some spool - somewhere, and that copy should not be deleted until there is confirmation - that the next stage has succeeded. - -## alias_resolver ## ------------------------------- -The alias_resolver will be a daemon running on MX servers that handles lookups -in the user database of email aliases, forwards, quota, and account status. - -Communication with: - - 1. postfix:: alias_resolver will be bound to localhost and speak postfix's - very simple [tcp map protocol -> http://www.postfix.org/tcp_table.5.html]. - - 2. couchdb:: alias_resolver will make couchdb queries to a local http load - balancer that connects to a couchdb/bigcouch - cluster. [directly accessing the couch->https://we.riseup.net/leap+platform/querying-the-couchdb] - might help getting started. - -### Discussion: ### - - 1. we want the lookups to be fast. using views in couchdb, these should be - very fast. when using bigcouch, we can make it faster by specifying a read - quorum of 1 (instead of the default 2). this will make it so that only a - single couchdb needs to be queried to find the result. i don't know if this - would cause problems, but aliases don't change very often. - -alias_resolver will be responsible for two map lookups in postfix: - -#### check_recipient #### -------------------------- -postfix config: - -@check_recipient_access tcp:localhost:1000@ - -postfix will send "get username@domain.org" and alias_resolver should return an -empty result ("200 \n", i think) if postfix should deliver email to the -user. otherwise, it should return an error. here is example response, verbatim, -that can be used to bounce over quota users: - -``` -200 DEFER_IF_PERMIT Sorry, your message cannot be delivered because the -recipient's mailbox is full. If you can contact them another way, you may wish -to tell them of this problem. -``` - -"DEFER_IF_PERMIT" will let the other MX know that this error is temporary and -that they should try again soon. Typically, an MX will try repeatedly, at -longer and longer intervals, for four days before giving up. - -#### virtual alias map #### ---------------------------- -postfix config: - -@virtual_alias_map tcp:localhost:1001@ - -postfix will send "get alias-address@domain.org" and alias_resolver should -return "200 id_123456\n", where 123456 is the unique id of the user that has -alias-address@domain.org. - -couchdb should have a view that will let us query on an (alias) address and -return the user id. - -note: if the result of the alias map (e.g. id_123456) does not have a domain -suffix, i think postfix will use the 'default transport'. if we want it to use -the virtual transport instead, we should append the domain (eg -id_123456@example.org). 
see -http://www.postfix.org/ADDRESS_REWRITING_README.html#resolve - - -### Current status: ### -The current implementation of alias_resolver is in -leap-mx/src/leap/mx/alias_resolver.py. - -The class ```alias_resolver.StatusCodes``` deals with creating SMTP-like -response messages for Postfix, speaking Postfix's TCP Map protocol (from item -#1). - -As for Discussion item #1: - -It might be possible to use -[python-memcached](https://pypi.python.org/pypi/python-memcached/) as an -interface to a [memcached](http://memcached.org/) instance to speed up database -lookups, by keeping an in memory mapping of recent request/response -pairs. Also, Twisted now (I think as of 12.0.0) ships with a protocol for -handling Memcached servers, this is in ```twisted.protocols.memcache```. This -should be prioritised for later, if it is decided that querying the CouchDB is -too expensive or time-consuming. - -Thus far, to speed up alias lookup, an in-memory mapping of alias<->resolution -pairs is created by ```alias_resolver.AliasResolverFactory()```, which can be -optionally seeded with a dictionary of ```{ 'alias': 'resolution' }``` pairs -by doing: -~~~~~~ ->>> from leap.mx import alias_resolver ->>> aliasResolverFactory = alias_resolver.AliasResolverFactory( -... addr='1.2.3.4', port=4242, data={'isis': 'isis@leap.se', -... 'drebs': 'drebs@leap.se'}) ->>> aliasResolver = aliasResolverFactory.buildProtocol() ->>> aliasResolver.check_recipient_access('isis') -200 OK Others might say 'HELLA AWESOME'...but we're not convinced. -~~~~~~ - -TODO: - 1. The AliasResolverFactory needs to be connected to the CouchDB. The - classmethod in which this should occur is ```AliasResolverFactory.get()```. - - 2. I am not sure where to get the user's UUID from (Soledad?). Wherever we get - it from, it will need to be returned in - ```AliasResolver.virtual_alias_map()```, and if we want Postfix to hear about - it, then that response will need to be fed into ```AliasResolver.sendCode```. - - 3. Other than those two things, I think everything is done. The only potential - other thing I can think of is that the codes in - ```alias_resolver.StatusCodes``` might need to be urlencoded for Postfix to - accept them, but this is like two lines of code from urllib. - - - -## mail_receiver ## - -the mail_receiver is a daemon that runs on incoming MX servers and is -responsible for encrypting incoming email to the user's public key and saving -the email to an incoming queue database for that user. - -communicates with: - - * message spool directory:: mail_reciever sits and waits for new email to be - written to the spool directory (maybe using this - https://github.com/seb-m/pyinotify, i think it is better than FAM). when a - new file is dumped into the spool, mail_receiver reads the file, encrypts - the entire thing using the public key of the recipient, and saves to - couchdb. - * couchdb get:: mail_receiver does a query on user id to get back user's - public openpgp key. read quorum of 1 is probably ok. - * couchdb put:: mail_receiver communicates with couchdb for storing encrypted - email for each user (eventually, mail_receiver will communicate with a local - http proxy, that communicates with a bigcouch cluster, but the api is - identical) - -discussion: - * i am not sure if postfix adds a header to indicate to whom a message was - actually delivered. if not, this is a problem, because then how do we know - what db to put it in or what public key to use? 
this is perhaps a good - reason to not let postfix handle writing the message to disk, but instead - pipe it to another command (because postfix sets env variables for stuff - like recipient). - - * should the incoming message queue be a separate database or should it be - just documents in the user's main database with special flags? - - * whenever possible, we should refer to the user by a fixed id, not their - username, because we want to support the ability to change usernames. so, - for example, database names should not be based on usernames. - -### Current Status: ### -None of this is done, although having it be a separate daemon sound weird. - -You would probably want to use ```twisted.mail.mail.FileMonitoringService``` to -watch the mailbox (is the mailbox virtual or a maildir or mbox or?) diff --git a/NOTES.md b/NOTES.md deleted file mode 100644 index a53f49d..0000000 --- a/NOTES.md +++ /dev/null @@ -1,59 +0,0 @@ - -# Questions # -------------- - -1. What is the lowest available RAM for a target server running a leap_mx? - 1.a. Do we want to store all id_keys and/or aliases in memory? - -2. Asked in discussion section of '''postfix pipeline''' on the [leap_mx wiki -page](https://we.riseup.net/leap/mx) : - - "What is the best way to have postfix write a message to a spool directory? - There is a built-in facility for saving to a maildir, so we could just - specify a common maildir for everyone. alternately, we could pipe to a - simple command that was responsible for safely saving the file to disk. a - third possibility would be to have a local long running daemon that spoke - lmtp that postfix forward the message on to for delivery." - - I think that maildir is fine, but perhaps this will slow things down more - than monitoring a spool file. I would also imagine that if the server is - supposed to stand up to high loads, a spool file I/O blocks with every - email added to the queue. - -3. How do get it to go faster? Should we create some mockups and benchmark -them? Could we attempt to learn which aliases are most often resolved and -prioritize keeping those in in-memory mappings? Is -[memcache](http://code.sixapart.com/svn/memcached/trunk/server/doc/protocol.txt) -a viable protocol for this, and how would it interact with CouchDB? - -4. What lib should we use for Python + Twisted + GPG/PGP ? - 4.a. It looks like most people are using python-gnupg... - - -## Tickets ## -------------- - -'''To be created:''' - -ticket for feature-alias_resolver_couchdb_support: - - o The alias resolver needs to speak to a couchdb/bigcouch - instance(s). Currently, it merely creates an in-memory dictionary - mapping. It seems like paisley is the best library for this. - -ticket for feature-check_recipient: - - o Need various errors for anything that could go wrong, e.g. the recipient - address is malformed, sender doesn't have permissions to send to such - address, etc. - o These errcodes need to follow the SMTP server transport code spec. - -ticket for feature-virtual_alias_map: - - o Get the recipient's userid from couchdb. - -ticket for feature-evaluate_python_gnupg: - - o Briefly audit library in order to assess if it has the necessary - features, as well as its general code quality. - diff --git a/bootstrap b/bootstrap deleted file mode 100755 index b76a572..0000000 --- a/bootstrap +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash -############################################################################## -# -# bootstrap -# ----------------------- -# Setup a virtualenv, without ever using sudo. 
-# -# @author Isis Agora Lovecruft, 0x2cdb8b35 -# @date 18 February 2013 -# @version 0.0.1 -############################################################################## - -set -ex -- - -PYTHON=$(which python) -GIT=$(which git) - -VENV_VERSION=1.8.4 -VENV_URL=https://pypi.python.org/packages/source/v/virtualenv -VENV_TARBALL=virtualenv-${VENV_VERSION}.tar.gz - -VENV_WRAPPER_VERSION=3.6 -VENV_WRAPPER_URL=https://pypi.python.org/packages/source/v/virtualenvwrapper -VENV_WRAPPER_TARBALL=virtualenvwrapper-${VENV_WRAPPER_VERSION}.tar.gz - -BOOTSTRAP_ENV=.bootstrap -BOOTSTRAP_OPTS='--no-site-packages --setuptools --unzip-setuptools --never-download' - -PACKAGE_NAME=leap_mx -PACKAGE_URL=https://github.com/isislovecruft/leap_mx.git -PACKAGE_WORKON=${PWD}/${PACKAGE_NAME} -PACKAGE_REQUIREMENTS=${PACKAGE_WORKON}/pkg/mx-requirements.pip -PACKAGE_OPTS=${BOOTSTRAP_OPTS}'' ## xxx add parameter for extra options - -echo 'Downloading virtualenv source from' -echo "${VENV_URL}..." -\wget -O ${VENV_TARBALL} ${VENV_URL}/${VENV_TARBALL} -tar xvzf ${VENV_TARBALL} - -echo 'Downloading virtualenv-wrapper source from:' -echo "${VENV_WRAPPER_URL}" -\wget -O $VENV_WRAPPER_TARBALL ${VENV_WRAPPER_URL}/${VENV_WRAPPER_TARBALL} -tar xvzf virtualenvwrapper-${VENV_WRAPPER_VERSION}.tar.gz - - -echo 'Creating initial virtualenv bootstrap environment, called "bootstrap"' -echo 'in which we will install virtualenv, to avoid using sudo.' -$PYTHON virtualenv-${VENV_VERSION}/virtualenv.py $BOOTSTRAP_OPTS $BOOTSTRAP_ENV -rm -rf virtualenv-${VENV_VERSION} -${BOOTSTRAP_ENV}/bin/pip install ${VENV_TARBALL} -echo 'Installing virtualenvwrapper in "bootstrap" virtualenv...' -${BOOTSTRAP_ENV}/bin/pip install ${VENV_WRAPPER_TARBALL} - -echo 'Using "bootstrap" virtualenv to create project virtualenv...' -source ${BOOTSTRAP_ENV}/local/bin/virtualenvwrapper.sh -echo "Cloning from ${PACKAGE_URL}..." -${GIT} clone ${PACKAGE_URL} ${PACKAGE_NAME} -mkvirtualenv -a $PROJECT_WORKON -r ${PACKAGE_REQUIREMENTS} \ - ${PACKAGE_OPTS} ${PACKAGE_NAME} diff --git a/doc/DESIGN.md b/doc/DESIGN.md new file mode 100644 index 0000000..2d9fe82 --- /dev/null +++ b/doc/DESIGN.md @@ -0,0 +1,238 @@ +# design # + +## overview # +---------------------- +This page pertains to the incoming mail exchange servers of the provider. + +General overview of how incoming email will work: + + 1. Incoming message is received by provider's MX servers. + 2. The MTA (postfix in our case) does a ton of checks on the message before we + even check to see if the recipient is valid (this comes from experience + running the riseup mail infrastructure, where the vast majority of messages + can be rejected early in the SMTP reception and thus save a ton of processing + time on the server). + 3. Postfix then queries the database to check if the recipient is valid, if + they are over quota, if their account is enabled, and to resolve any aliases + for the account. + 4. The message is then delivered to an on-disk message spool. + 5. A daemon watches for new files in this spool. Each message is encrypted to + the user's public key, and stored in the user's incoming message queue (stored + in couchdb), and removed from disk. + 6. When the user next logs in with their client, the user's message queue is + emptied by the client. + 7. Each message is decrypted by the client, and then stored in the user's + "inbox" as an unread message. + 8. This local inbox uses soledad for storage + 9. Soledad, in the background, will then re-encrypt this email (now a soledad + document), and sync to the cloud. 
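to make steps 4-5 above a bit more concrete, here is a rough sketch of the spool-watching daemon, assuming python-gnupg and the python couchdb package. the spool path, database names, document layout, and the Delivered-To lookup are all illustrative placeholders, not settled decisions (see the mail_receiver discussion below).

```
# rough sketch of step 5: pick up each spooled message, encrypt it to the
# recipient's public key, store it in the user's incoming queue in couchdb,
# and only then remove it from disk.
import os
import email
import gnupg
import couchdb

SPOOL = '/var/mail/leap-mx-spool'            # placeholder spool directory
server = couchdb.Server('http://localhost:5984')
identities = server['identities']            # placeholder: maps user id -> public key
incoming = server['incoming']                # placeholder: per-user incoming queue
gpg = gnupg.GPG(gnupghome='/var/mail/leap-mx-keys')

def process_spool_once():
    for name in os.listdir(SPOOL):
        path = os.path.join(SPOOL, name)
        with open(path) as spooled:
            raw = spooled.read()
        # assumes postfix recorded the actual recipient in a header; whether it
        # does is an open question (see the mail_receiver discussion below)
        recipient = email.message_from_string(raw).get('Delivered-To', '')
        uid = recipient.split('@')[0]
        pubkey = identities[uid]['public_key']          # placeholder doc layout
        fpr = gpg.import_keys(pubkey).fingerprints[0]
        encrypted = gpg.encrypt(raw, fpr, always_trust=True)
        incoming.save({'user_id': uid, 'incoming': True,
                       'content': str(encrypted)})
        os.remove(path)   # keep the spool copy until couch confirms the write
```
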
+ +## postfix pipeline ## +--------------------------- +incoming mx servers will run postfix, configured in a particular way: + + 1. postscreen: before accepting an incoming message, checks RBLs, checks RFC + validity, checks for spam pipelining. + (pass) proceed to next step. + (fail) return SMTP error, which bounces email. + 2. more SMTP checks: valid hostnames, etc. + (pass) accepted, proceed to next step. + (fail) return SMTP error, which bounces email. + 3. check_recipient_access -- look up each recipient and ensure they are + allowed to receive messages. + (pass) empty result, proceed to next step. + (fail) return SMTP error code and error comment, bounce message. + 4. milter processessing (spamassassin & clamav) + (pass) continue + (fail) bounce message, flag as spam, or silently kill. + 5. virtual_alias_maps -- map user defined aliases and forwards + (local address) continue if new address is for this mx + (remote address) continue. normally, postfix would relay to the remote domain, but we don't want that. + 6. deliver message to spool + (write) save the message to disk on the mx. + 7. postfix's job is done, mail_receiver picks up email from spool directory + +Questions: + + * what is the best way to have postfix write a message to a spool directory? + There is a built-in facility for saving to a maildir, so we could just + specify a common maildir for everyone. alternately, we could pipe to a + simple command that was responsible for safely saving the file to disk. a + third possibility would be to have a local long running daemon that spoke + lmtp that postfix forward the message on to for delivery. + * if virtual_alias_maps comes after check_recipient_access, then a user with + aliases set but who is over quota will not be able to forward email. i think + this is fine. + * if we are going to support forwarding, we should ensure that the message + gets encrypted before getting forwarded. so, postfix should not do any + forwarding. instead, this should be the job of mail_receiver. + +Considerations: + + 1. high load should fill queue, not crash pipeline: It is important that the + pipeline be able to handle massive bursts of email, as often happens with + email. This means map lookups need to be very fast, and when there is a high + load of email postfix should not be waiting on the mail receiver but must be + able to pass the message off quickly and have the slower mail receiver churn + through the backlog as best it can. + 2. don't lose messages: It is important to not lose any messages when there is + a problem. So, generally, a copy of an email should always exist in some spool + somewhere, and that copy should not be deleted until there is confirmation + that the next stage has succeeded. + +## alias_resolver ## +------------------------------ +The alias_resolver will be a daemon running on MX servers that handles lookups +in the user database of email aliases, forwards, quota, and account status. + +Communication with: + + 1. postfix:: alias_resolver will be bound to localhost and speak postfix's + very simple [tcp map protocol -> http://www.postfix.org/tcp_table.5.html]. + + 2. couchdb:: alias_resolver will make couchdb queries to a local http load + balancer that connects to a couchdb/bigcouch + cluster. [directly accessing the couch->https://we.riseup.net/leap+platform/querying-the-couchdb] + might help getting started. + +### Discussion: ### + + 1. we want the lookups to be fast. using views in couchdb, these should be + very fast. 
when using bigcouch, we can make it faster by specifying a read + quorum of 1 (instead of the default 2). this will make it so that only a + single couchdb needs to be queried to find the result. i don't know if this + would cause problems, but aliases don't change very often. + +alias_resolver will be responsible for two map lookups in postfix: + +#### check_recipient #### +------------------------- +postfix config: + +@check_recipient_access tcp:localhost:1000@ + +postfix will send "get username@domain.org" and alias_resolver should return an +empty result ("200 \n", i think) if postfix should deliver email to the +user. otherwise, it should return an error. here is example response, verbatim, +that can be used to bounce over quota users: + +``` +200 DEFER_IF_PERMIT Sorry, your message cannot be delivered because the +recipient's mailbox is full. If you can contact them another way, you may wish +to tell them of this problem. +``` + +"DEFER_IF_PERMIT" will let the other MX know that this error is temporary and +that they should try again soon. Typically, an MX will try repeatedly, at +longer and longer intervals, for four days before giving up. + +#### virtual alias map #### +--------------------------- +postfix config: + +@virtual_alias_map tcp:localhost:1001@ + +postfix will send "get alias-address@domain.org" and alias_resolver should +return "200 id_123456\n", where 123456 is the unique id of the user that has +alias-address@domain.org. + +couchdb should have a view that will let us query on an (alias) address and +return the user id. + +note: if the result of the alias map (e.g. id_123456) does not have a domain +suffix, i think postfix will use the 'default transport'. if we want it to use +the virtual transport instead, we should append the domain (eg +id_123456@example.org). see +http://www.postfix.org/ADDRESS_REWRITING_README.html#resolve + + +### Current status: ### +The current implementation of alias_resolver is in +leap-mx/src/leap/mx/alias_resolver.py. + +The class ```alias_resolver.StatusCodes``` deals with creating SMTP-like +response messages for Postfix, speaking Postfix's TCP Map protocol (from item +#1). + +As for Discussion item #1: + +It might be possible to use +[python-memcached](https://pypi.python.org/pypi/python-memcached/) as an +interface to a [memcached](http://memcached.org/) instance to speed up database +lookups, by keeping an in memory mapping of recent request/response +pairs. Also, Twisted now (I think as of 12.0.0) ships with a protocol for +handling Memcached servers, this is in ```twisted.protocols.memcache```. This +should be prioritised for later, if it is decided that querying the CouchDB is +too expensive or time-consuming. + +Thus far, to speed up alias lookup, an in-memory mapping of alias<->resolution +pairs is created by ```alias_resolver.AliasResolverFactory()```, which can be +optionally seeded with a dictionary of ```{ 'alias': 'resolution' }``` pairs +by doing: +~~~~~~ +>>> from leap.mx import alias_resolver +>>> aliasResolverFactory = alias_resolver.AliasResolverFactory( +... addr='1.2.3.4', port=4242, data={'isis': 'isis@leap.se', +... 'drebs': 'drebs@leap.se'}) +>>> aliasResolver = aliasResolverFactory.buildProtocol() +>>> aliasResolver.check_recipient_access('isis') +200 OK Others might say 'HELLA AWESOME'...but we're not convinced. +~~~~~~ + +TODO: + 1. The AliasResolverFactory needs to be connected to the CouchDB. The + classmethod in which this should occur is ```AliasResolverFactory.get()```. + + 2. 
I am not sure where to get the user's UUID from (Soledad?). Wherever we get + it from, it will need to be returned in + ```AliasResolver.virtual_alias_map()```, and if we want Postfix to hear about + it, then that response will need to be fed into ```AliasResolver.sendCode```. + + 3. Other than those two things, I think everything is done. The only potential + other thing I can think of is that the codes in + ```alias_resolver.StatusCodes``` might need to be urlencoded for Postfix to + accept them, but this is like two lines of code from urllib. + + + +## mail_receiver ## + +the mail_receiver is a daemon that runs on incoming MX servers and is +responsible for encrypting incoming email to the user's public key and saving +the email to an incoming queue database for that user. + +communicates with: + + * message spool directory:: mail_reciever sits and waits for new email to be + written to the spool directory (maybe using this + https://github.com/seb-m/pyinotify, i think it is better than FAM). when a + new file is dumped into the spool, mail_receiver reads the file, encrypts + the entire thing using the public key of the recipient, and saves to + couchdb. + * couchdb get:: mail_receiver does a query on user id to get back user's + public openpgp key. read quorum of 1 is probably ok. + * couchdb put:: mail_receiver communicates with couchdb for storing encrypted + email for each user (eventually, mail_receiver will communicate with a local + http proxy, that communicates with a bigcouch cluster, but the api is + identical) + +discussion: + * i am not sure if postfix adds a header to indicate to whom a message was + actually delivered. if not, this is a problem, because then how do we know + what db to put it in or what public key to use? this is perhaps a good + reason to not let postfix handle writing the message to disk, but instead + pipe it to another command (because postfix sets env variables for stuff + like recipient). + + * should the incoming message queue be a separate database or should it be + just documents in the user's main database with special flags? + + * whenever possible, we should refer to the user by a fixed id, not their + username, because we want to support the ability to change usernames. so, + for example, database names should not be based on usernames. + +### Current Status: ### +None of this is done, although having it be a separate daemon sound weird. + +You would probably want to use ```twisted.mail.mail.FileMonitoringService``` to +watch the mailbox (is the mailbox virtual or a maildir or mbox or?) diff --git a/doc/NOTES.md b/doc/NOTES.md new file mode 100644 index 0000000..a53f49d --- /dev/null +++ b/doc/NOTES.md @@ -0,0 +1,59 @@ + +# Questions # +------------- + +1. What is the lowest available RAM for a target server running a leap_mx? + 1.a. Do we want to store all id_keys and/or aliases in memory? + +2. Asked in discussion section of '''postfix pipeline''' on the [leap_mx wiki +page](https://we.riseup.net/leap/mx) : + + "What is the best way to have postfix write a message to a spool directory? + There is a built-in facility for saving to a maildir, so we could just + specify a common maildir for everyone. alternately, we could pipe to a + simple command that was responsible for safely saving the file to disk. a + third possibility would be to have a local long running daemon that spoke + lmtp that postfix forward the message on to for delivery." 
+ + I think that maildir is fine, but perhaps this will slow things down more + than monitoring a spool file. I would also imagine that if the server is + supposed to stand up to high loads, a spool file I/O blocks with every + email added to the queue. + +3. How do get it to go faster? Should we create some mockups and benchmark +them? Could we attempt to learn which aliases are most often resolved and +prioritize keeping those in in-memory mappings? Is +[memcache](http://code.sixapart.com/svn/memcached/trunk/server/doc/protocol.txt) +a viable protocol for this, and how would it interact with CouchDB? + +4. What lib should we use for Python + Twisted + GPG/PGP ? + 4.a. It looks like most people are using python-gnupg... + + +## Tickets ## +------------- + +'''To be created:''' + +ticket for feature-alias_resolver_couchdb_support: + + o The alias resolver needs to speak to a couchdb/bigcouch + instance(s). Currently, it merely creates an in-memory dictionary + mapping. It seems like paisley is the best library for this. + +ticket for feature-check_recipient: + + o Need various errors for anything that could go wrong, e.g. the recipient + address is malformed, sender doesn't have permissions to send to such + address, etc. + o These errcodes need to follow the SMTP server transport code spec. + +ticket for feature-virtual_alias_map: + + o Get the recipient's userid from couchdb. + +ticket for feature-evaluate_python_gnupg: + + o Briefly audit library in order to assess if it has the necessary + features, as well as its general code quality. + diff --git a/gpg-key-generator b/gpg-key-generator deleted file mode 100755 index 54d29fb..0000000 --- a/gpg-key-generator +++ /dev/null @@ -1,209 +0,0 @@ -#!/bin/bash -############################################################################## -# -# gpg-key-generator -# ------------------- -# Create batch processed keys for dummy users in the CouchDB, for testing. -# see -# www.gnupg.org/documentation/manuals/gnupg-devel/Unattended-GPG-key-generation.html -# for syntax specification. -# -# @author Isis Agora Lovecruft, 0x2cdb8b35 -# @date 11 February 2013 -# @version 0.1.0 -############################################################################## - - -here="${PWD}" -test_dir="${here}/gpg-keys" -batch_file="${test_dir}/gpg-batch-key-script" -default_keypair_password="leap" -default_keypair_email="blackhole@leap.se" - -function usage () -{ - echo -e "\033[40m\033[36m Usage: $0 [-n [-c|-a]] | [-d|-h]\033[0m" - echo -e "\033[40m\033[36m --------------------------------------------------------\033[0m" - echo -e "\033[40m\033[36m Creates a set of GPG dummy keys for unittesting purposes.\033[0m" - echo - echo -e "\033[40m\033[36m Keys will be created in ${test_dir}, \033[0m" - echo -e "\033[40m\033[36m and a GnuPG batch file named ${batch_file##*/} \033[0m" - echo -e "\033[40m\033[36m will also be created in that same directory. 
\033[0m" - echo -e "\033[40m\033[36m The default password to all keys is: "'"'"${default_keypair_passwd}"'"'" \033[0m" - echo - echo -e "\033[40m\033[36m Options:\033[0m" - echo -e "\033[40m\033[36m -n,--number Number of keys (to create/append) \033[0m" - echo -e "\033[40m\033[36m -c,--create Create a fresh set of N test keys \033[0m" - echo -e "\033[40m\033[36m -a,--append Append another set of N test keys \033[0m" - echo -e "\033[40m\033[36m -d,--delete Delete the test keys and directory\033[0m" - echo -e "\033[40m\033[36m -h,--help This cruft\033[0m" -} - -## @param $1: the filename to write to -## @param $2: the directory to place test keys and batch files in -## @param $3: the number of keypairs to create -function write_gpg_batch_file () -{ - ## if the test directory doesn't exist, create it: - if ! test -w "${1}" ; then - if ! test -d "${2}"; then - mkdir $2 - fi - fi - - # if the batch file is already there, ask to back it up: - if test -r "${1}" ; then - read -ers -N 1 -t 60 \ - -p"Should we keep a backup copy the previous batch file? (Y/n) " bak - case $bak in - n|N ) echo -e "\033[40m\033[31m Overwriting ${1}...\033[0m" ;; - * ) iii=0 - backup="${1}.${iii}-"$(date +"%F")".bak" - while ! test -r "$backup" ; do - echo -e"\033[40m\033[36m Backing up to: \033[0m" - echo -e"\033[40m\033[36m ${backup} \033[0m" - cp $1 $backup - iii=$(( $iii + 1 )) - done ;; - esac - ## then always delete the old otherwise we'll append to that and generate - ## the previous batch's keys too: - ! test -r "${1}" || rm $1 - fi - - ## and whether we backed up or not, make our file if it doesn't exist: - if ! test -w "${1}" ; then - touch $1 && chmod +rw $1 - fi - echo -e "\033[40m\033[36m Writing GPG key generation batch file to: \033[0m" - echo -e "\033[40m\033[36m ${1}... \033[0m" - - total_keypairs=$(printf "%03d" ${3}) - echo "Total keypairs to be generated: ${total_keypairs}" - - this_month=$(date +"%m") # ## this is awkward...isn't there - expire_soon=$(( ${this_month} + 1 )) ## a better way? - next_month=$(printf "%02d" ${expire_soon}) - expiry_date=$(date +"%Y-")${next_month}$(date +"-%d") - echo "Expiry date for keypairs: ${expiry_date}" - - for i in $(seq -f "%03g" 1 $3 ) ; do - now=$(date +"%Y-%m-%d_%H-%M") - echo "Writing generation parameters for keypair #${i}..." - cat >> $1 < 0 ]] ; then - SHORTS="hcadn:" - LONGS="help,create,append,destroy,number:" - ARGS=$(getopt -s bash --options $SHORTS --longoptions $LONGS \ - --name ${0##*/} -- "$@") - - if [ $? != 0 ] ; then - echo -e "\033[40m\033[31m Unable to parse options. 
\033[0m">&2 - exit 1 - fi - eval set -- "$ARGS" - while test -n "$1" ; do - case $1 in - -n|--number ) export CREATE_N="$2" - if test -z "$CREATE_N"; then CREATE_N="3"; fi; - shift 2 ;; - -c|--create ) delete_batch_keys ${test_dir} - write_gpg_batch_file ${batch_file} ${test_dir} \ - ${CREATE_N} - run_gpg_batch_file ${batch_file} ${test_dir} - shift ;; - -a|--append ) run_gpg_batch_file ${batch_file} ${test_dir} - shift ;; - -d|--destroy ) delete_batch_keys ${test_dir} ; shift ;; - --) shift ; break ;; - * ) usage ; shift ;; - esac - done - finish -else - usage -fi - -unset here test_dir batch_file CREATE_N - diff --git a/gpg-keys/generated-2013-02-15_19-15-001.pub b/gpg-keys/generated-2013-02-15_19-15-001.pub deleted file mode 100644 index 1c8fd34..0000000 Binary files a/gpg-keys/generated-2013-02-15_19-15-001.pub and /dev/null differ diff --git a/gpg-keys/generated-2013-02-15_19-15-001.sec b/gpg-keys/generated-2013-02-15_19-15-001.sec deleted file mode 100644 index 97a60e7..0000000 Binary files a/gpg-keys/generated-2013-02-15_19-15-001.sec and /dev/null differ diff --git a/gpg-keys/generated-2013-02-15_19-15-002.pub b/gpg-keys/generated-2013-02-15_19-15-002.pub deleted file mode 100644 index 1cbf8d8..0000000 Binary files a/gpg-keys/generated-2013-02-15_19-15-002.pub and /dev/null differ diff --git a/gpg-keys/generated-2013-02-15_19-15-002.sec b/gpg-keys/generated-2013-02-15_19-15-002.sec deleted file mode 100644 index f89cd97..0000000 Binary files a/gpg-keys/generated-2013-02-15_19-15-002.sec and /dev/null differ diff --git a/gpg-keys/generated-2013-02-15_19-15-003.pub b/gpg-keys/generated-2013-02-15_19-15-003.pub deleted file mode 100644 index bc0ac12..0000000 Binary files a/gpg-keys/generated-2013-02-15_19-15-003.pub and /dev/null differ diff --git a/gpg-keys/generated-2013-02-15_19-15-003.sec b/gpg-keys/generated-2013-02-15_19-15-003.sec deleted file mode 100644 index 397f44a..0000000 Binary files a/gpg-keys/generated-2013-02-15_19-15-003.sec and /dev/null differ diff --git a/gpg-keys/gpg-batch-key-script b/gpg-keys/gpg-batch-key-script deleted file mode 100644 index 7e48db0..0000000 --- a/gpg-keys/gpg-batch-key-script +++ /dev/null @@ -1,45 +0,0 @@ -%echo Generating keypair 001/003... -Key-Type: RSA -Key-Length: 4096 -Subkey-Type: RSA -Subkey-Length: 4096 -Name-Real: Louis Lingg -Name-Email: blackhole@leap.se -Name-Comment: Test Key 001/003 -Expire-Date: 2013-03-15 -Passphrase: leap -%pubring generated-2013-02-15_19-15-001.pub -%secring generated-2013-02-15_19-15-001.sec -%commit -%echo done. 001 keys out of 003 completed. - -%echo Generating keypair 002/003... -Key-Type: RSA -Key-Length: 4096 -Subkey-Type: RSA -Subkey-Length: 4096 -Name-Real: Louis Lingg -Name-Email: blackhole@leap.se -Name-Comment: Test Key 002/003 -Expire-Date: 2013-03-15 -Passphrase: leap -%pubring generated-2013-02-15_19-15-002.pub -%secring generated-2013-02-15_19-15-002.sec -%commit -%echo done. 002 keys out of 003 completed. - -%echo Generating keypair 003/003... -Key-Type: RSA -Key-Length: 4096 -Subkey-Type: RSA -Subkey-Length: 4096 -Name-Real: Louis Lingg -Name-Email: blackhole@leap.se -Name-Comment: Test Key 003/003 -Expire-Date: 2013-03-15 -Passphrase: leap -%pubring generated-2013-02-15_19-15-003.pub -%secring generated-2013-02-15_19-15-003.sec -%commit -%echo done. 003 keys out of 003 completed. 
- diff --git a/pkg/utils/bootstrap b/pkg/utils/bootstrap new file mode 100755 index 0000000..b76a572 --- /dev/null +++ b/pkg/utils/bootstrap @@ -0,0 +1,59 @@ +#!/bin/bash +############################################################################## +# +# bootstrap +# ----------------------- +# Setup a virtualenv, without ever using sudo. +# +# @author Isis Agora Lovecruft, 0x2cdb8b35 +# @date 18 February 2013 +# @version 0.0.1 +############################################################################## + +set -ex -- + +PYTHON=$(which python) +GIT=$(which git) + +VENV_VERSION=1.8.4 +VENV_URL=https://pypi.python.org/packages/source/v/virtualenv +VENV_TARBALL=virtualenv-${VENV_VERSION}.tar.gz + +VENV_WRAPPER_VERSION=3.6 +VENV_WRAPPER_URL=https://pypi.python.org/packages/source/v/virtualenvwrapper +VENV_WRAPPER_TARBALL=virtualenvwrapper-${VENV_WRAPPER_VERSION}.tar.gz + +BOOTSTRAP_ENV=.bootstrap +BOOTSTRAP_OPTS='--no-site-packages --setuptools --unzip-setuptools --never-download' + +PACKAGE_NAME=leap_mx +PACKAGE_URL=https://github.com/isislovecruft/leap_mx.git +PACKAGE_WORKON=${PWD}/${PACKAGE_NAME} +PACKAGE_REQUIREMENTS=${PACKAGE_WORKON}/pkg/mx-requirements.pip +PACKAGE_OPTS=${BOOTSTRAP_OPTS}'' ## xxx add parameter for extra options + +echo 'Downloading virtualenv source from' +echo "${VENV_URL}..." +\wget -O ${VENV_TARBALL} ${VENV_URL}/${VENV_TARBALL} +tar xvzf ${VENV_TARBALL} + +echo 'Downloading virtualenv-wrapper source from:' +echo "${VENV_WRAPPER_URL}" +\wget -O $VENV_WRAPPER_TARBALL ${VENV_WRAPPER_URL}/${VENV_WRAPPER_TARBALL} +tar xvzf virtualenvwrapper-${VENV_WRAPPER_VERSION}.tar.gz + + +echo 'Creating initial virtualenv bootstrap environment, called "bootstrap"' +echo 'in which we will install virtualenv, to avoid using sudo.' +$PYTHON virtualenv-${VENV_VERSION}/virtualenv.py $BOOTSTRAP_OPTS $BOOTSTRAP_ENV +rm -rf virtualenv-${VENV_VERSION} +${BOOTSTRAP_ENV}/bin/pip install ${VENV_TARBALL} +echo 'Installing virtualenvwrapper in "bootstrap" virtualenv...' +${BOOTSTRAP_ENV}/bin/pip install ${VENV_WRAPPER_TARBALL} + +echo 'Using "bootstrap" virtualenv to create project virtualenv...' +source ${BOOTSTRAP_ENV}/local/bin/virtualenvwrapper.sh +echo "Cloning from ${PACKAGE_URL}..." +${GIT} clone ${PACKAGE_URL} ${PACKAGE_NAME} +mkvirtualenv -a $PROJECT_WORKON -r ${PACKAGE_REQUIREMENTS} \ + ${PACKAGE_OPTS} ${PACKAGE_NAME} diff --git a/pkg/utils/gpg-key-generator b/pkg/utils/gpg-key-generator new file mode 100755 index 0000000..54d29fb --- /dev/null +++ b/pkg/utils/gpg-key-generator @@ -0,0 +1,209 @@ +#!/bin/bash +############################################################################## +# +# gpg-key-generator +# ------------------- +# Create batch processed keys for dummy users in the CouchDB, for testing. +# see +# www.gnupg.org/documentation/manuals/gnupg-devel/Unattended-GPG-key-generation.html +# for syntax specification. 
+# +# @author Isis Agora Lovecruft, 0x2cdb8b35 +# @date 11 February 2013 +# @version 0.1.0 +############################################################################## + + +here="${PWD}" +test_dir="${here}/gpg-keys" +batch_file="${test_dir}/gpg-batch-key-script" +default_keypair_password="leap" +default_keypair_email="blackhole@leap.se" + +function usage () +{ + echo -e "\033[40m\033[36m Usage: $0 [-n [-c|-a]] | [-d|-h]\033[0m" + echo -e "\033[40m\033[36m --------------------------------------------------------\033[0m" + echo -e "\033[40m\033[36m Creates a set of GPG dummy keys for unittesting purposes.\033[0m" + echo + echo -e "\033[40m\033[36m Keys will be created in ${test_dir}, \033[0m" + echo -e "\033[40m\033[36m and a GnuPG batch file named ${batch_file##*/} \033[0m" + echo -e "\033[40m\033[36m will also be created in that same directory. \033[0m" + echo -e "\033[40m\033[36m The default password to all keys is: "'"'"${default_keypair_passwd}"'"'" \033[0m" + echo + echo -e "\033[40m\033[36m Options:\033[0m" + echo -e "\033[40m\033[36m -n,--number Number of keys (to create/append) \033[0m" + echo -e "\033[40m\033[36m -c,--create Create a fresh set of N test keys \033[0m" + echo -e "\033[40m\033[36m -a,--append Append another set of N test keys \033[0m" + echo -e "\033[40m\033[36m -d,--delete Delete the test keys and directory\033[0m" + echo -e "\033[40m\033[36m -h,--help This cruft\033[0m" +} + +## @param $1: the filename to write to +## @param $2: the directory to place test keys and batch files in +## @param $3: the number of keypairs to create +function write_gpg_batch_file () +{ + ## if the test directory doesn't exist, create it: + if ! test -w "${1}" ; then + if ! test -d "${2}"; then + mkdir $2 + fi + fi + + # if the batch file is already there, ask to back it up: + if test -r "${1}" ; then + read -ers -N 1 -t 60 \ + -p"Should we keep a backup copy the previous batch file? (Y/n) " bak + case $bak in + n|N ) echo -e "\033[40m\033[31m Overwriting ${1}...\033[0m" ;; + * ) iii=0 + backup="${1}.${iii}-"$(date +"%F")".bak" + while ! test -r "$backup" ; do + echo -e"\033[40m\033[36m Backing up to: \033[0m" + echo -e"\033[40m\033[36m ${backup} \033[0m" + cp $1 $backup + iii=$(( $iii + 1 )) + done ;; + esac + ## then always delete the old otherwise we'll append to that and generate + ## the previous batch's keys too: + ! test -r "${1}" || rm $1 + fi + + ## and whether we backed up or not, make our file if it doesn't exist: + if ! test -w "${1}" ; then + touch $1 && chmod +rw $1 + fi + echo -e "\033[40m\033[36m Writing GPG key generation batch file to: \033[0m" + echo -e "\033[40m\033[36m ${1}... \033[0m" + + total_keypairs=$(printf "%03d" ${3}) + echo "Total keypairs to be generated: ${total_keypairs}" + + this_month=$(date +"%m") # ## this is awkward...isn't there + expire_soon=$(( ${this_month} + 1 )) ## a better way? + next_month=$(printf "%02d" ${expire_soon}) + expiry_date=$(date +"%Y-")${next_month}$(date +"-%d") + echo "Expiry date for keypairs: ${expiry_date}" + + for i in $(seq -f "%03g" 1 $3 ) ; do + now=$(date +"%Y-%m-%d_%H-%M") + echo "Writing generation parameters for keypair #${i}..." + cat >> $1 < 0 ]] ; then + SHORTS="hcadn:" + LONGS="help,create,append,destroy,number:" + ARGS=$(getopt -s bash --options $SHORTS --longoptions $LONGS \ + --name ${0##*/} -- "$@") + + if [ $? != 0 ] ; then + echo -e "\033[40m\033[31m Unable to parse options. 
\033[0m">&2 + exit 1 + fi + eval set -- "$ARGS" + while test -n "$1" ; do + case $1 in + -n|--number ) export CREATE_N="$2" + if test -z "$CREATE_N"; then CREATE_N="3"; fi; + shift 2 ;; + -c|--create ) delete_batch_keys ${test_dir} + write_gpg_batch_file ${batch_file} ${test_dir} \ + ${CREATE_N} + run_gpg_batch_file ${batch_file} ${test_dir} + shift ;; + -a|--append ) run_gpg_batch_file ${batch_file} ${test_dir} + shift ;; + -d|--destroy ) delete_batch_keys ${test_dir} ; shift ;; + --) shift ; break ;; + * ) usage ; shift ;; + esac + done + finish +else + usage +fi + +unset here test_dir batch_file CREATE_N + diff --git a/pkg/utils/gpg-keys/generated-2013-02-15_19-15-001.pub b/pkg/utils/gpg-keys/generated-2013-02-15_19-15-001.pub new file mode 100644 index 0000000..1c8fd34 Binary files /dev/null and b/pkg/utils/gpg-keys/generated-2013-02-15_19-15-001.pub differ diff --git a/pkg/utils/gpg-keys/generated-2013-02-15_19-15-001.sec b/pkg/utils/gpg-keys/generated-2013-02-15_19-15-001.sec new file mode 100644 index 0000000..97a60e7 Binary files /dev/null and b/pkg/utils/gpg-keys/generated-2013-02-15_19-15-001.sec differ diff --git a/pkg/utils/gpg-keys/generated-2013-02-15_19-15-002.pub b/pkg/utils/gpg-keys/generated-2013-02-15_19-15-002.pub new file mode 100644 index 0000000..1cbf8d8 Binary files /dev/null and b/pkg/utils/gpg-keys/generated-2013-02-15_19-15-002.pub differ diff --git a/pkg/utils/gpg-keys/generated-2013-02-15_19-15-002.sec b/pkg/utils/gpg-keys/generated-2013-02-15_19-15-002.sec new file mode 100644 index 0000000..f89cd97 Binary files /dev/null and b/pkg/utils/gpg-keys/generated-2013-02-15_19-15-002.sec differ diff --git a/pkg/utils/gpg-keys/generated-2013-02-15_19-15-003.pub b/pkg/utils/gpg-keys/generated-2013-02-15_19-15-003.pub new file mode 100644 index 0000000..bc0ac12 Binary files /dev/null and b/pkg/utils/gpg-keys/generated-2013-02-15_19-15-003.pub differ diff --git a/pkg/utils/gpg-keys/generated-2013-02-15_19-15-003.sec b/pkg/utils/gpg-keys/generated-2013-02-15_19-15-003.sec new file mode 100644 index 0000000..397f44a Binary files /dev/null and b/pkg/utils/gpg-keys/generated-2013-02-15_19-15-003.sec differ diff --git a/pkg/utils/gpg-keys/gpg-batch-key-script b/pkg/utils/gpg-keys/gpg-batch-key-script new file mode 100644 index 0000000..7e48db0 --- /dev/null +++ b/pkg/utils/gpg-keys/gpg-batch-key-script @@ -0,0 +1,45 @@ +%echo Generating keypair 001/003... +Key-Type: RSA +Key-Length: 4096 +Subkey-Type: RSA +Subkey-Length: 4096 +Name-Real: Louis Lingg +Name-Email: blackhole@leap.se +Name-Comment: Test Key 001/003 +Expire-Date: 2013-03-15 +Passphrase: leap +%pubring generated-2013-02-15_19-15-001.pub +%secring generated-2013-02-15_19-15-001.sec +%commit +%echo done. 001 keys out of 003 completed. + +%echo Generating keypair 002/003... +Key-Type: RSA +Key-Length: 4096 +Subkey-Type: RSA +Subkey-Length: 4096 +Name-Real: Louis Lingg +Name-Email: blackhole@leap.se +Name-Comment: Test Key 002/003 +Expire-Date: 2013-03-15 +Passphrase: leap +%pubring generated-2013-02-15_19-15-002.pub +%secring generated-2013-02-15_19-15-002.sec +%commit +%echo done. 002 keys out of 003 completed. + +%echo Generating keypair 003/003... +Key-Type: RSA +Key-Length: 4096 +Subkey-Type: RSA +Subkey-Length: 4096 +Name-Real: Louis Lingg +Name-Email: blackhole@leap.se +Name-Comment: Test Key 003/003 +Expire-Date: 2013-03-15 +Passphrase: leap +%pubring generated-2013-02-15_19-15-003.pub +%secring generated-2013-02-15_19-15-003.sec +%commit +%echo done. 003 keys out of 003 completed. 
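The batch script above follows the unattended key generation format referenced in the gpg-key-generator header. If python-gnupg (the library under evaluation in doc/NOTES.md) is adopted, a roughly equivalent throwaway test key could also be generated from Python; the gnupghome path here is a placeholder, and the parameters simply mirror one entry of the batch file:

```
# generate one test keypair with parameters matching the batch file above;
# the homedir is a placeholder for a throwaway test keyring.
import gnupg

gpg = gnupg.GPG(gnupghome='/tmp/leap-mx-test-keys')
params = gpg.gen_key_input(
    key_type='RSA', key_length=4096,
    subkey_type='RSA', subkey_length=4096,
    name_real='Louis Lingg',
    name_email='blackhole@leap.se',
    name_comment='Test Key 001/003',
    expire_date='2013-03-15',
    passphrase='leap')
key = gpg.gen_key(params)
print(key.fingerprint)
```
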
+ diff --git a/pkg/utils/test_bootstrap b/pkg/utils/test_bootstrap new file mode 100755 index 0000000..f072d10 --- /dev/null +++ b/pkg/utils/test_bootstrap @@ -0,0 +1,39 @@ +#!/bin/bash +############################################################################## +# +# test_bootstrap +# -------------- +# Test that the bootstrap script works correctly by making a temporary new +# user. +# +# @author Isis Agora Lovecruft, 0x2cdb8b35 +# @date 18 February 2013 +# @version 0.0.1 +############################################################################## + +set -ex - + +HERE=$(pwd) +TEST_USER=bootstraptester + +echo "Creating new user: "'"'"${TEST_USER}"'"'"" +sudo adduser --home /home/${TEST_USER} --shell /bin/bash ${TEST_USER} && \ + echo -e "notsecure\nnotsecure\n" | sudo passwd ${TEST_USER} + +echo 'Copying boostrap script to new user home directory...' +sudo cp ${HERE}/bootstrap /home/${TEST_USER}/bootstrap && \ + sudo chown ${TEST_USER}:${TEST_USER} /home/${TEST_USER}/bootstrap + +echo 'Logging in as new user and executing bootstrap script...' +echo 'Executing test of bootstrap script...' +## -S pulls password from stdin +echo -e "notsecure\n" | sudo -S -H -u ${TEST_USER} -i /home/${TEST_USER}/bootstrap + +if [[ "$?" != 0 ]] ; then + echo 'Error while testing bootstrap...' +else + echo 'Test of bootstrap successful.' +fi + +echo "Deleting user: "'"'"${TEST_USER}"'"'"" +sudo deluser --remove-home ${TEST_USER} diff --git a/sample-config/mx.conf b/sample-config/mx.conf new file mode 100644 index 0000000..2036706 --- /dev/null +++ b/sample-config/mx.conf @@ -0,0 +1,9 @@ +[mail1] +path=/home/blabla/Maildir/ +recursive=True + +[couchdb] +user=someuser +password=somepass +server=localhost +port=6666 diff --git a/test_bootstrap b/test_bootstrap deleted file mode 100755 index f072d10..0000000 --- a/test_bootstrap +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -############################################################################## -# -# test_bootstrap -# -------------- -# Test that the bootstrap script works correctly by making a temporary new -# user. -# -# @author Isis Agora Lovecruft, 0x2cdb8b35 -# @date 18 February 2013 -# @version 0.0.1 -############################################################################## - -set -ex - - -HERE=$(pwd) -TEST_USER=bootstraptester - -echo "Creating new user: "'"'"${TEST_USER}"'"'"" -sudo adduser --home /home/${TEST_USER} --shell /bin/bash ${TEST_USER} && \ - echo -e "notsecure\nnotsecure\n" | sudo passwd ${TEST_USER} - -echo 'Copying boostrap script to new user home directory...' -sudo cp ${HERE}/bootstrap /home/${TEST_USER}/bootstrap && \ - sudo chown ${TEST_USER}:${TEST_USER} /home/${TEST_USER}/bootstrap - -echo 'Logging in as new user and executing bootstrap script...' -echo 'Executing test of bootstrap script...' -## -S pulls password from stdin -echo -e "notsecure\n" | sudo -S -H -u ${TEST_USER} -i /home/${TEST_USER}/bootstrap - -if [[ "$?" != 0 ]] ; then - echo 'Error while testing bootstrap...' -else - echo 'Test of bootstrap successful.' -fi - -echo "Deleting user: "'"'"${TEST_USER}"'"'"" -sudo deluser --remove-home ${TEST_USER} -- cgit v1.2.3
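
The new sample-config/mx.conf is presumably what the mx daemon reads at startup. A minimal sketch of parsing it with the stdlib ConfigParser, using exactly the sections and options shown in the sample (everything around them is illustrative only):

```
# read sample-config/mx.conf; section and option names are the ones shown
# in the sample above, the consuming code is only illustrative.
import ConfigParser

config = ConfigParser.SafeConfigParser()
config.read('sample-config/mx.conf')

maildir = config.get('mail1', 'path')                    # /home/blabla/Maildir/
recursive = config.getboolean('mail1', 'recursive')      # True

couch_url = 'http://%s:%s@%s:%d' % (
    config.get('couchdb', 'user'),
    config.get('couchdb', 'password'),
    config.get('couchdb', 'server'),
    config.getint('couchdb', 'port'))
print(couch_url)
```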