author    drebs <drebs@leap.se>  2014-12-18 14:42:13 -0200
committer drebs <drebs@leap.se>  2014-12-18 14:42:13 -0200
commit    fa8dacef003d30cd9b56f7e2b07baa3b387c1e20 (patch)
tree      3e5c616877755b230ec817db9e35fec9b8e4cac2 /scripts/ddocs
parent    e909a218efb0ad31f413c47c90303f44f6906158 (diff)
Update testing scripts: refactor update_design_docs.py into helper functions, add options to update a single user database (-u) and to set the number of worker threads (-t), and create missing design documents instead of failing on them.
Diffstat (limited to 'scripts/ddocs')
-rw-r--r--  scripts/ddocs/update_design_docs.py  |  191
1 file changed, 107 insertions(+), 84 deletions(-)
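The main behavioural change is easier to see outside the diff: design documents that are missing from a database are now created instead of tripping an unhandled lookup error. The snippet below is a condensed sketch of that logic, not part of the commit; it reuses the python-couchdb calls and the ddocs module that the script already imports, and the function name update_ddocs is made up for illustration.

import json
import binascii

from couchdb.client import Server
from couchdb.http import ResourceNotFound

from leap.soledad.common import ddocs

# the design documents ship with leap.soledad.common as base64-encoded JSON
DESIGN_DOCS = {
    '_design/docs': json.loads(binascii.a2b_base64(ddocs.docs)),
}


def update_ddocs(couch_url, dbname):
    # create or update the design documents of a single database
    db = Server(couch_url)[dbname]
    for doc_id, content in DESIGN_DOCS.items():
        try:
            doc = db[doc_id]  # reuse the existing design document
        except ResourceNotFound:
            doc = {'_id': doc_id}  # or create it from scratch
        for key in ['lists', 'views', 'updates']:
            if key in content:
                doc[key] = content[key]
        db.save(doc)

In the patch itself the same loop lives in DBWorkerThread.run() and is executed once per database, throttled by a bounded semaphore.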
diff --git a/scripts/ddocs/update_design_docs.py b/scripts/ddocs/update_design_docs.py
index e7b5a29c..2e2fa8f0 100644
--- a/scripts/ddocs/update_design_docs.py
+++ b/scripts/ddocs/update_design_docs.py
@@ -11,84 +11,83 @@ import re
import threading
import binascii
-
+from urlparse import urlparse
from getpass import getpass
from ConfigParser import ConfigParser
-from couchdb.client import Server
-from couchdb.http import Resource, Session
-from datetime import datetime
-from urlparse import urlparse
+from couchdb.client import Server
+from couchdb.http import Resource
+from couchdb.http import Session
+from couchdb.http import ResourceNotFound
from leap.soledad.common import ddocs
-# parse command line for the log file name
-logger_fname = "/tmp/update-design-docs_%s.log" % \
-    str(datetime.now()).replace(' ', '_')
-parser = argparse.ArgumentParser()
-parser.add_argument('--log', action='store', default=logger_fname, type=str,
-                    required=False, help='the name of the log file', nargs=1)
-args = parser.parse_args()
+MAX_THREADS = 20
+DESIGN_DOCS = {
+    '_design/docs': json.loads(binascii.a2b_base64(ddocs.docs)),
+    '_design/syncs': json.loads(binascii.a2b_base64(ddocs.syncs)),
+    '_design/transactions': json.loads(
+        binascii.a2b_base64(ddocs.transactions)),
+}
-# configure the logger
+# create a logger
logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
-print "Logging to %s." % args.log
-logging.basicConfig(
-    filename=args.log,
-    format="%(asctime)-15s %(message)s")
+LOG_FORMAT = '%(asctime)s %(message)s'
+logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
-# configure threads
-max_threads = 20
-semaphore_pool = threading.BoundedSemaphore(value=max_threads)
-threads = []
+def _parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-u', dest='uuid', default=None, type=str,
+                        help='the UUID of the user')
+    parser.add_argument('-t', dest='threads', default=MAX_THREADS, type=int,
+                        help='the number of parallel threads')
+    return parser.parse_args()
-# get couch url
-cp = ConfigParser()
-cp.read('/etc/leap/soledad-server.conf')
-url = urlparse(cp.get('soledad-server', 'couch_url'))
-# get admin password
-netloc = re.sub('^.*@', '', url.netloc)
-url = url._replace(netloc=netloc)
-password = getpass("Admin password for %s: " % url.geturl())
-url = url._replace(netloc='admin:%s@%s' % (password, netloc))
+def _get_url():
+    # get couch url
+    cp = ConfigParser()
+    cp.read('/etc/leap/soledad-server.conf')
+    url = urlparse(cp.get('soledad-server', 'couch_url'))
+    # get admin password
+    netloc = re.sub('^.*@', '', url.netloc)
+    url = url._replace(netloc=netloc)
+    password = getpass("Admin password for %s: " % url.geturl())
+    return url._replace(netloc='admin:%s@%s' % (password, netloc))
-resource = Resource(url.geturl(), Session(retry_delays=[1,2,4,8], timeout=10))
-server = Server(url=resource)
-hidden_url = re.sub(
-    'http://(.*):.*@',
-    'http://\\1:xxxxx@',
-    url.geturl())
+def _get_server(url):
+    resource = Resource(
+        url.geturl(), Session(retry_delays=[1, 2, 4, 8], timeout=10))
+    return Server(url=resource)
-print """
-==========
-ATTENTION!
-==========
-This script will modify Soledad's shared and user databases in:
+def _confirm(url):
+    hidden_url = re.sub(
+        'http://(.*):.*@',
+        'http://\\1:xxxxx@',
+        url.geturl())
-    %s
+    print """
+    ==========
+    ATTENTION!
+    ==========
-This script does not make a backup of the couch db data, so make sure you
-have a copy or you may loose data.
-""" % hidden_url
-confirm = raw_input("Proceed (type uppercase YES)? ")
+    This script will modify Soledad's shared and user databases in:
-if confirm != "YES":
-    exit(1)
+        %s
-# convert design doc content
+    This script does not make a backup of the couch db data, so make sure you
+    have a copy or you may loose data.
+    """ % hidden_url
+    confirm = raw_input("Proceed (type uppercase YES)? ")
+
+    if confirm != "YES":
+        exit(1)
-design_docs = {
-    '_design/docs': json.loads(binascii.a2b_base64(ddocs.docs)),
-    '_design/syncs': json.loads(binascii.a2b_base64(ddocs.syncs)),
-    '_design/transactions': json.loads(binascii.a2b_base64(ddocs.transactions)),
-}
#
# Thread
@@ -106,42 +105,66 @@ class DBWorkerThread(threading.Thread):
    def run(self):
-        logger.info("(%d/%d) Updating db %s." % (self._db_idx, self._db_len,
-                    self._dbname))
+        logger.info(
+            "(%d/%d) Updating db %s."
+            % (self._db_idx, self._db_len, self._dbname))
-        for doc_id in design_docs:
-            doc = self._cdb[doc_id]
+        for doc_id in DESIGN_DOCS:
+            try:
+                doc = self._cdb[doc_id]
+            except ResourceNotFound:
+                doc = {'_id': doc_id}
            for key in ['lists', 'views', 'updates']:
-                if key in design_docs[doc_id]:
-                    doc[key] = design_docs[doc_id][key]
+                if key in DESIGN_DOCS[doc_id]:
+                    doc[key] = DESIGN_DOCS[doc_id][key]
            self._cdb.save(doc)
        # release the semaphore
        self._release_fun()
-db_idx = 0
-db_len = len(server)
-for dbname in server:
-
-    db_idx += 1
-
-    if not (dbname.startswith('user-') or dbname == 'shared') \
-            or dbname == 'user-test-db':
-        logger.info("(%d/%d) Skipping db %s." % (db_idx, db_len, dbname))
-        continue
-
-
-    # get access to couch db
-    cdb = Server(url.geturl())[dbname]
-
-    #---------------------------------------------------------------------
-    # Start DB worker thread
-    #---------------------------------------------------------------------
-    semaphore_pool.acquire()
-    thread = DBWorkerThread(server, dbname, db_idx, db_len, semaphore_pool.release)
+def _launch_update_design_docs_thread(
+        server, dbname, db_idx, db_len, semaphore_pool):
+    semaphore_pool.acquire()  # wait for an available working slot
+    thread = DBWorkerThread(
+        server, dbname, db_idx, db_len, semaphore_pool.release)
    thread.daemon = True
    thread.start()
-    threads.append(thread)
-
-map(lambda thread: thread.join(), threads)
+    return thread
+
+
+def _update_design_docs(args, server):
+
+    # find the actual databases to be updated
+    dbs = []
+    if args.uuid:
+        dbs.append('user-%s' % args.uuid)
+    else:
+        for dbname in server:
+            if dbname.startswith('user-') or dbname == 'shared':
+                dbs.append(dbname)
+            else:
+                logger.info("Skipping db %s." % dbname)
+
+    db_idx = 0
+    db_len = len(dbs)
+    semaphore_pool = threading.BoundedSemaphore(value=args.threads)
+    threads = []
+
+    # launch the update
+    for db in dbs:
+        db_idx += 1
+        threads.append(
+            _launch_update_design_docs_thread(
+                server, db, db_idx, db_len, semaphore_pool))
+
+    # wait for all threads to finish
+    map(lambda thread: thread.join(), threads)
+
+
+if __name__ == "__main__":
+    args = _parse_args()
+    url = _get_url()
+    _confirm(url)
+    server = _get_server(url)
+    _update_design_docs(args, server)
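The refactor keeps the original thread-throttling scheme: a bounded semaphore caps how many databases are updated at once, the launcher acquires a slot before starting each worker, and the worker itself releases the slot when it finishes (the release callable is passed in, just as DBWorkerThread receives its release function). Below is a self-contained sketch of that pattern, not taken from the commit; the sleep stands in for the per-database work and all names are invented for illustration.

import threading
import time


def _worker(dbname, release_fun):
    # stand-in for updating one database's design documents
    time.sleep(0.1)
    release_fun()  # free the slot so the launcher may start another thread


def run_throttled(dbnames, max_threads=20):
    semaphore_pool = threading.BoundedSemaphore(value=max_threads)
    threads = []
    for dbname in dbnames:
        semaphore_pool.acquire()  # blocks while max_threads workers are busy
        thread = threading.Thread(target=_worker,
                                  args=(dbname, semaphore_pool.release))
        thread.daemon = True
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()


if __name__ == '__main__':
    run_throttled(['shared'] + ['user-%d' % i for i in range(50)],
                  max_threads=5)

In the updated script the cap comes from the -t option (default 20), and -u restricts the run to a single user-<uuid> database.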