update ffsync 1.5
This commit is contained in:
146
sources/syncserver/__init__.py
Normal file
146
sources/syncserver/__init__.py
Normal file
@@ -0,0 +1,146 @@
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
# You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
import os
|
||||
import logging
|
||||
from urlparse import urlparse, urlunparse
|
||||
|
||||
from pyramid.response import Response
|
||||
from pyramid.events import NewRequest, subscriber
|
||||
|
||||
import mozsvc.config
|
||||
|
||||
from tokenserver.util import _JSONError
|
||||
|
||||
logger = logging.getLogger("syncserver")
|
||||
|
||||
|
||||
def includeme(config):
|
||||
"""Install SyncServer application into the given Pyramid configurator."""
|
||||
# Set the umask so that files are created with secure permissions.
|
||||
# Necessary for e.g. created-on-demand sqlite database files.
|
||||
os.umask(0077)
|
||||
|
||||
# Sanity-check the deployment settings and provide sensible defaults.
|
||||
settings = config.registry.settings
|
||||
public_url = settings.get("syncserver.public_url")
|
||||
if public_url is None:
|
||||
raise RuntimeError("you much configure syncserver.public_url")
|
||||
public_url = public_url.rstrip("/")
|
||||
settings["syncserver.public_url"] = public_url
|
||||
|
||||
secret = settings.get("syncserver.secret")
|
||||
if secret is None:
|
||||
secret = os.urandom(32).encode("hex")
|
||||
sqluri = settings.get("syncserver.sqluri")
|
||||
if sqluri is None:
|
||||
rootdir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
|
||||
sqluri = "sqlite:///" + os.path.join(rootdir, "syncserver.db")
|
||||
|
||||
# Configure app-specific defaults based on top-level configuration.
|
||||
settings.pop("config", None)
|
||||
if "tokenserver.backend" not in settings:
|
||||
# Default to our simple static node-assignment backend
|
||||
settings["tokenserver.backend"] =\
|
||||
"syncserver.staticnode.StaticNodeAssignment"
|
||||
settings["tokenserver.sqluri"] = sqluri
|
||||
settings["tokenserver.node_url"] = public_url
|
||||
settings["endpoints.sync-1.5"] = "{node}/storage/1.5/{uid}"
|
||||
if "tokenserver.monkey_patch_gevent" not in settings:
|
||||
# Default to no gevent monkey-patching
|
||||
settings["tokenserver.monkey_patch_gevent"] = False
|
||||
if "tokenserver.applications" not in settings:
|
||||
# Default to just the sync-1.5 application
|
||||
settings["tokenserver.applications"] = "sync-1.5"
|
||||
if "tokenserver.secrets.backend" not in settings:
|
||||
# Default to a single fixed signing secret
|
||||
settings["tokenserver.secrets.backend"] = "mozsvc.secrets.FixedSecrets"
|
||||
settings["tokenserver.secrets.secrets"] = [secret]
|
||||
if "tokenserver.allow_new_users" not in settings:
|
||||
allow_new_users = settings.get("syncserver.allow_new_users")
|
||||
if allow_new_users is not None:
|
||||
settings["tokenserver.allow_new_users"] = allow_new_users
|
||||
if "hawkauth.secrets.backend" not in settings:
|
||||
# Default to the same secrets backend as the tokenserver
|
||||
for key in settings.keys():
|
||||
if key.startswith("tokenserver.secrets."):
|
||||
newkey = "hawkauth" + key[len("tokenserver"):]
|
||||
settings[newkey] = settings[key]
|
||||
if "storage.backend" not in settings:
|
||||
# Default to sql syncstorage backend
|
||||
settings["storage.backend"] = "syncstorage.storage.sql.SQLStorage"
|
||||
settings["storage.sqluri"] = sqluri
|
||||
settings["storage.create_tables"] = True
|
||||
if "browserid.backend" not in settings:
|
||||
# Default to remote verifier, with base of public_url as only audience
|
||||
audience = urlunparse(urlparse(public_url)._replace(path=""))
|
||||
settings["browserid.backend"] = "tokenserver.verifiers.RemoteVerifier"
|
||||
settings["browserid.audiences"] = audience
|
||||
if "loggers" not in settings:
|
||||
# Default to basic logging config.
|
||||
root_logger = logging.getLogger("")
|
||||
if not root_logger.handlers:
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
# Include the relevant sub-packages.
|
||||
config.scan("syncserver")
|
||||
config.include("syncstorage", route_prefix="/storage")
|
||||
config.include("tokenserver", route_prefix="/token")
|
||||
|
||||
# Add a top-level "it works!" view.
|
||||
def itworks(request):
|
||||
return Response("it works!")
|
||||
|
||||
config.add_route('itworks', '/')
|
||||
config.add_view(itworks, route_name='itworks')
|
||||
|
||||
|
||||
@subscriber(NewRequest)
def reconcile_wsgi_environ_with_public_url(event):
    """Event-listener that checks and tweaks WSGI environ based on public_url.

    This is a simple trick to help ensure that the configured public_url
    matches the actual deployed address.  It fixes up parts of the WSGI
    environ where it makes sense (e.g. SCRIPT_NAME) and warns about any parts
    that seem obviously mis-configured (e.g. http:// versus https://).

    It's very important to get public_url and WSGI environ matching exactly,
    since they're used for browserid audience checking and HAWK signature
    validation, so mismatches can easily cause strange and cryptic errors.
    """
    request = event.request
    public_url = request.registry.settings["syncserver.public_url"]
    p_public_url = urlparse(public_url)
    # If we don't have a SCRIPT_NAME, take it from the public_url.
    # This is often the case if we're behind e.g. an nginx proxy that
    # is serving us at some sub-path.
    if not request.script_name:
        request.script_name = p_public_url.path.rstrip("/")
    # Log a noisy error if the application url is different to what we'd
    # expect based on public_url setting.
    application_url = request.application_url
    if public_url != application_url:
        # Refuse to serve the request: a mismatch here would break
        # audience checks and HAWK signatures downstream anyway.
        msg = "The public_url setting does not match the application url.\n"
        msg += "This will almost certainly cause authentication failures!\n"
        msg += "  public_url setting is: %s\n" % (public_url,)
        msg += "  application url is: %s\n" % (application_url,)
        logger.error(msg)
        raise _JSONError([msg], status_code=500)
|
||||
|
||||
|
||||
def get_configurator(global_config, **settings):
    """Build a SyncServer configurator object from deployment settings.

    Returns a mozsvc configurator with the syncserver application
    installed via :func:`includeme`.
    """
    configurator = mozsvc.config.get_configurator(global_config, **settings)
    configurator.begin()
    try:
        configurator.include(includeme)
    finally:
        # Always close out the config transaction, even if includeme fails.
        configurator.end()
    return configurator
|
||||
|
||||
|
||||
def main(global_config, **settings):
    """Paste-deploy entry point: build the SyncStorage WSGI app."""
    return get_configurator(global_config, **settings).make_wsgi_app()
|
||||
218
sources/syncserver/staticnode.py
Normal file
218
sources/syncserver/staticnode.py
Normal file
@@ -0,0 +1,218 @@
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
# You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
"""
|
||||
Simple node-assignment backend using a single, static node.
|
||||
|
||||
This is a greatly-simplified node-assignment backend. It keeps user records
|
||||
in an SQL database, but does not attempt to do any node management. All users
|
||||
are implicitly assigned to a single, static node.
|
||||
|
||||
XXX TODO: move this into the tokenserver repo.
|
||||
|
||||
"""
|
||||
import time
|
||||
import urlparse
|
||||
from mozsvc.exceptions import BackendError
|
||||
|
||||
from sqlalchemy import Column, Integer, String, BigInteger, Index
|
||||
from sqlalchemy import create_engine, Table, MetaData
|
||||
from sqlalchemy.pool import QueuePool
|
||||
from sqlalchemy.sql import text as sqltext
|
||||
from sqlalchemy.exc import IntegrityError
|
||||
|
||||
from tokenserver.assignment import INodeAssignment
|
||||
from zope.interface import implements
|
||||
|
||||
|
||||
metadata = MetaData()
|
||||
|
||||
|
||||
# Table of user records for the static node.
# A user may accumulate multiple rows over time: a fresh row is inserted
# whenever their client_state changes, and superseded rows are marked by
# setting a non-NULL replaced_at (see _REPLACE_USER_RECORDS usage below).
users = Table(
    "users",
    metadata,
    # Locally-assigned integer userid.
    Column("uid", Integer(), primary_key=True, autoincrement=True,
           nullable=False),
    # The service the user is assigned to, e.g. "sync-1.5".
    Column("service", String(32), nullable=False),
    Column("email", String(255), nullable=False),
    # Generation number; monotonically bumped via _UPDATE_GENERATION_NUMBER.
    Column("generation", BigInteger(), nullable=False),
    Column("client_state", String(32), nullable=False),
    # Timestamps in integer milliseconds (see get_timestamp()).
    Column("created_at", BigInteger(), nullable=False),
    Column("replaced_at", BigInteger(), nullable=True),
    Index('lookup_idx', 'email', 'service', 'created_at'),
    # Each (email, service, client_state) triple may appear only once;
    # duplicate inserts raise IntegrityError, which callers rely on.
    Index('clientstate_idx', 'email', 'service', 'client_state', unique=True),
)
|
||||
|
||||
|
||||
# Fetch up to 20 records for (email, service), newest first.  The first
# row is the active user record; any further rows contribute only their
# old client_state values.
_GET_USER_RECORDS = sqltext("""\
select
    uid, generation, client_state
from
    users
where
    email = :email
and
    service = :service
order by
    created_at desc, uid desc
limit
    20
""")


# Insert a fresh user record; replaced_at starts out NULL (i.e. active).
_CREATE_USER_RECORD = sqltext("""\
insert into
    users
    (service, email, generation, client_state, created_at, replaced_at)
values
    (:service, :email, :generation, :client_state, :timestamp, NULL)
""")


# Bump the generation number on the active record, but only ever upwards
# (the "generation < :generation" guard makes this monotonic).
_UPDATE_GENERATION_NUMBER = sqltext("""\
update
    users
set
    generation = :generation
where
    service = :service and email = :email and
    generation < :generation and replaced_at is null
""")


# Mark all older active records for (email, service) as replaced,
# leaving only the record created at :timestamp active.
_REPLACE_USER_RECORDS = sqltext("""\
update
    users
set
    replaced_at = :timestamp
where
    service = :service and email = :email
    and replaced_at is null and created_at < :timestamp
""")
|
||||
|
||||
|
||||
def get_timestamp():
    """Return the current time as an integer number of milliseconds."""
    return int(1000 * time.time())
|
||||
|
||||
|
||||
class StaticNodeAssignment(object):
    """Node-assignment backend that assigns all users to one static node.

    User records live in an SQL database (see the ``users`` table), but no
    actual node management is performed: every user is implicitly assigned
    to the single configured ``node_url``.
    """
    implements(INodeAssignment)

    def __init__(self, sqluri, node_url, **kw):
        self.sqluri = sqluri
        self.node_url = node_url
        self.driver = urlparse.urlparse(sqluri).scheme.lower()
        sqlkw = {
            "logging_name": "syncserver",
            "connect_args": {},
            "poolclass": QueuePool,
            "pool_reset_on_return": True,
        }
        if self.driver == "sqlite":
            # We must mark it as safe to share sqlite connections between
            # threads.  The pool will ensure there are no race conditions.
            sqlkw["connect_args"]["check_same_thread"] = False
            # If using a :memory: database, we must use a QueuePool of size
            # 1 so that a single connection is shared by all threads.
            if urlparse.urlparse(sqluri).path.lower() in ("/", "/:memory:"):
                sqlkw["pool_size"] = 1
                sqlkw["max_overflow"] = 0
        self._engine = create_engine(sqluri, **sqlkw)
        # Create the users table on demand if it doesn't already exist.
        users.create(self._engine, checkfirst=True)

    def get_user(self, service, email):
        """Return the user record dict for (service, email), or None."""
        params = {'service': service, 'email': email}
        res = self._engine.execute(_GET_USER_RECORDS, **params)
        try:
            row = res.fetchone()
            if row is None:
                return None
            # The first row is the most up-to-date user record.
            user = {
                'email': email,
                'uid': row.uid,
                'node': self.node_url,
                'generation': row.generation,
                'client_state': row.client_state,
                'old_client_states': {}
            }
            # Any subsequent rows are due to old client-state values.
            row = res.fetchone()
            while row is not None:
                user['old_client_states'][row.client_state] = True
                row = res.fetchone()
            return user
        finally:
            res.close()

    def allocate_user(self, service, email, generation=0, client_state=''):
        """Create and return a new user record for (service, email)."""
        params = {
            'service': service, 'email': email, 'generation': generation,
            'client_state': client_state, 'timestamp': get_timestamp()
        }
        try:
            res = self._engine.execute(_CREATE_USER_RECORD, **params)
        except IntegrityError:
            # The unique clientstate_idx rejected the insert, meaning a
            # concurrent allocation beat us to it; return the existing
            # record instead.
            # BUG FIX: the original re-raised here, which made this
            # fallback return statement unreachable dead code.
            return self.get_user(service, email)
        else:
            res.close()
            return {
                'email': email,
                'uid': res.lastrowid,
                'node': self.node_url,
                'generation': generation,
                'client_state': client_state,
                'old_client_states': {}
            }

    def update_user(self, service, user, generation=None, client_state=None):
        """Update a user's generation and/or client_state in place.

        Mutates the given `user` dict to reflect the stored state.
        Raises BackendError if client_state repeats a previously-seen value.
        """
        if client_state is None:
            # uid can stay the same, just update the generation number.
            if generation is not None:
                params = {
                    'service': service,
                    'email': user['email'],
                    'generation': generation,
                }
                res = self._engine.execute(_UPDATE_GENERATION_NUMBER, **params)
                res.close()
                # The SQL only bumps generation upwards; mirror that here.
                user['generation'] = max(generation, user['generation'])
        else:
            # reject previously-seen client-state strings.
            if client_state == user['client_state']:
                raise BackendError('previously seen client-state string')
            if client_state in user['old_client_states']:
                raise BackendError('previously seen client-state string')
            # need to create a new record for new client_state.
            if generation is not None:
                generation = max(user['generation'], generation)
            else:
                generation = user['generation']
            now = get_timestamp()
            params = {
                'service': service, 'email': user['email'],
                'generation': generation, 'client_state': client_state,
                'timestamp': now,
            }
            try:
                res = self._engine.execute(_CREATE_USER_RECORD, **params)
            except IntegrityError:
                # Lost a race with a concurrent writer; adopt whatever
                # record won.
                user.update(self.get_user(service, user['email']))
            else:
                # BUG FIX: removed a stray self.get_user() call here whose
                # result was discarded -- a pointless extra query.
                user['uid'] = res.lastrowid
                user['generation'] = generation
                user['old_client_states'][user['client_state']] = True
                user['client_state'] = client_state
                res.close()
            # mark old records as having been replaced.
            # if we crash here, they are unmarked and we may fail to
            # garbage collect them for a while, but the active state
            # will be undamaged.
            params = {
                'service': service, 'email': user['email'], 'timestamp': now
            }
            res = self._engine.execute(_REPLACE_USER_RECORDS, **params)
            res.close()
|
||||
17
sources/syncserver/tests.ini
Normal file
17
sources/syncserver/tests.ini
Normal file
@@ -0,0 +1,17 @@
|
||||
[server:main]
|
||||
use = egg:Paste#http
|
||||
host = 0.0.0.0
|
||||
port = 5000
|
||||
|
||||
[app:main]
|
||||
use = egg:SyncServer
|
||||
|
||||
[syncserver]
|
||||
# This must be edited to point to the public URL of your server.
|
||||
public_url = http://localhost:5000/
|
||||
|
||||
# This defines the database in which to store all server data.
|
||||
#sqluri = sqlite:////tmp/syncserver.db
|
||||
|
||||
# This is a secret key used for signing authentication tokens.
|
||||
#secret = INSERT_SECRET_KEY_HERE
|
||||
Reference in New Issue
Block a user