## transports_nb.py
##       based on transports.py
##
##   Copyright (C) 2003-2004 Alexey "Snake" Nezhdanov
##       modified by Dimitur Kirov <dkirov@gmail.com>
##
##   This program is free software; you can redistribute it and/or modify
##   it under the terms of the GNU General Public License as published by
##   the Free Software Foundation; either version 2, or (at your option)
##   any later version.
##
##   This program is distributed in the hope that it will be useful,
##   but WITHOUT ANY WARRANTY; without even the implied warranty of
##   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
##   GNU General Public License for more details.

import socket,select,base64,dispatcher_nb
import struct

from simplexml import ustr
from client import PlugIn
from idlequeue import IdleObject
from protocol import *

import sys
import os
import errno
import time

import traceback
import threading

import logging
log = logging.getLogger('gajim.c.x.transports_nb')

consoleloghandler = logging.StreamHandler()
consoleloghandler.setLevel(logging.DEBUG)
consoleloghandler.setFormatter(
	logging.Formatter('%(levelname)s: %(message)s')
)
log.setLevel(logging.DEBUG)
log.addHandler(consoleloghandler)
log.propagate = False

def urisplit(uri):
	'''
	Function for splitting a URI string to the tuple (protocol, host, path).
	e.g. urisplit('http://httpcm.jabber.org/webclient') returns
	('http', 'httpcm.jabber.org', '/webclient')
	'''
	import re
	regex = '(([^:/]+)(://))?([^/]*)(/?.*)'
	grouped = re.match(regex, uri).groups()
	proto, host, path = grouped[1], grouped[3], grouped[4]
	return proto, host, path

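# A minimal illustration of what urisplit() yields (values follow directly from
# the regex above; the URIs are just examples, not tied to any deployment):
#
#   proto, host, path = urisplit('http://httpcm.jabber.org/webclient')
#   # proto == 'http', host == 'httpcm.jabber.org', path == '/webclient'
#
#   proto, host, path = urisplit('example.org/http-bind')
#   # proto is None here, so callers should fall back to a default scheme.
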
# I don't need to load gajim.py just because of a few TLS variables, so I changed
# %s/common\.gajim\.DATA_DIR/\'\.\.\/data\'/c
# %s/common\.gajim\.MY_CACERTS/\'\%s\/\.gajim\/cacerts\.pem\' \% os\.environ\[\'HOME\'\]/c

# To change it back do:
# %s/\'\.\.\/data\'/common\.gajim\.DATA_DIR/c
# %s/\'%s\/\.gajim\/cacerts\.pem\'\ %\ os\.environ\[\'HOME\'\]/common\.gajim\.MY_CACERTS/c
# TODO: make the paths configurable - as constructor parameters or sth

# import common.gajim

# timeout to connect to the server socket, it doesn't include auth
CONNECT_TIMEOUT_SECONDS = 30

# how long to wait for a disconnect to complete
DISCONNECT_TIMEOUT_SECONDS = 10

# size of the buffer which reads data from the server
# if lower, more stanzas will be fragmented and processed twice
RECV_BUFSIZE = 32768 # 2x maximum size of ssl packet, should be plenty
#RECV_BUFSIZE = 16 # FIXME: (#2634) gajim breaks with this setting: it's inefficient but should work.

DATA_RECEIVED = 'DATA RECEIVED'
DATA_SENT = 'DATA SENT'

# debug tag used by the proxy classes' DEBUG() wrapper (as in transports.py,
# which this module is based on)
DBG_CONNECT_PROXY = 'CONNECTproxy'

DISCONNECTED = 'DISCONNECTED'
CONNECTING = 'CONNECTING'
CONNECTED = 'CONNECTED'
DISCONNECTING = 'DISCONNECTING'

class NonBlockingTcp(PlugIn, IdleObject):
	'''
	Non-blocking TCP socket wrapper.
	'''
	def __init__(self, on_disconnect):
		'''
		Class constructor.
		'''
		PlugIn.__init__(self)
		IdleObject.__init__(self)

		self.on_disconnect = on_disconnect

		self.on_connect = None
		self.on_connect_failure = None
		self.sock = None
		self.idlequeue = None
		self.on_receive = None
		self.DBG_LINE = 'socket'
		self.state = DISCONNECTED

		# writable, readable - keep state of the last plugged flags
		# This prevents replug of the same object with the same flags
		self.writable = True
		self.readable = False

		# queue with messages to be sent
		self.sendqueue = []

		# time to wait for SOME stanza to come and then send keepalive
		self.sendtimeout = 0

		# in case we want to do something different than sending keepalives
		self.on_timeout = None

		# bytes remaining from the last sent message
		self.sendbuff = ''
		self._exported_methods = [self.disconnect, self.onreceive,
			self.set_send_timeout, self.set_timeout, self.remove_timeout]

	def plugin(self, owner):
		owner.Connection = self
		log.debug('plugin called')
		self.idlequeue = owner.idlequeue

	def plugout(self):
		self._owner.Connection = None
		self._owner = None

	def get_fd(self):
		try:
			tmp = self._sock.fileno()
			return tmp
		except:
			return 0

	def connect(self, conn_5tuple, on_connect, on_connect_failure):
		'''
		Creates and connects a socket to the server and port defined in
		conn_5tuple, which should be a list item returned from getaddrinfo.
		:param conn_5tuple: 5-tuple returned from getaddrinfo
		:param on_connect: callback called on successful tcp connection
		:param on_connect_failure: callback called on failure when establishing
			the tcp connection
		'''
		self.on_connect = on_connect
		self.on_connect_failure = on_connect_failure
		(self.server, self.port) = conn_5tuple[4]
		log.debug('NonBlocking Connect :: About to connect to %s:%s' % conn_5tuple[4])
		try:
			self._sock = socket.socket(*conn_5tuple[:3])
		except socket.error, (errnum, errstr):
			on_connect_failure('NonBlockingTcp: Error while creating socket: %s %s' % (errnum, errstr))
			return

		self._send = self._sock.send
		self._recv = self._sock.recv
		self.fd = self._sock.fileno()
		self.idlequeue.plug_idle(self, True, False)

		# errno symbol and message that will be filled from an exception raised
		# by connect()
		errnum = 0
		errstr = 'No error'

		# set timeout for TCP connecting - if nonblocking connect() fails, pollend
		# is called. If it succeeds, pollout is called.
		self.idlequeue.set_read_timeout(self.get_fd(), CONNECT_TIMEOUT_SECONDS)

		try:
			self._sock.setblocking(False)
			self._sock.connect((self.server, self.port))
		except Exception, (errnum, errstr):
			pass

		if errnum in (errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK):
			# connecting in progress
			self.set_state(CONNECTING)
			log.debug('After connect. "%s" raised => CONNECTING' % errstr)
			# on_connect/failure will be called from self.pollin/self.pollout
			return
		elif errnum in (0, 10056, errno.EISCONN):
			# already connected - this branch is very unlikely, nonblocking connect()
			# will raise EINPROGRESS in most cases. When here, we don't need a timeout
			# on the connected descriptor and the success callback can be called.
			log.debug('After connect. "%s" raised => CONNECTED' % errstr)
			self._on_connect(self)
			return

		# if there was some other error, call failure callback and unplug transport
		# which will also remove read_timeouts for descriptor
		self._on_connect_failure('Exception while connecting to %s:%s - %s %s' %
			(self.server, self.port, errnum, errstr))

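	# A rough usage sketch (commented out; 'owner' is a hypothetical object that
	# provides an .idlequeue attribute, it is not defined in this module):
	#
	#   def on_ok():
	#       print 'connected'
	#   def on_fail(err_message):
	#       print 'failed:', err_message
	#
	#   conn = NonBlockingTcp(on_disconnect=lambda: None)
	#   conn.PlugIn(owner)   # the PlugIn base class will call plugin(owner) above
	#   addrinfo = socket.getaddrinfo('example.org', 5222,
	#       socket.AF_UNSPEC, socket.SOCK_STREAM)
	#   conn.connect(addrinfo[0], on_connect=on_ok, on_connect_failure=on_fail)
	#   # pollin/pollout/pollend are then driven by the owner's idlequeue.
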
	def _on_connect(self, data):
		''' precedes the call of the on_connect callback '''
		self.set_state(CONNECTED)
		self.idlequeue.remove_timeout(self.get_fd())
		self.on_connect()

	def set_state(self, newstate):
		assert(newstate in [DISCONNECTED, CONNECTING, CONNECTED, DISCONNECTING])
		if (self.state, newstate) in [(CONNECTING, DISCONNECTING), (DISCONNECTED, DISCONNECTING)]:
			log.info('strange move: %s -> %s' % (self.state, newstate))
		self.state = newstate

	def _on_connect_failure(self, err_message):
		''' precedes the call of the on_connect_failure callback '''
		# In case of an error while connecting we need to close the socket
		# but we don't want to call DisconnectHandlers from the client,
		# thus the do_callback=False
		self.disconnect(do_callback=False)
		self.on_connect_failure(err_message=err_message)

	def pollin(self):
		'''called when receive on the plugged socket is possible'''
		log.debug('pollin called, state == %s' % self.state)
		self._do_receive()

	def pollout(self):
		'''called when send to the plugged socket is possible'''
		log.debug('pollout called, state == %s' % self.state)

		if self.state == CONNECTING:
			self._on_connect(self)
			return
		self._do_send()

	def pollend(self):
		log.debug('pollend called, state == %s' % self.state)

		if self.state == CONNECTING:
			self._on_connect_failure('Error during connect to %s:%s' %
				(self.server, self.port))
		else:
			self.disconnect()

	def disconnect(self, do_callback=True):
		if self.state == DISCONNECTED:
			return
		self.idlequeue.unplug_idle(self.get_fd())
		try:
			self._sock.shutdown(socket.SHUT_RDWR)
			self._sock.close()
		except socket.error, (errnum, errstr):
			log.error('Error disconnecting a socket: %s %s' % (errnum, errstr))
		self.set_state(DISCONNECTED)
		if do_callback:
			# invoke callback given in __init__
			self.on_disconnect()

	def read_timeout(self):
		'''
		Implementation of the IdleObject function called on timeouts from IdleQueue.
		'''
		log.debug('read_timeout called, state == %s' % self.state)
		if self.state == CONNECTING:
			# if read_timeout is called during connecting, connect() didn't end yet
			# thus we have to call the tcp failure callback
			self._on_connect_failure('Error during connect to %s:%s' %
				(self.server, self.port))
		else:
			if self.on_timeout:
				self.on_timeout()
			self.renew_send_timeout()

	def renew_send_timeout(self):
		if self.on_timeout and self.sendtimeout > 0:
			self.set_timeout(self.sendtimeout)
		else:
			self.remove_timeout()

	def set_send_timeout(self, timeout, on_timeout):
		self.sendtimeout = timeout
		if self.sendtimeout > 0:
			self.on_timeout = on_timeout
		else:
			self.on_timeout = None

	def set_timeout(self, timeout):
		if self.state in [CONNECTING, CONNECTED] and self.get_fd() > 0:
			self.idlequeue.set_read_timeout(self.get_fd(), timeout)

	def remove_timeout(self):
		if self.get_fd():
			self.idlequeue.remove_timeout(self.get_fd())

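	# Example (commented out): arrange for a keepalive callback when nothing has
	# been received for a while; 'conn' is a hypothetical connected instance and
	# 'send_keepalive' a hypothetical callable, neither defined in this module:
	#
	#   conn.set_send_timeout(55, send_keepalive)
	#   conn.renew_send_timeout()   # arm the timer; read_timeout() will fire it
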
	def send(self, raw_data, now=False):
		'''Appends raw_data to the queue of messages to be sent.
		If the supplied data is a unicode string, it is encoded to utf-8.
		'''

		if self.state not in [CONNECTED, DISCONNECTING]:
			log.error('Trying to send %s when transport is %s.' %
				(raw_data, self.state))
			return
		r = raw_data
		if isinstance(r, unicode):
			r = r.encode('utf-8')
		elif not isinstance(r, str):
			r = ustr(r).encode('utf-8')
		if now:
			self.sendqueue.insert(0, r)
			self._do_send()
		else:
			self.sendqueue.append(r)
		self._plug_idle()

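	# Example (commented out): queued vs. immediate sending; 'conn' is a
	# hypothetical connected instance. Unicode input is encoded to utf-8 before
	# it reaches the socket; now=True puts the data at the front of the queue
	# and tries to flush it right away.
	#
	#   conn.send(u'<presence/>')                  # appended to sendqueue
	#   conn.send('</stream:stream>', now=True)    # sent as soon as possible
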
	def _plug_idle(self):
		# readable if socket is connected or disconnecting
		readable = self.state != DISCONNECTED
		# writable if there is something to send
		if self.sendqueue or self.sendbuff:
			writable = True
		else:
			writable = False
		log.debug('About to plug fd %d, W:%s, R:%s' % (self.get_fd(), writable, readable))
		if self.writable != writable or self.readable != readable:
			log.debug('Really plugging fd %d, W:%s, R:%s' % (self.get_fd(), writable, readable))
			self.idlequeue.plug_idle(self, writable, readable)
		else:
			log.debug('Not plugging - already plugged with the same flags')

	def _do_send(self):
		if not self.sendbuff:
			if not self.sendqueue:
				return None # nothing to send
			self.sendbuff = self.sendqueue.pop(0)
		try:
			send_count = self._send(self.sendbuff)
			if send_count:
				sent_data = self.sendbuff[:send_count]
				self.sendbuff = self.sendbuff[send_count:]
				self._plug_idle()
				self._raise_event(DATA_SENT, sent_data)
		except socket.error, e:
			log.error('_do_send:', exc_info=True)
			traceback.print_exc()
			self.disconnect()

	def _raise_event(self, event_type, data):
		if data and data.strip():
			log.debug('raising event from transport: %s %s' % (event_type, data))
			if hasattr(self._owner, 'Dispatcher'):
				self._owner.Dispatcher.Event('', event_type, data)

	def onreceive(self, recv_handler):
		''' Sets the on_receive callback. Do not confuse it with
		the on_receive() method, which is the callback itself.'''
		if not recv_handler:
			if hasattr(self._owner, 'Dispatcher'):
				self.on_receive = self._owner.Dispatcher.ProcessNonBlocking
			else:
				self.on_receive = None
			return
		self.on_receive = recv_handler

	def _do_receive(self):
		''' Reads all pending incoming data. Calls owner's disconnected() method if appropriate.'''
		ERR_DISCONN = -2 # Misc error signifying that we got disconnected
		received = None
		errnum = 0
		errstr = 'No Error Set'

		try:
			# get as many bytes as possible, but not more than RECV_BUFSIZE
			received = self._recv(RECV_BUFSIZE)
		except (socket.error, socket.herror, socket.gaierror), (errnum, errstr):
			# save exception number and message to errnum, errstr
			log.debug("_do_receive: got %s:" % received, exc_info=True)

		if received == '':
			errnum = ERR_DISCONN
			errstr = "Connection closed unexpectedly"

		if errnum in (ERR_DISCONN, errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN):
			# ECONNRESET - connection you are trying to access has been reset by the peer
			# ENOTCONN - Transport endpoint is not connected
			# ESHUTDOWN - shutdown(2) has been called on a socket to close down the
			# sending end of the transmission, and then data was attempted to be sent
			log.error("Connection to %s lost: %s %s" % (self.server, errnum, errstr))
			self.disconnect()
			return

		if received is None:
			# in case of some other exception
			# FIXME: is this needed??
			if errnum != 0:
				log.error("Connection to %s lost: %s %s" % (self.server, errnum, errstr))
				if self.state in [CONNECTING, CONNECTED]:
					self.pollend()
				return
			received = ''

		# we have received some bytes, stop the timeout!
		self.renew_send_timeout()
		if self.on_receive:
			self._raise_event(DATA_RECEIVED, received)
			self._on_receive(received)
		else:
			# This should never happen, so we need the debug
			log.error('SOCKET Unhandled data received: %s' % received)
			self.disconnect()

	def _on_receive(self, data):
		# Overriding this method allows modifying received data before it is
		# passed to the on_receive callback.
		self.on_receive(data)

class NonBlockingHttpBOSH(NonBlockingTcp):
	'''
	Socket wrapper that makes an HTTP message out of the sent data and peels off
	HTTP headers from incoming messages.
	'''

	def __init__(self, bosh_uri, bosh_port, on_disconnect):
		self.bosh_protocol, self.bosh_host, self.bosh_path = urisplit(bosh_uri)
		if self.bosh_protocol is None:
			self.bosh_protocol = 'http'
		if self.bosh_path == '':
			self.bosh_path = '/'
		self.bosh_port = bosh_port
		NonBlockingTcp.__init__(self, on_disconnect)

	def send(self, raw_data, now=False):
		NonBlockingTcp.send(
			self,
			self.build_http_message(raw_data),
			now)

	def _on_receive(self, data):
		'''Precedes passing of the received data to the Client class. Gets rid
		of the HTTP headers and checks them.'''
		statusline, headers, httpbody = self.parse_http_message(data)
		if statusline[1] != '200':
			log.error('HTTP Error: %s %s' % (statusline[1], statusline[2]))
			self.disconnect()
		self.on_receive(httpbody)

	def build_http_message(self, httpbody):
		'''
		Builds a BOSH HTTP message with the given body.
		Values for the headers and status line fields are taken from class
		attributes.
		'''
		headers = ['POST %s HTTP/1.1' % self.bosh_path,
			'Host: %s:%s' % (self.bosh_host, self.bosh_port),
			'Content-Type: text/xml; charset=utf-8',
			'Content-Length: %s' % len(str(httpbody)),
			'\r\n']
		headers = '\r\n'.join(headers)
		return('%s%s\r\n' % (headers, httpbody))

	def parse_http_message(self, message):
		'''
		Splits an HTTP message into the tuple (
			statusline - list, e.g. ['HTTP/1.1', '200', 'OK'],
			headers - dictionary of headers, e.g. {'Content-Length': '604',
				'Content-Type': 'text/xml; charset=utf-8'},
			httpbody - string with the HTTP body
		)
		'''
		message = message.replace('\r', '')
		(header, httpbody) = message.split('\n\n')
		header = header.split('\n')
		statusline = header[0].split(' ')
		header = header[1:]
		headers = {}
		for dummy in header:
			row = dummy.split(' ', 1)
			headers[row[0][:-1]] = row[1]
		return (statusline, headers, httpbody)

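# Sketch of the HTTP framing done by NonBlockingHttpBOSH (commented out;
# 'bosh_conn' is a hypothetical instance assumed to wrap
# 'http://httpcm.jabber.org/http-bind' on port 5280, and the body is a made-up
# BOSH stanza):
#
#   body = "<body rid='1' xmlns='http://jabber.org/protocol/httpbind'/>"
#   msg = bosh_conn.build_http_message(body)
#   # msg starts with "POST /http-bind HTTP/1.1\r\nHost: httpcm.jabber.org:5280\r\n..."
#
#   # parse_http_message() reverses this framing on replies:
#   statusline, headers, httpbody = bosh_conn.parse_http_message(
#       'HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\n<br/>')
#   # statusline == ['HTTP/1.1', '200', 'OK'], headers == {'Content-Length': '5'},
#   # httpbody == '<br/>'
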
USE_PYOPENSSL = False

try:
	#raise ImportError("Manually disabled PyOpenSSL")
	import OpenSSL.SSL
	import OpenSSL.crypto
	USE_PYOPENSSL = True
	log.info("PyOpenSSL loaded")
except ImportError:
	log.debug("Import of PyOpenSSL failed:", exc_info=True)

	# FIXME: Remove these prints before release, replace with a warning dialog.
	print >> sys.stderr, "=" * 79
	print >> sys.stderr, "PyOpenSSL not found, falling back to Python builtin SSL objects (insecure)."
	print >> sys.stderr, "=" * 79

def torf(cond, tv, fv):
	if cond: return tv
	return fv

def gattr(obj, attr, default=None):
	try:
		return getattr(obj, attr)
	except:
		return default

class SSLWrapper:
	class Error(IOError):
		def __init__(self, sock=None, exc=None, errno=None, strerror=None, peer=None):
			self.parent = IOError

			errno = errno or gattr(exc, 'errno')
			strerror = strerror or gattr(exc, 'strerror') or gattr(exc, 'args')
			if not isinstance(strerror, basestring): strerror = repr(strerror)

			self.sock = sock
			self.exc = exc
			self.peer = peer
			self.exc_name = None
			self.exc_args = None
			self.exc_str = None
			self.exc_repr = None

			if self.exc is not None:
				self.exc_name = str(self.exc.__class__)
				self.exc_args = gattr(self.exc, 'args')
				self.exc_str = str(self.exc)
				self.exc_repr = repr(self.exc)
				if not errno:
					try:
						if isinstance(exc, OpenSSL.SSL.SysCallError):
							if self.exc_args[0] > 0:
								errno = self.exc_args[0]
							strerror = self.exc_args[1]
					except: pass

			self.parent.__init__(self, errno, strerror)

			if self.peer is None and sock is not None:
				try:
					ppeer = self.sock.getpeername()
					if len(ppeer) == 2 and isinstance(ppeer[0], basestring) \
					and isinstance(ppeer[1], int):
						self.peer = ppeer
				except: pass

		def __str__(self):
			s = str(self.__class__)
			if self.peer: s += " for %s:%d" % self.peer
			if self.errno is not None: s += ": [Errno: %d]" % self.errno
			if self.strerror: s += " (%s)" % self.strerror
			if self.exc_name:
				s += ", Caused by %s" % self.exc_name
				if self.exc_str:
					if self.strerror: s += "(%s)" % self.exc_str
					else: s += "(%s)" % str(self.exc_args)
			return s

	def __init__(self, sslobj, sock=None):
		self.sslobj = sslobj
		self.sock = sock
		log.debug("%s.__init__ called with %s", self.__class__, sslobj)

	def recv(self, data, flags=None):
		""" Receive wrapper for SSL object

		We can return None out of this function to signal that no data is
		available right now. Better than an exception, which differs
		depending on which SSL lib we're using. Unfortunately returning ''
		can indicate that the socket has been closed, so to be sure, we avoid
		this by returning None. """

		raise NotImplementedError()

	def send(self, data, flags=None, now=False):
		raise NotImplementedError()

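# Note on the recv() contract above (illustrative only): the concrete wrappers
# below return None when no decrypted data is available yet, raise
# SSLWrapper.Error on real failures, and may return '' when the underlying
# socket was closed, so a caller typically does something like:
#
#   data = wrapper.recv(RECV_BUFSIZE)
#   if data is None:
#       pass            # nothing pending, poll again later
#   elif data == '':
#       ...             # treat as a closed connection
#   else:
#       ...             # hand the decrypted bytes to the dispatcher
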
class PyOpenSSLWrapper(SSLWrapper):
	'''Wrapper class for PyOpenSSL's recv() and send() methods'''

	def __init__(self, *args):
		self.parent = SSLWrapper
		self.parent.__init__(self, *args)

	def is_numtoolarge(self, e):
		t = ('asn1 encoding routines', 'a2d_ASN1_OBJECT', 'first num too large')
		return isinstance(e.args, (list, tuple)) and len(e.args) == 1 and \
			isinstance(e.args[0], (list, tuple)) and len(e.args[0]) == 2 and \
			e.args[0][0] == e.args[0][1] == t

	def recv(self, bufsize, flags=None):
		retval = None
		try:
			if flags is None: retval = self.sslobj.recv(bufsize)
			else: retval = self.sslobj.recv(bufsize, flags)
		except (OpenSSL.SSL.WantReadError, OpenSSL.SSL.WantWriteError), e:
			pass
			# log.debug("Recv: " + repr(e))
		except OpenSSL.SSL.SysCallError, e:
			log.debug("Recv: Got OpenSSL.SSL.SysCallError: " + repr(e), exc_info=True)
			#traceback.print_exc()
			raise SSLWrapper.Error(self.sock or self.sslobj, e)
		except OpenSSL.SSL.Error, e:
			if self.is_numtoolarge(e):
				# warn, but ignore this exception
				log.warning("Recv: OpenSSL: asn1enc: first num too large (ignored)")
			else:
				log.debug("Recv: Caught OpenSSL.SSL.Error:", exc_info=True)
				#traceback.print_exc()
				#print "Current Stack:"
				#traceback.print_stack()
				raise SSLWrapper.Error(self.sock or self.sslobj, e)
		return retval

	def send(self, data, flags=None, now=False):
		try:
			if flags is None: return self.sslobj.send(data)
			else: return self.sslobj.send(data, flags)
		except (OpenSSL.SSL.WantReadError, OpenSSL.SSL.WantWriteError), e:
			#log.debug("Send: " + repr(e))
			time.sleep(0.1) # prevent 100% CPU usage
		except OpenSSL.SSL.SysCallError, e:
			log.error("Send: Got OpenSSL.SSL.SysCallError: " + repr(e), exc_info=True)
			#traceback.print_exc()
			raise SSLWrapper.Error(self.sock or self.sslobj, e)
		except OpenSSL.SSL.Error, e:
			if self.is_numtoolarge(e):
				# warn, but ignore this exception
				log.warning("Send: OpenSSL: asn1enc: first num too large (ignored)")
			else:
				log.error("Send: Caught OpenSSL.SSL.Error:", exc_info=True)
				#traceback.print_exc()
				#print "Current Stack:"
				#traceback.print_stack()
				raise SSLWrapper.Error(self.sock or self.sslobj, e)
		return 0

class StdlibSSLWrapper(SSLWrapper):
	'''Wrapper class for Python's socket.ssl read() and write() methods'''

	def __init__(self, *args):
		self.parent = SSLWrapper
		self.parent.__init__(self, *args)

	def recv(self, bufsize, flags=None):
		# we simply ignore flags since the ssl object doesn't support them
		try:
			return self.sslobj.read(bufsize)
		except socket.sslerror, e:
			#log.debug("Recv: Caught socket.sslerror:", exc_info=True)
			#traceback.print_exc()
			if e.args[0] not in (socket.SSL_ERROR_WANT_READ, socket.SSL_ERROR_WANT_WRITE):
				raise SSLWrapper.Error(self.sock or self.sslobj, e)
		return None

	def send(self, data, flags=None, now=False):
		# we simply ignore flags since the ssl object doesn't support them
		try:
			return self.sslobj.write(data)
		except socket.sslerror, e:
			#log.debug("Send: Caught socket.sslerror:", exc_info=True)
			#traceback.print_exc()
			if e.args[0] not in (socket.SSL_ERROR_WANT_READ, socket.SSL_ERROR_WANT_WRITE):
				raise SSLWrapper.Error(self.sock or self.sslobj, e)
		return 0

class NonBlockingTLS(PlugIn):
	''' TLS connection used to encrypt an already established tcp connection.'''

	# from ssl.h (partial extract)
	ssl_h_bits = { "SSL_ST_CONNECT": 0x1000, "SSL_ST_ACCEPT": 0x2000,
		"SSL_CB_LOOP": 0x01, "SSL_CB_EXIT": 0x02,
		"SSL_CB_READ": 0x04, "SSL_CB_WRITE": 0x08,
		"SSL_CB_ALERT": 0x4000,
		"SSL_CB_HANDSHAKE_START": 0x10, "SSL_CB_HANDSHAKE_DONE": 0x20}

	def PlugIn(self, owner, on_tls_success, on_tls_failure, now=0):
		''' If the 'now' argument is true, starts using encryption immediately.
		If 'now' is false, starts encryption as soon as the TLS feature is
		declared by the server (if it was already declared - that is ok).
		'''
		if owner.__dict__.has_key('NonBlockingTLS'):
			return # Already enabled.
		PlugIn.PlugIn(self, owner)
		self.DBG_LINE = 'NonBlockingTLS'
		self.on_tls_success = on_tls_success
		self.on_tls_failure = on_tls_failure
		if now:
			try:
				res = self._startSSL()
			except Exception, e:
				log.error("PlugIn: while trying _startSSL():", exc_info=True)
				#traceback.print_exc()
				self._owner.socket.pollend()
				return
			on_tls_success()
			return res
		if self._owner.Dispatcher.Stream.features:
			try:
				self.FeaturesHandler(self._owner.Dispatcher, self._owner.Dispatcher.Stream.features)
			except NodeProcessed:
				pass
		else:
			self._owner.RegisterHandlerOnce('features', self.FeaturesHandler, xmlns=NS_STREAMS)
		self.starttls = None

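	# Rough usage sketch (commented out; 'client' is a hypothetical client object
	# that owns a Connection and a Dispatcher, as PlugIn() above expects):
	#
	#   def tls_ok():
	#       print 'TLS established'
	#   def tls_failed(why):
	#       print 'TLS failed:', why
	#
	#   NonBlockingTLS().PlugIn(client, tls_ok, tls_failed)         # wait for <starttls/>
	#   NonBlockingTLS().PlugIn(client, tls_ok, tls_failed, now=1)  # legacy SSL, wrap at once
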
	def plugout(self, now=0):
		''' Unregisters the TLS handlers from the owner's dispatcher. Take note that
		encryption can not be stopped once started. You can only break the connection
		and start over.'''
		# if dispatcher is not plugged we cannot (un)register handlers
		if self._owner.__dict__.has_key('Dispatcher'):
			self._owner.UnregisterHandler('features', self.FeaturesHandler, xmlns=NS_STREAMS)
			self._owner.Dispatcher.PlugOut()
		self._owner = None

	def FeaturesHandler(self, conn, feats):
		''' Used to analyse the server <features/> tag for TLS support.
		If TLS is supported, starts the encryption negotiation. Used internally.'''
		if not feats.getTag('starttls', namespace=NS_TLS):
			self.DEBUG("TLS unsupported by remote server.", 'warn')
			self.on_tls_failure("TLS unsupported by remote server.")
			return
		self.DEBUG("TLS supported by remote server. Requesting TLS start.", 'ok')
		self._owner.RegisterHandlerOnce('proceed', self.StartTLSHandler, xmlns=NS_TLS)
		self._owner.RegisterHandlerOnce('failure', self.StartTLSHandler, xmlns=NS_TLS)
		self._owner.send('<starttls xmlns="%s"/>' % NS_TLS)
		raise NodeProcessed

	def _dumpX509(self, cert, stream=sys.stderr):
		print >> stream, "Digest (SHA-1):", cert.digest("sha1")
		print >> stream, "Digest (MD5):", cert.digest("md5")
		print >> stream, "Serial #:", cert.get_serial_number()
		print >> stream, "Version:", cert.get_version()
		print >> stream, "Expired:", torf(cert.has_expired(), "Yes", "No")
		print >> stream, "Subject:"
		self._dumpX509Name(cert.get_subject(), stream)
		print >> stream, "Issuer:"
		self._dumpX509Name(cert.get_issuer(), stream)
		self._dumpPKey(cert.get_pubkey(), stream)

	def _dumpX509Name(self, name, stream=sys.stderr):
		print >> stream, "X509Name:", str(name)

	def _dumpPKey(self, pkey, stream=sys.stderr):
		typedict = {OpenSSL.crypto.TYPE_RSA: "RSA", OpenSSL.crypto.TYPE_DSA: "DSA"}
		print >> stream, "PKey bits:", pkey.bits()
		print >> stream, "PKey type: %s (%d)" % (typedict.get(pkey.type(), "Unknown"), pkey.type())

	def _startSSL(self):
		''' Immediately switch the socket to TLS mode. Used internally.'''
		log.debug("_startSSL called")
		if USE_PYOPENSSL: return self._startSSL_pyOpenSSL()
		return self._startSSL_stdlib()

	def _startSSL_pyOpenSSL(self):
		#log.debug("_startSSL_pyOpenSSL called, thread id: %s", str(thread.get_ident()))
		log.debug("_startSSL_pyOpenSSL called")
		tcpsock = self._owner.Connection
		# FIXME: should method be configurable?
		tcpsock._sslContext = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
		#tcpsock._sslContext = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
		tcpsock.ssl_errnum = 0
		tcpsock._sslContext.set_verify(OpenSSL.SSL.VERIFY_PEER, self._ssl_verify_callback)
		cacerts = os.path.join('../data', 'other', 'cacerts.pem')
		try:
			tcpsock._sslContext.load_verify_locations(cacerts)
		except:
			log.warning('Unable to load SSL certificates from file %s' % \
				os.path.abspath(cacerts))
		# load users certs
		if os.path.isfile('%s/.gajim/cacerts.pem' % os.environ['HOME']):
			store = tcpsock._sslContext.get_cert_store()
			f = open('%s/.gajim/cacerts.pem' % os.environ['HOME'])
			lines = f.readlines()
			i = 0
			begin = -1
			for line in lines:
				if 'BEGIN CERTIFICATE' in line:
					begin = i
				elif 'END CERTIFICATE' in line and begin > -1:
					cert = ''.join(lines[begin:i+2])
					try:
						X509cert = OpenSSL.crypto.load_certificate(
							OpenSSL.crypto.FILETYPE_PEM, cert)
						store.add_cert(X509cert)
					except OpenSSL.crypto.Error, exception_obj:
						log.warning('Unable to load a certificate from file %s: %s' %\
							('%s/.gajim/cacerts.pem' % os.environ['HOME'], exception_obj.args[0][0][2]))
					except:
						log.warning(
							'Unknown error while loading certificate from file %s' % \
							'%s/.gajim/cacerts.pem' % os.environ['HOME'])
					begin = -1
				i += 1
		tcpsock._sslObj = OpenSSL.SSL.Connection(tcpsock._sslContext, tcpsock._sock)
		tcpsock._sslObj.set_connect_state() # set to client mode

		wrapper = PyOpenSSLWrapper(tcpsock._sslObj)
		tcpsock._recv = wrapper.recv
		tcpsock._send = wrapper.send

		log.debug("Initiating handshake...")
		# FIXME: Figure out why _connect_success is called before the
		# SSL handshake is completed in STARTTLS mode. See #2838.
		tcpsock._sslObj.setblocking(True)
		try:
			self.starttls = 'in progress'
			tcpsock._sslObj.do_handshake()
		except:
			log.error('Error while TLS handshake: ', exc_info=True)
			self.on_tls_failure('Error while TLS Handshake')
			return
		tcpsock._sslObj.setblocking(False)
		log.debug("Synchronous handshake completed")
		#log.debug("Async handshake started...")

		# fake it, for now
		self.starttls = 'success'

	def _startSSL_stdlib(self):
		log.debug("_startSSL_stdlib called")
		tcpsock = self._owner.Connection
		tcpsock._sock.setblocking(True)
		tcpsock._sslObj = socket.ssl(tcpsock._sock, None, None)
		tcpsock._sock.setblocking(False)
		tcpsock._sslIssuer = tcpsock._sslObj.issuer()
		tcpsock._sslServer = tcpsock._sslObj.server()
		wrapper = StdlibSSLWrapper(tcpsock._sslObj, tcpsock._sock)
		tcpsock._recv = wrapper.recv
		tcpsock._send = wrapper.send
		self.starttls = 'success'

	def _ssl_verify_callback(self, sslconn, cert, errnum, depth, ok):
		# Exceptions can't propagate up through this callback, so print them here.
		try:
			self._owner.Connection.ssl_fingerprint_sha1 = cert.digest('sha1')
			if errnum == 0:
				return True
			self._owner.Connection.ssl_errnum = errnum
			self._owner.Connection.ssl_cert_pem = OpenSSL.crypto.dump_certificate(
				OpenSSL.crypto.FILETYPE_PEM, cert)
			return True
		except:
			log.error("Exception caught in _ssl_info_callback:", exc_info=True)
			traceback.print_exc() # Make sure something is printed, even if log is disabled.

	def StartTLSHandler(self, conn, starttls):
		''' Handles the server reply on whether TLS is allowed to proceed, and
		behaves accordingly. Used internally.'''
		if starttls.getNamespace() != NS_TLS:
			self.on_tls_failure('Unknown namespace: %s' % starttls.getNamespace())
			return
		self.starttls = starttls.getName()
		if self.starttls == 'failure':
			self.on_tls_failure('TLS <failure> received: %s' % self.starttls)
			return
		self.DEBUG('Got starttls proceed response. Switching to TLS/SSL...', 'ok')
		try:
			self._startSSL()
		except Exception, e:
			log.error("StartTLSHandler:", exc_info=True)
			self.on_tls_failure('in StartTLSHandler')
			#traceback.print_exc()
			return
		self._owner.Dispatcher.PlugOut()
		self.on_tls_success()
		#dispatcher_nb.Dispatcher().PlugIn(self._owner)

class NBProxySocket(NonBlockingTcp):
	'''
	Interface for proxy socket wrappers - when tunneling XMPP over proxies,
	some connecting process usually has to be done before opening the stream.
	'''
	def __init__(self, on_disconnect, xmpp_server, proxy_creds=(None, None)):
		self.proxy_user, self.proxy_pass = proxy_creds
		self.xmpp_server = xmpp_server
		NonBlockingTcp.__init__(self, on_disconnect)

	def connect(self, conn_5tuple, on_connect, on_connect_failure):
		'''
		The connect method is extended by the proxy credentials and the xmpp server
		hostname and port because those are needed for the proxy connecting process.
		The idea is to insert the proxy-specific mechanism after the TCP connect and
		before the XMPP stream opening (which is done from the client).
		'''

		self.after_proxy_connect = on_connect

		NonBlockingTcp.connect(self,
			conn_5tuple=conn_5tuple,
			on_connect=self._on_tcp_connect,
			on_connect_failure=on_connect_failure)

	def _on_tcp_connect(self):
		pass

class NBHTTPProxySocket(NBProxySocket):
	''' This class can be used instead of NonBlockingTcp.
	HTTP (CONNECT) proxy connection class. Allows to use HTTP proxies like squid
	with (optionally) simple authentication (using login and password).
	'''

	def _on_tcp_connect(self):
		''' Starts the connection. Connects to the proxy, supplies login and
		password to it (if they were specified while creating the instance).
		Instructs the proxy to make a connection to the target server. '''
		log.debug('Proxy server contacted, performing authentication')
		connector = ['CONNECT %s:%s HTTP/1.0' % self.xmpp_server,
			'Proxy-Connection: Keep-Alive',
			'Pragma: no-cache',
			'Host: %s:%s' % self.xmpp_server,
			'User-Agent: HTTPPROXYsocket/v0.1']
		if self.proxy_user and self.proxy_pass:
			credentials = '%s:%s' % (self.proxy_user, self.proxy_pass)
			credentials = base64.encodestring(credentials).strip()
			connector.append('Proxy-Authorization: Basic ' + credentials)
		connector.append('\r\n')
		self.onreceive(self._on_headers_sent)
		self.send('\r\n'.join(connector))

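	# For reference, with xmpp_server == ('jabber.example.org', 5222) and no
	# proxy credentials, the request built above looks roughly like this
	# (sketch, not captured output; each line is terminated by \r\n and the
	# request ends with an empty line):
	#
	#   CONNECT jabber.example.org:5222 HTTP/1.0
	#   Proxy-Connection: Keep-Alive
	#   Pragma: no-cache
	#   Host: jabber.example.org:5222
	#   User-Agent: HTTPPROXYsocket/v0.1
	#
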
	def _on_headers_sent(self, reply):
		if reply is None:
			return
		self.reply = reply.replace('\r', '')
		try:
			proto, code, desc = reply.split('\n')[0].split(' ', 2)
		except:
			log.error("_on_headers_sent:", exc_info=True)
			#traceback.print_exc()
			self._on_connect_failure('Invalid proxy reply')
			return
		if code != '200':
			log.error('Invalid proxy reply: %s %s %s' % (proto, code, desc))
			self._on_connect_failure('Invalid proxy reply')
			return
		if len(reply) != 2:
			pass
		self.onreceive(self._on_proxy_auth)

	def _on_proxy_auth(self, reply):
		if self.reply.find('\n\n') == -1:
			if reply is None:
				self._on_connect_failure('Proxy authentication failed')
				return
			if reply.find('\n\n') == -1:
				self.reply += reply.replace('\r', '')
				self._on_connect_failure('Proxy authentication failed')
				return
		log.debug('Authentication successful. Jabber server contacted.')
		self._on_connect(self)

class NBSOCKS5ProxySocket(NBProxySocket):
	'''SOCKS5 proxy connection class. Uses TCPsocket as the base class,
	redefines only the connect method. Allows to use SOCKS5 proxies with
	(optionally) simple authentication (only USERNAME/PASSWORD auth).
	'''
	# TODO: replace DEBUG with ordinary logging, replace on_proxy_failure() with
	# _on_connect_failure, at the end call _on_connect()

	def _on_tcp_connect(self):
		self.DEBUG('Proxy server contacted, performing authentication', 'start')
		if self.proxy.has_key('user') and self.proxy.has_key('password'):
			to_send = '\x05\x02\x00\x02'
		else:
			to_send = '\x05\x01\x00'
		self.onreceive(self._on_greeting_sent)
		self.send(to_send)

	def _on_greeting_sent(self, reply):
		if reply is None:
			return
		if len(reply) != 2:
			self.on_proxy_failure('Invalid proxy reply')
			return
		if reply[0] != '\x05':
			self.DEBUG('Invalid proxy reply', 'error')
			self._owner.disconnected()
			self.on_proxy_failure('Invalid proxy reply')
			return
		if reply[1] == '\x00':
			return self._on_proxy_auth('\x01\x00')
		elif reply[1] == '\x02':
			to_send = '\x01' + chr(len(self.proxy['user'])) + self.proxy['user'] +\
				chr(len(self.proxy['password'])) + self.proxy['password']
			self.onreceive(self._on_proxy_auth)
			self.send(to_send)
		else:
			if reply[1] == '\xff':
				self.DEBUG('Authentication to proxy impossible: no acceptable '
					'auth method', 'error')
				self._owner.disconnected()
				self.on_proxy_failure('Authentication to proxy impossible: no '
					'acceptable authentication method')
				return
			self.DEBUG('Invalid proxy reply', 'error')
			self._owner.disconnected()
			self.on_proxy_failure('Invalid proxy reply')
			return

	def _on_proxy_auth(self, reply):
		if reply is None:
			return
		if len(reply) != 2:
			self.DEBUG('Invalid proxy reply', 'error')
			self._owner.disconnected()
			self.on_proxy_failure('Invalid proxy reply')
			return
		if reply[0] != '\x01':
			self.DEBUG('Invalid proxy reply', 'error')
			self._owner.disconnected()
			self.on_proxy_failure('Invalid proxy reply')
			return
		if reply[1] != '\x00':
			self.DEBUG('Authentication to proxy failed', 'error')
			self._owner.disconnected()
			self.on_proxy_failure('Authentication to proxy failed')
			return
		self.DEBUG('Authentication successful. Jabber server contacted.', 'ok')
		# Request connection
		req = "\x05\x01\x00"
		# If the given destination address is an IP address, we'll
		# use the IPv4 address request even if remote resolving was specified.
		try:
			self.ipaddr = socket.inet_aton(self.server[0])
			req = req + "\x01" + self.ipaddr
		except socket.error:
			# Well it's not an IP number, so it's probably a DNS name.
			# if self.__proxy[3]==True:
			# Resolve remotely
			self.ipaddr = None
			req = req + "\x03" + chr(len(self.server[0])) + self.server[0]
			# else:
			# # Resolve locally
			# self.ipaddr = socket.inet_aton(socket.gethostbyname(self.server[0]))
			# req = req + "\x01" + ipaddr
		req = req + struct.pack(">H", self.server[1])
		self.onreceive(self._on_req_sent)
		self.send(req)

	def _on_req_sent(self, reply):
		if reply is None:
			return
		if len(reply) < 10:
			self.DEBUG('Invalid proxy reply', 'error')
			self._owner.disconnected()
			self.on_proxy_failure('Invalid proxy reply')
			return
		if reply[0] != '\x05':
			self.DEBUG('Invalid proxy reply', 'error')
			self._owner.disconnected()
			self.on_proxy_failure('Invalid proxy reply')
			return
		if reply[1] != "\x00":
			# Connection failed
			self._owner.disconnected()
			if ord(reply[1]) < 9:
				errors = ['general SOCKS server failure',
					'connection not allowed by ruleset',
					'Network unreachable',
					'Host unreachable',
					'Connection refused',
					'TTL expired',
					'Command not supported',
					'Address type not supported'
				]
				txt = errors[ord(reply[1])-1]
			else:
				txt = 'Invalid proxy reply'
			self.DEBUG(txt, 'error')
			self.on_proxy_failure(txt)
			return
		# Get the bound address/port
		elif reply[3] == "\x01":
			begin, end = 3, 7
		elif reply[3] == "\x03":
			begin, end = 4, 4 + reply[4]
		else:
			self.DEBUG('Invalid proxy reply', 'error')
			self._owner.disconnected()
			self.on_proxy_failure('Invalid proxy reply')
			return

		if self.on_connect_proxy:
			self.on_connect_proxy()

	def DEBUG(self, text, severity):
		''' Overwrites the DEBUG tag to allow debug output to be presented as "CONNECTproxy".'''
		return self._owner.DEBUG(DBG_CONNECT_PROXY, text, severity)