#!/usr/bin/env python
# RSS to MetaWeblog bridge
# Copyright (C) 2002 Phillip Pearson
# A little script to poll an RSS feed and post all the new items to a
# weblog using the MetaWeblog API.
# Built for the group-forming mailing list, so you'll want to change
# some of the language below if you have a different application.
# To use, save your login name in rpcLogin.txt ('echo foobar >
# rpcLogin.txt') and your password in rpcPassword.txt ('echo password
# > rpcPassword.txt'). Then change the URLs below.
# You'll also need a copy of Mark Pilgrim's RSS parser. You can fetch
# it from:
# http://diveintomark.org/projects/misc/rssparser.py.txt
# (Rename that to rssparser.py).
# Note that you need to have a blog already set up. I used Movable
# Type, with comments etc turned off. Blogger unfortunately won't do
# as it doesn't support posting titles. Radio UserLand should work
# fine, although you need a Windows box and will have to leave Radio
# running all the time.
# the RSS feed:
rssUrl = 'http://www.aquameta.com/gf/list.rss'

# the weblog data:
blogUrl = 'http://dev.myelin.co.nz/gf/'
rpcUrl = 'http://dev.myelin.co.nz/gf/mt-xmlrpc.cgi'

# Read the MetaWeblog login and password from one-line text files (see
# the header comment above).  Close the handles explicitly instead of
# leaving them for the garbage collector.
credFile = open( 'rpcLogin.txt' )
rpcLogin = credFile.readline().rstrip()
credFile.close()
credFile = open( 'rpcPassword.txt' )
rpcPassword = credFile.readline().rstrip()
credFile.close()

# file for caching recent posts
cacheFn = 'cache.dat'
# Thanks to Mark Pilgrim for the RSS parser, and the Python team for a
# very nice language.
__LICENSE__ = 'GPL'
import rssparser
import pickle
#import urllib; print urllib.urlopen( rssUrl ).read()
# Download and parse the RSS
rss = rssparser.parse( rssUrl )
# Pull out the channel metadata and the list of feed items.
channel = rss['channel']
items = rss['items']
#print channel
# Read cache
# Load the cache of previously-posted items (maps item link -> item).
# A missing cache file means this is the first run; a truncated one
# (EOFError) is treated the same way and we just start fresh.
try:
    cacheFile = open( cacheFn )
    pastItems = pickle.load( cacheFile )
    cacheFile.close()
except ( IOError, EOFError ):
    pastItems = {}
import xmlrpclib
s = xmlrpclib.Server( rpcUrl )

# Find the numeric blog ID for blogUrl.  getUsersBlogs takes an unused
# "appkey" as its first argument, hence the empty string.  Fail loudly
# if no blog matches, instead of hitting a NameError later.
rpcBlogId = None
for blog in s.blogger.getUsersBlogs( '', rpcLogin, rpcPassword ):
    if blog['url'] == blogUrl:
        rpcBlogId = blog['blogid']
        break
if rpcBlogId is None:
    raise RuntimeError( 'no blog with URL %s found for this login' % blogUrl )
items.reverse()
for item in items:
if not pastItems.has_key( item['link'] ):
print "new item",item
if item.has_key( 'description' ):
descr = item['description'].replace( '
', '' )
else:
descr = ''
descr += '\n(full text of this message)\n' % ( item['link'], )
item['description'] = descr
print s.metaWeblog.newPost( rpcBlogId, rpcLogin, rpcPassword, item, xmlrpclib.True )
# Write cache
pastItems = dict( [ ( i['link'], i ) for i in items ] )
pickle.dump( pastItems, open( cacheFn, 'w' ) )