#!/usr/bin/python

import os
import sys
import optparse
import re
import time, datetime
import xml.etree.ElementTree as ET
import xml.dom.minidom
from lxml import html
from BeautifulSoup import BeautifulSoup

try:
    from simplemediawiki import MediaWiki
except ImportError:
    print "Unable to import simplemediawiki. Is python-simplemediawiki installed?"
    sys.exit(1)


def parse_args():
    '''Set up the option parser'''
    parser = optparse.OptionParser(usage="%prog [options] <categorymembers|migration|pagelinks>")
    parser.add_option('-v', '--verbose', action='store_true', default=False,
                      help='Enable more verbose output')
    parser.add_option('-d', '--debug', action='store_true', default=False,
                      help='Enable debugging output')
    parser.add_option('--url', action='store',
                      default='https://fedoraproject.org/w/api.php',
                      help='API URL')

    # general
    optgrp = optparse.OptionGroup(parser, "General options")
    optgrp.add_option('-l', '--limit', action='store', default=5, type="int",
                      help='Limit recursion depth (%default)')
    parser.add_option_group(optgrp)

    # categorymembers (test cases) migration
    optgrp = optparse.OptionGroup(parser, "Options for 'categorymembers' command:")
    optgrp.add_option('-c', '--category', dest="categories", default=[], action="append",
                      help='Wiki category name whose members (test cases) are converted to a Nitrate xml file')
    parser.add_option_group(optgrp)

    # single test case migration
    optgrp = optparse.OptionGroup(parser, "Options for 'migration':")
    optgrp.add_option('-t', '--title', default='', action='store',
                      help='Page title of a single test case to convert to a Nitrate xml file')
    parser.add_option_group(optgrp)

    # page links (test cases) migration
    optgrp = optparse.OptionGroup(parser, "Options for 'pagelinks':")
    optgrp.add_option('-p', '--page', default='', action='store',
                      help='Page name whose links (test cases) are converted to a Nitrate xml file')
    parser.add_option_group(optgrp)

    (opts, args) = parser.parse_args()

    if len(args) == 0:
        parser.error("No action specified")
    else:
        action = args[0]

    # Validate inputs
    if action == 'categorymembers':
        if len(opts.categories) == 0:
            parser.error("Must specify at least one category (-c|--category)")
    elif action == 'migration':
        if opts.title == '':
            parser.error("Must specify a page (-t|--title)")
    elif action == 'pagelinks':
        if opts.page == '':
            parser.error("Must specify a page (-p|--page)")

    return (opts, action)


def parse(wiki, page):
    '''Parse a page and return its content'''
    query = dict(action='parse', page=page)
    if opts.debug:
        print query
    response = wiki.call(query)
    if opts.debug:
        print response
    return response.get('parse', {})


def return_links(wiki, page, limit=200):
    '''Return all links in the 'QA:' namespace from the given page'''
    query = dict(action='query', titles=page, prop='links',
                 pllimit=limit, plnamespace=104)
    if opts.debug:
        print query
    response = wiki.call(query)
    links = []
    for page in response.get('query', {}).get('pages', {}):
        links = [entry.get('title') for entry in
                 response.get('query', {}).get('pages', {}).get(page, {}).get('links', {})
                 if 'title' in entry]
    return links


def list_categorymembers(wiki, cat_page, limit=5):
    '''Return a list of pages belonging to a category page'''
    # Add 'Category:' prefix if not given
    if not cat_page.startswith("Category:"):
        cat_page = "Category:%s" % cat_page

    # Build query arguments and call wiki
    query = dict(action='query', list='categorymembers', cmtitle=cat_page)
    if opts.debug:
        print query
    response = wiki.call(query)
    members = [entry.get('title') for entry in
               response.get('query', {}).get('categorymembers', {})
               if 'title' in entry]

    # Determine whether we need to recurse
    idx = 0
    while True:
        if idx >= len(members) or limit <= 0:
            break
        # Recurse into sub-categories?
        if members[idx].startswith('Category:'):
            members.extend(list_categorymembers(wiki, members[idx], limit - 1))
            members.remove(members[idx])   # remove Category from list
        else:
            idx += 1

    return members


def repl(link):
    '''Add a full url address to links that only have a path.'''
    if link.startswith('/'):
        link = 'http://fedoraproject.org' + link
    return link


def extract_to_dict(string, titles):
    '''Extract wiki contents in html format and cache them in a table'''
    s_tag = string.get('categories', {})
    tag = []
    for t in s_tag:
        tag.append(t.get('*', ''))

    s_text = string.get('text', {}).get('*', '')
    s_text_polished = html.rewrite_links(s_text, repl)
    soup = BeautifulSoup(''.join(s_text_polished))

    table = {}
    table['title'] = titles
    if soup.find(id='Description') is None:
        table['description'] = ''
    else:
        table['description'] = soup.find(id='Description').findNext('p')
    if soup.find(id='Setup') is None:
        table['setup'] = ''
    else:
        table['setup'] = soup.find(id='Setup').findNext('ol')
    table['actions'] = soup.find(id='How_to_test').findNext('ol')
    table['results'] = soup.find(id='Expected_Results').findNext('ol')
    table['tag'] = tag
    return table


def nitrate_xml(table):
    '''Generate Nitrate format xml from a wiki test case'''
    head = ET.Element("testcase")
    head.attrib["author"] = "rhe@redhat.com"
    head.attrib["priority"] = "P1"
    head.attrib["automated"] = ""
    head.attrib["status"] = "PROPOSED"

    title = ET.SubElement(head, "summary")
    title.text = table['title']
    title = ET.SubElement(head, "categoryname")
    title.text = "--default--"
    title = ET.SubElement(head, "defaulttester")
    title = ET.SubElement(head, "notes")
    title.text = str(table['description'])
    title = ET.SubElement(head, "testplan_reference")
    title.attrib["type"] = "xml_description"
    title.text = "Fedora 15 Install Test Plan"
    title = ET.SubElement(head, "action")
    title.text = str(table['actions'])
    title = ET.SubElement(head, "expectedresults")
    title.text = str(table['results'])
    title = ET.SubElement(head, "setup")
    title.text = str(table['setup'])
    title = ET.SubElement(head, "breakdown")
    for tag in table['tag']:
        title = ET.SubElement(head, "tag")
        title.text = str(tag)

    return head


def write_to_file(xmlcases):
    '''Write the xml contents to a file'''
    root = ET.Element("testopia")
    root.attrib["version"] = "1.1"
    for case in xmlcases:
        root.append(case)

    string = ET.tostring(root)
    xml_dom = xml.dom.minidom.parseString(string)
    pretty_xml = xml_dom.toprettyxml()

    f = open('output.xml', 'w')
    f.write(pretty_xml)
    f.close()


if __name__ == "__main__":
    (opts, action) = parse_args()

    # Create mediawiki handle
    wiki = MediaWiki(opts.url)

    if action == 'categorymembers':
        pages = []
        for cat_page in opts.categories:
            members = list_categorymembers(wiki, cat_page, opts.limit)
            if members:
                print "\n".join(members)
                pages.extend(members)
            else:
                print "No data found for '%s'" % cat_page
                sys.exit(1)
    elif action == 'pagelinks':
        pages = return_links(wiki, opts.page)
    elif action == 'migration':
        pages = [opts.title]
    else:
        print "Unknown action requested '%s'" % action
        sys.exit(1)

    pagesxml = []
    for pagetitle in pages:
        if pagetitle.lower().find('testcase') == -1:
            print "The page '%s' is not a test case" % pagetitle
            continue
        pagestring = parse(wiki, pagetitle)
        pagetable = extract_to_dict(pagestring, pagetitle)
        if opts.debug:
            for key in pagetable.keys():
                print key, '\t', pagetable[key]
        pagesxml.append(nitrate_xml(pagetable))

    write_to_file(pagesxml)
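
# Usage sketch: each action converts wiki test case pages to a Nitrate/Testopia
# xml file (output.xml in the current directory). The script name and the wiki
# page/category names below are hypothetical examples, not taken from the source.
#
#   ./wiki2nitrate.py categorymembers -c 'Fedora_15_Install_Test_Cases'
#   ./wiki2nitrate.py pagelinks -p 'QA:Installation_test_cases'
#   ./wiki2nitrate.py migration -t 'QA:Testcase_Some_Feature'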