author    root <root@wlan-5-141.nay.redhat.com>  2011-06-10 18:44:27 +0800
committer root <root@wlan-5-141.nay.redhat.com>  2011-06-10 18:44:27 +0800
commit    ee92f962bffac5db5aa143c2055ed593ea3579e4 (patch)
tree      90afd4f8083d3553186c2d089916d484ecb3f41d
parent    7c36bd336f37e8b7ca834432abf9d53844177d59 (diff)
use the HTML-tagged page contents to generate the XML file
-rwxr-xr-x  wiki_to_nitrate_xml.py  88
1 file changed, 37 insertions(+), 51 deletions(-)
diff --git a/wiki_to_nitrate_xml.py b/wiki_to_nitrate_xml.py
index b473eab..c1706c5 100755
--- a/wiki_to_nitrate_xml.py
+++ b/wiki_to_nitrate_xml.py
@@ -7,6 +7,7 @@ import re
 import time, datetime
 import xml.etree.ElementTree as ET
 import xml.dom.minidom
+from BeautifulSoup import BeautifulSoup
 
 try:
     from simplemediawiki import MediaWiki
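
The new dependency is the BeautifulSoup 3 package. On a system that only ships bs4, the import could be shimmed as below; this is an illustrative assumption, not part of the patch:

    try:
        from BeautifulSoup import BeautifulSoup   # BeautifulSoup 3, as the patch imports
    except ImportError:
        from bs4 import BeautifulSoup             # assumption: bs4 keeps findNext() as a compatibility alias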
@@ -64,20 +65,12 @@ def parse_args():
 
 def parse(wiki, page):
     '''Parse a page and return content'''
-
-    # Build query arguments and call wiki
-    query = dict(action='query',
-                 prop='revisions',
-                 titles=page,
-                 rvprop='content')
+    query = dict(action='parse',
+                 page=page)
     if opts.debug: print query
     response = wiki.call(query)
-    for page in response.get('query',{}).get('pages',{}).values():
-        revs = page.get('revisions',[])
-        for t in revs:
-            return t.get('*','')
-    return ''
-
+    if opts.debug: print response
+    return response.get('parse',{})
 
 def list_categorymembers(wiki, cat_page, limit=5):
     '''Return a list of pages belonging to category page'''
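
For context, MediaWiki's parse action returns rendered HTML rather than raw wikitext, so parse() now hands back the whole 'parse' sub-dict of the API response. A minimal sketch of the shape that extract_html() consumes below, with an assumed endpoint and a hypothetical page title:

    # wiki = MediaWiki('https://fedoraproject.org/w/api.php')
    # string = parse(wiki, 'QA:Testcase_Example')   # hypothetical page
    # string is then roughly:
    #   {'title': 'QA:Testcase_Example',
    #    'text': {'*': '<p>...rendered page HTML...</p>'},
    #    'categories': [{'sortkey': '', '*': 'Some_Category'}, ...]}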
@@ -108,64 +101,57 @@ def list_categorymembers(wiki, cat_page, limit=5):
     return members
 
-def extract(s, titles):
-    w_dsc = s.find('|description=')
-    w_setup = s.find('|setup=')
-    w_action = s.find('|actions=')
-    w_result = s.find('|results=')
-    w_resultend = s.find('}}')
-    start = [w_resultend+1, w_resultend+1]
+def extract_html(string, titles):
+    '''extract wiki contents in html format and cache to table'''
+    s_text = string.get('text',{}).get('*','')
+    s_tag = string.get('categories',{})
     tag = []
-    w_tagstart = []
-    w_tagend = []
-    i = 0
-    while True:
-        #saved '[[' and ']]' to w_tagstart and w_tagend separately.
-        w_tagstart.append(s.find('[[', start[0]))
-        w_tagend.append(s.find(']]', start[1]))
-        if w_tagstart[i] == -1:
-            break
-        #saved category names to tag.
-        tag.append(s[(w_tagstart[i]+len('[[Category:')):w_tagend[i]])
-        start[0] = w_tagstart[i] + 1
-        start[1] = w_tagend[i] + 1
-        i += 1
+    for t in s_tag:
+        tag.append(t.get('*',''))
+    soup = BeautifulSoup(''.join(s_text))
     table = {}
     table['title'] = titles
-    if w_setup == -1:
-        table['description'] = s[(w_dsc+len('|description=')):w_action]
-        table['setup'] = ''
+    if soup.find(id='Description') == None:
+        table['description'] = ''
+    else:
+        table['description'] = soup.find(id='Description').findNext('p')
+    if soup.find(id='Setup') == None:
+        table['setup'] = ''
     else:
-        table['description'] = s[(w_dsc+len('|description=')):w_setup]
-        table['setup'] = s[(w_setup+len('|setup=')):w_action]
-    table['actions'] = s[(w_action+len('|actions=')):w_result]
-    table['results'] = s[(w_result+len('|results=')):w_resultend]
-    table['tag'] = tag
+        table['setup'] = soup.find(id='Setup').findNext('ol')
+    table['actions'] = soup.find(id='How_to_test').findNext('ol')
+    table['results'] = soup.find(id='Expected_Results').findNext('ol')
+    table['tag'] = tag
     return table
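
A self-contained illustration of the findNext() lookups above, using the BeautifulSoup 3 API this script imports; the HTML snippet is a made-up example of how MediaWiki renders section headings with mw-headline anchors:

    from BeautifulSoup import BeautifulSoup

    html = ('<h2><span class="mw-headline" id="Setup">Setup</span></h2>'
            '<ol><li>Boot the installer</li></ol>'
            '<h2><span class="mw-headline" id="How_to_test">How to test</span></h2>'
            '<ol><li>Run the test case</li></ol>')
    soup = BeautifulSoup(html)
    # locate the heading anchor by id, then take the first <ol> after it
    print soup.find(id='Setup').findNext('ol')
    # -> <ol><li>Boot the installer</li></ol>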
 
 def nitratexml(table):
-    #generate Nitrate format xml for wiki test case
+    '''generate Nitrate format xml from wiki test case'''
     root = ET.Element("testopia")
     root.attrib["version"] = "1.1"
     head = ET.SubElement(root, "testcase")
+    head.attrib["author"] = "rhe@redhat.com"
+    head.attrib["priority"] = "P1"
+    head.attrib["automated"] = ""
+    head.attrib["status"] = "PROPOSED"
     title = ET.SubElement(head, "summary")
     title.text = table['title']
     title = ET.SubElement(head, "categoryname")
+    title.text = "default"
     title = ET.SubElement(head, "defaulttester")
     title = ET.SubElement(head, "notes")
-    title.text = table['description']
+    title.text = str(table['description'])
     title = ET.SubElement(head, "testplan_reference")
     title.attrib["type"] = "xml_description"
     title.text = "Fedora 15 Install Test Plan"
     title = ET.SubElement(head, "action")
-    title.text = table['actions']
+    title.text = str(table['actions'])
     title = ET.SubElement(head, "expectedresults")
-    title.text = table['results']
+    title.text = str(table['results'])
     title = ET.SubElement(head, "setup")
-    title.text = table['setup']
+    title.text = str(table['setup'])
     title = ET.SubElement(head, "breakdown")
     title = ET.SubElement(head, "tag")
-    title.text = table['tag'][0]
+    title.text = str(table['tag'][0])
     tree = ET.ElementTree(root)
     tree.write("output.xml", encoding="UTF-8", xml_declaration=True)
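
The added str() calls matter because findNext() returns BeautifulSoup Tag objects rather than strings, and an ElementTree .text field needs a string. For the pretty-printed string that __main__ prints, the tree can be round-tripped through xml.dom.minidom, which the script already imports; a sketch of that pattern, assuming the unchanged tail of nitratexml() does something similar:

    import xml.etree.ElementTree as ET
    import xml.dom.minidom

    root = ET.Element("testopia", version="1.1")
    case = ET.SubElement(root, "testcase")
    case.attrib["status"] = "PROPOSED"
    raw = ET.tostring(root, "UTF-8")   # serialized with an XML declaration
    print xml.dom.minidom.parseString(raw).toprettyxml(indent="  ")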
@@ -189,13 +175,13 @@ if __name__ == "__main__":
         print "No data found for '%s'" % cat_page
 
     elif action == 'migration':
-        s = parse(wiki, opts.title)
-        table = extract(s, opts.title)
+        string = parse(wiki, opts.title)
+        table = extract_html(string, opts.title)
         if opts.debug:
             for key in table.keys():
                 print key, '\t', table[key]
-        pretty_xml_print = nitratexml(table)
-        print pretty_xml_print
+        pretty_xml_print = nitratexml(table)
+        print pretty_xml_print, '\n', '\"The xml file named output.xml is generated at ./\"'
     else:
         print "Unknown action requested '%s'" % action