[core] improve pull request artifacts comment (#3705)

This commit is contained in:
User123698745 2023-09-24 21:13:01 +02:00 committed by GitHub
parent 857e908929
commit 09f3c1532a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

156
.github/prtester.py vendored
View File

@ -1,33 +1,65 @@
import argparse
import itertools
import os.path
from datetime import datetime
from typing import Iterable

import requests
from bs4 import BeautifulSoup

# This script is specifically written to be used in automation for https://github.com/RSS-Bridge/rss-bridge
#
# This will scrape the whitelisted bridges in the current state (port 3000) and the PR state (port 3001) of
# RSS-Bridge, generate a feed for each of the bridges and save the output as html files.
# It also adds a <base> tag with the url of em's public instance, so viewing
# the HTML file locally will actually work as designed.
class Instance:
    """One running RSS-Bridge instance to be tested.

    Attributes:
        name: label shown next to results (e.g. 'current' or 'pr'); may be empty.
        url: base url of the instance (e.g. 'http://localhost:3000').
    """

    def __init__(self, url: str = '', name: str = ''):
        # Defaults preserve the original attribute-assignment usage pattern
        # (Instance() followed by setting .name / .url), while also allowing
        # direct construction.
        self.name = name
        self.url = url

    def __repr__(self) -> str:
        return f'{type(self).__name__}(url={self.url!r}, name={self.name!r})'
def main(instances: Iterable[Instance], with_upload: bool, comment_title: str):
    """Scrape every given RSS-Bridge instance, test all of its bridges, and
    write a markdown comment (title + result table) to ./comment.txt.

    Args:
        instances: the RSS-Bridge instances (url + optional name) to test.
        with_upload: forwarded to testBridges; when True, rendered feeds are
            uploaded to termpad.com so the result table can link to them.
        comment_title: heading placed at the top of the generated comment.
    """
    start_date = datetime.now()
    table_rows = []
    for instance in instances:
        # Use python requests to grab the rss-bridge main page
        page = requests.get(instance.url)
        # use bs4 to turn the page into soup
        soup = BeautifulSoup(page.content, "html.parser")
        # get a soup-formatted list of all bridges on the rss-bridge page
        bridge_cards = soup.select('.bridge-card')
        # run the main scraping code with the list of bridges for this instance
        table_rows += testBridges(instance, bridge_cards, with_upload)
    with open(file=os.getcwd() + '/comment.txt', mode='w+', encoding='utf-8') as file:
        # Sort so the table order is stable across runs regardless of instance order.
        table_rows_value = '\n'.join(sorted(table_rows))
        file.write(f'''
## {comment_title}
| Bridge | Context | Status |
| - | - | - |
{table_rows_value}
*last change: {start_date.strftime("%A %Y-%m-%d %H:%M:%S")}*
'''.strip())
def testBridges(instance: Instance, bridge_cards: Iterable, with_upload: bool) -> Iterable:
instance_suffix = ''
if instance.name:
instance_suffix = f' ({instance.name})'
table_rows = []
for bridge_card in bridge_cards:
bridgeid = bridge_card.get('id')
bridgeid = bridgeid.split('-')[1] # this extracts a readable bridge name from the bridge metadata bridgeid = bridgeid.split('-')[1] # this extracts a readable bridge name from the bridge metadata
print(bridgeid + "\n") print(f'{bridgeid}{instance_suffix}\n')
bridgestring = '/?action=display&bridge=' + bridgeid + '&format=Html' bridgestring = '/?action=display&bridge=' + bridgeid + '&format=Html'
forms = bridge.find_all("form") bridge_name = bridgeid.replace('Bridge', '')
formid = 1 context_forms = bridge_card.find_all("form")
for form in forms: form_number = 1
for context_form in context_forms:
# a bridge can have multiple contexts, named 'forms' in html # a bridge can have multiple contexts, named 'forms' in html
# this code will produce a fully working formstring that should create a working feed when called # this code will produce a fully working formstring that should create a working feed when called
# this will create an example feed for every single context, to test them all # this will create an example feed for every single context, to test them all
formstring = '' formstring = ''
errormessages = [] error_messages = []
parameters = form.find_all("input") context_name = '*untitled*'
lists = form.find_all("select") context_name_element = context_form.find_previous_sibling('h5')
if context_name_element and context_name_element.text.strip() != '':
context_name = context_name_element.text
parameters = context_form.find_all("input")
lists = context_form.find_all("select")
# this for/if mess cycles through all available input parameters, checks if it required, then pulls # this for/if mess cycles through all available input parameters, checks if it required, then pulls
# the default or examplevalue and then combines it all together into the formstring # the default or examplevalue and then combines it all together into the formstring
# if an example or default value is missing for a required attribute, it will throw an error # if an example or default value is missing for a required attribute, it will throw an error
@ -40,7 +72,8 @@ def testBridges(bridges,status):
if parameter.has_attr('required'): if parameter.has_attr('required'):
if parameter.get('placeholder') == '': if parameter.get('placeholder') == '':
if parameter.get('value') == '': if parameter.get('value') == '':
errormessages.append(parameter.get('name')) name_value = parameter.get('name')
error_messages.append(f'Missing example or default value for parameter "{name_value}"')
else: else:
formstring = formstring + '&' + parameter.get('name') + '=' + parameter.get('value') formstring = formstring + '&' + parameter.get('name') + '=' + parameter.get('value')
else: else:
@ -68,46 +101,59 @@ def testBridges(bridges,status):
selectionvalue = selectionentry.get('value') selectionvalue = selectionentry.get('value')
break break
formstring = formstring + '&' + listname + '=' + selectionvalue formstring = formstring + '&' + listname + '=' + selectionvalue
if not errormessages: termpad_url = 'about:blank'
# if all example/default values are present, form the full request string, run the request, replace the static css if error_messages:
# file with the url of em's public instance and then upload it to termpad.com, a pastebin-like-site. status = '<br>'.join(map(lambda m: f'❌ `{m}`', error_messages))
r = requests.get(URL + bridgestring + formstring)
pagetext = r.text.replace('static/style.css','https://rss-bridge.org/bridge01/static/style.css')
pagetext = pagetext.encode("utf_8")
termpad = requests.post(url="https://termpad.com/", data=pagetext)
termpadurl = termpad.text
termpadurl = termpadurl.replace('termpad.com/','termpad.com/raw/')
termpadurl = termpadurl.replace('\n','')
with open(os.getcwd() + '/comment.txt', 'a+') as file:
file.write("\n")
file.write("| [`" + bridgeid + '-' + status + '-context' + str(formid) + "`](" + termpadurl + ") | " + date_time + " |")
else: else:
# if there are errors (which means that a required value has no example or default value), log out which error appeared # if all example/default values are present, form the full request string, run the request, add a <base> tag with
termpad = requests.post(url="https://termpad.com/", data=str(errormessages)) # the url of em's public instance to the response text (so that relative paths work, e.g. to the static css file) and
termpadurl = termpad.text # then upload it to termpad.com, a pastebin-like-site.
termpadurl = termpadurl.replace('termpad.com/','termpad.com/raw/') response = requests.get(instance.url + bridgestring + formstring)
termpadurl = termpadurl.replace('\n','') page_text = response.text.replace('<head>','<head><base href="https://rss-bridge.org/bridge01/" target="_blank">')
with open(os.getcwd() + '/comment.txt', 'a+') as file: page_text = page_text.encode("utf_8")
file.write("\n") soup = BeautifulSoup(page_text, "html.parser")
file.write("| [`" + bridgeid + '-' + status + '-context' + str(formid) + "`](" + termpadurl + ") | " + date_time + " |") status_messages = list(map(lambda e: f'⚠️ `{e.text.strip().splitlines()[0]}`', soup.find_all('pre')))
formid += 1 if response.status_code != 200:
status_messages = [f'❌ `HTTP status {response.status_code} {response.reason}`'] + status_messages
else:
feed_items = soup.select('.feeditem')
feed_items_length = len(feed_items)
if feed_items_length <= 0:
status_messages += [f'⚠️ `The feed has no items`']
elif feed_items_length == 1 and len(soup.select('.error')) > 0:
status_messages = [f'❌ `{feed_items[0].text.strip().splitlines()[0]}`'] + status_messages
status = '<br>'.join(status_messages)
if status.strip() == '':
status = '✔️'
if with_upload:
termpad = requests.post(url="https://termpad.com/", data=page_text)
termpad_url = termpad.text.strip()
termpad_url = termpad_url.replace('termpad.com/','termpad.com/raw/')
table_rows.append(f'| {bridge_name} | [{form_number} {context_name}{instance_suffix}]({termpad_url}) | {status} |')
form_number += 1
return table_rows
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Each value has the form 'url' or 'url::name' (name is a display label).
    parser.add_argument('-i', '--instances', nargs='+')
    parser.add_argument('-nu', '--no-upload', action='store_true')
    parser.add_argument('-t', '--comment-title', default='Pull request artifacts')
    args = parser.parse_args()
    instances = []
    if args.instances:
        for instance_arg in args.instances:
            instance_arg_parts = instance_arg.split('::')
            instance = Instance()
            instance.name = instance_arg_parts[1] if len(instance_arg_parts) >= 2 else ''
            instance.url = instance_arg_parts[0]
            instances.append(instance)
    else:
        # Default: compare the current checkout against the PR build; both
        # ports are defined in the corresponding workflow .yml file.
        for default_name, default_url in (
            ('current', 'http://localhost:3000'),
            ('pr', 'http://localhost:3001'),
        ):
            instance = Instance()
            instance.name = default_name
            instance.url = default_url
            instances.append(instance)
    main(instances=instances, with_upload=not args.no_upload, comment_title=args.comment_title)