DaveT OK, here's a script to scrape the site. It's Python and uses BeautifulSoup4, so you'll need to install that (along with requests) if you don't have it already:
pip install beautifulsoup4 requests
Obviously, swap in your own username and password in the payload below.
import requests
from bs4 import BeautifulSoup

LOGIN_URL = 'https://www.karaoke-version.com/my/login.html'
BASE_URL = 'https://www.karaoke-version.com/my/download.html?page='
TARGET_PREFIX = '/custombackingtrack/'

# Your login credentials
payload = {
    'frm_login': 'YOUR_USERNAME',
    'frm_password': 'YOUR_PASSWORD'
}

def get_links_from_page(session, url):
    # Return every href on the page that starts with the desired prefix
    response = session.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')
    links = [a['href'] for a in soup.find_all('a', href=True) if a['href'].startswith(TARGET_PREFIX)]
    return links

def get_number_of_pages(session, url):
    # Count the pagination buttons (class 'mr-1') to find out how many pages of downloads there are
    response = session.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')
    pagination_elements = soup.select('.mr-1')
    return len(pagination_elements)

all_links = []

with requests.Session() as session:
    # Log in so the download pages are accessible
    post = session.post(LOGIN_URL, data=payload)

    # Check that the login request went through
    if post.status_code == 200:
        total_pages = get_number_of_pages(session, BASE_URL + '1')  # start counting from page 1
        for page_num in range(1, total_pages + 1):
            current_page_url = BASE_URL + str(page_num)
            all_links.extend(get_links_from_page(session, current_page_url))
    else:
        print('Login request failed with status code', post.status_code)

print(all_links)
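
One caveat: a 200 status only tells you the login request itself went through, not that the credentials were accepted, so if you end up with an empty list, double-check them. And if you'd rather have the links in a file than just printed to the console, here's a minimal sketch you can tack onto the end of the script above (my_links.txt is just an example filename, and I'm assuming the hrefs come back site-relative, so I prepend the domain):

# De-duplicate while keeping the original order, then write to a text file.
# Assumes all_links has already been filled in by the loop above.
unique_links = list(dict.fromkeys(all_links))

with open('my_links.txt', 'w') as f:
    for link in unique_links:
        # The scraped hrefs look relative ('/custombackingtrack/...'), so prepend the domain
        f.write('https://www.karaoke-version.com' + link + '\n')

print(f'Saved {len(unique_links)} links to my_links.txt')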