"""Scrape item listings from avito.ru (Omsk, consumer electronics) page by page."""

import cfscrape
from bs4 import BeautifulSoup
import csv  # NOTE(review): unused here — presumably kept for a planned CSV export; confirm before removing

# Placeholders for a future interactive search mode (see commented-out lines below).
location = ''
name = ''
path = ''
# input(location)
# input(name)
# URL = 'avito.ru/' + location + '?q=' + name

# First page number to scrape.
pages = 1

# Module-level accumulator: parse() appends one dict per advert found on a page.
a = []
divs = []  # NOTE(review): never used — kept to avoid breaking external references


def src(pages):
    """Return the listing URL for the given page number."""
    return 'https://www.avito.ru/omsk/bytovaya_elektronika' + '?p=' + str(pages)


def get_html(URL):
    """Fetch *URL* through a Cloudflare-aware scraper and return the raw body bytes."""
    scrape = cfscrape.CloudflareScraper()
    return scrape.get(URL).content


def parse(http_html):
    """Parse one listing page.

    Appends a dict per advert (name, price, currency, link) to the
    module-level list ``a`` and returns the highest page number shown in
    the pagination widget (0 if no pagination is present).
    """
    soup = BeautifulSoup(http_html, 'html.parser')
    blocks = soup.find_all('div', class_='items-items-38oUm')
    max_page = soup.find_all('span', class_='pagination-item-1WyVp')

    # BUGFIX: initialise once, *before* the loop. The original reset
    # max_num_page to 0 inside the try body on every iteration, so it
    # returned the last numeric pagination label instead of the maximum,
    # and raised UnboundLocalError when no pagination span was found.
    # The redundant per-character inner loop (which re-parsed the same
    # whole string len(text) times) is removed, and the bare except is
    # narrowed to the ValueError that int() actually raises.
    max_num_page = 0
    for mxp in max_page:
        try:
            page_num = int(mxp.text)
        except ValueError:
            # Non-numeric pagination labels (ellipsis, arrows) are skipped.
            continue
        if page_num > max_num_page:
            max_num_page = page_num

    for x in blocks:
        blocker = x.find_all('div', class_='iva-item-root-G3n7v photo-slider-slider-3tEix iva-item-list-2_PpT '
                                           'iva-item-redesign-1OBTh items-item-1Hoqq items-listItem-11orH '
                                           'js-catalog-item-enum')
        for z in blocker:
            a.append({
                'name': z.find('h3').text,
                'price': z.find('span', class_='price-text-1HrJ_ text-text-1PdBw text-size-s-1PUdo').get_text(),
                'wallet': z.find('span', class_='price-currency-LOpM3'),
                'address': 'avito.ru' + z.find('a')['href'],
            })
    return max_num_page


def result(pages):
    """Ask the user for a page count, then scrape and print that many pages.

    Starts from page number *pages*. Returns the string 'ok'.
    """
    count = int(input('Enter count of pages = '))
    tsx = 0
    # BUGFIX: probe the max page number exactly once. The original called
    # parse(get_html(src(pages))) up to three additional times per page
    # just to re-print the same value — each call a full network fetch.
    max_num_page = parse(get_html(src(pages)))
    # BUGFIX: the probe above also filled ``a`` with page-1 items; without
    # this clear the first page printed inside the loop contained every
    # item twice.
    a.clear()
    if count <= max_num_page:
        while tsx < count:
            parse(get_html(src(pages)))
            print(a)
            print('Количество элементов на странице = ', (len(a)))
            print(src(pages))
            print('Page number:', pages)
            print('Max page = ', max_num_page)
            a.clear()
            pages = pages + 1
            tsx = tsx + 1
    else:
        print('max pages = ', max_num_page)
    return 'ok'


if __name__ == '__main__':
    # Guarded so importing this module does not trigger input() and network I/O.
    print(result(pages))