#!/usr/bin/env python
import re
import sys
from datetime import datetime

import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from sqlalchemy import create_engine, Column, Integer, String, Text, DateTime
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class TvShow(Base):
    __tablename__ = 'tvshows'

    id = Column(Integer, primary_key=True)
    rlsbb_id = Column(Integer, nullable=False)
    article_title = Column(String(255), nullable=False)
    title = Column(String(255), nullable=False)
    date = Column(DateTime, nullable=False)
    summary = Column(Text, nullable=True)
    image_url = Column(String(255), nullable=True)
    download_url = Column(String(255), nullable=True)
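

# --- Illustrative sketch, not part of the original script: an item pipeline that
# would persist scraped items through the TvShow model above, since create_engine
# and sessionmaker are imported but never used here. The sqlite URL, the pipeline
# name, and the field coercions below are assumptions; to take effect the class
# would still need to be registered via the ITEM_PIPELINES setting (e.g. in the
# spider's custom_settings).
class TvShowPipeline:
    def open_spider(self, spider):
        # Assumed connection string; swap in the real database location.
        self.engine = create_engine("sqlite:///tvshows.db")
        Base.metadata.create_all(self.engine)
        self.Session = sessionmaker(bind=self.engine)

    def close_spider(self, spider):
        self.engine.dispose()

    def process_item(self, item, spider):
        session = self.Session()
        try:
            session.add(TvShow(
                # "post-1234567" -> 1234567 (assumed format of the article id attribute)
                rlsbb_id=int(re.sub(r"\D", "", item.get("article_id") or "") or 0),
                article_title=item.get("article_title"),
                title=item.get("title"),
                date=item.get("date"),
                # summary is scraped as a list of HTML nodes; store it as one text blob
                summary="".join(item.get("summary") or []),
                image_url=item.get("image_url"),
                download_url=item.get("download_url"),
            ))
            session.commit()
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()
        return item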


class TvShowItem(scrapy.Item):
    # Field names match the keys assigned in parse_page() below.
    article_id = scrapy.Field()
    article_title = scrapy.Field()
    title = scrapy.Field()
    date = scrapy.Field()
    summary = scrapy.Field()
    image_url = scrapy.Field()
    download_url = scrapy.Field()


class TvShowSpider(CrawlSpider):
    """Crawl the rlsbb.ru TV-shows category and yield one item per article."""

    name = "rlsb_tvshow"
    allowed_domains = ["rlsbb.ru"]
    start_urls = ["https://rlsbb.ru/category/tv-shows/"]

    custom_settings = {
        'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
        'AUTOTHROTTLE_ENABLED': True,
        'DOWNLOAD_DELAY': 10,
    }

    # Follow category pagination; the callback is not named "parse" because
    # CrawlSpider uses parse() internally to drive its rules.
    rules = [
        Rule(LinkExtractor(allow=r"/tv-shows/page/"), callback="parse_page", follow=True),
    ]

    def parse_page(self, response):
        """Extract one TvShowItem per <article> element on a category page."""
        for article in response.css("article"):
            item = TvShowItem()
            item['article_id'] = article.attrib['id']
            item['article_title'] = article.css('h1.entry-title > a::text').get()
            item['title'] = article.css('.entry-summary > p:nth-child(4) > strong::text').get()
            item['date'] = self.parse_date(article.css('.entry-meta-header-before::text').getall()[1].strip())
            item['summary'] = article.xpath('.//div[@class="entry-summary"]/node()').getall()
            item['image_url'] = article.css('.entry-summary > p > img::attr(src)').get()
            item['download_url'] = article.css('.entry-summary > p > a[href^="https://rapidgator"]::attr(href)').get()
            yield item

    def parse_date(self, formatted_date: str) -> datetime:
        """Parse a post header such as 'Posted on February 3rd, 2024 at 18:45 in',
        stripping the ordinal suffix (st/nd/rd/th) before handing it to strptime."""
        formatted_date = re.sub(r'(\d)(st|nd|rd|th)', r'\1', formatted_date)
        return datetime.strptime(formatted_date, "Posted on %B %d, %Y at %H:%M in")


def main() -> int:
    process = CrawlerProcess()
    process.crawl(TvShowSpider)
    process.start()
    return 0


if __name__ == "__main__":
    sys.exit(main())