Compare commits
5 commits: ada99be262 ... 8adc0623bd
| SHA1 |
|---|
| 8adc0623bd |
| 0db07013ce |
| eaf854c3eb |
| 359b3271e4 |
| 15e1f837c8 |
.gitignore (vendored, new file, 1 addition)

@@ -0,0 +1 @@
+/*.db
scrarls.py (116 changed lines)
@@ -2,6 +2,7 @@
 import re
 import sys
 
 from datetime import datetime
 
+
 import scrapy
@@ -9,24 +10,69 @@ from scrapy.crawler import CrawlerProcess
 from scrapy.spiders import CrawlSpider, Rule
 from scrapy.linkextractors import LinkExtractor
 
-from sqlalchemy import create_engine, Column, Integer, String, Text, DateTime
-from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy import create_engine, func, select, Engine, Integer, String, Text, DateTime
+from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
 from sqlalchemy.orm import sessionmaker
 
-Base = declarative_base()
+class Base(DeclarativeBase):
+    """Declarative base for SQLAlchemy 2.0."""
+    pass
 
 
-class TvShow(Base):
-    __tablename__ = 'tvshows'
-    id = Column(Integer, primary_key=True)
-    rlsbb_id = Column(Integer, nullable=False)
-    article_title = Column(String(255), nullable=False)
-    title = Column(String(255), nullable=False)
-    date = Column(DateTime, nullable=False)
-    summary = Column(Text, nullable=True)
-    image_url = Column(String(length=255), nullable=True)
-    download_url = Column(String(length=255), nullable=True)
+class TvShowDB(Base):
+    """Model for storing episodes (SQLAlchemy 2.0)."""
+
+    __tablename__: str = "tvshows"
+
+    id: Mapped[int] = mapped_column(
+        Integer,
+        primary_key=True,
+        autoincrement=True
+    )
+    post_id: Mapped[str] = mapped_column(
+        String(length=255),
+        nullable=False,
+        unique=True,
+        index=True
+    )
+    post_title: Mapped[str] = mapped_column(
+        String(255),
+        nullable=False
+    )
+    title: Mapped[str] = mapped_column(
+        String(255),
+        nullable=False,
+        index=True
+    )
+    date: Mapped[datetime] = mapped_column(
+        DateTime,
+        nullable=False,
+        index=True
+    )
+    summary: Mapped[str | None] = mapped_column(
+        Text,
+        nullable=True
+    )
+    image_url: Mapped[str | None] = mapped_column(
+        String(255),
+        nullable=True
+    )
+    download_url: Mapped[str | None] = mapped_column(
+        String(255),
+        nullable=True
+    )
+    created_at: Mapped[datetime] = mapped_column(
+        DateTime,
+        server_default=func.datetime('now'),
+        nullable=False
+    )
+    updated_at: Mapped[datetime] = mapped_column(
+        DateTime,
+        server_default=func.datetime('now'),
+        onupdate=func.datetime('now'),
+        nullable=False
+    )
 
 
 class TvShowItem(scrapy.Item):
     post_id: scrapy.Field = scrapy.Field()
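The hunk above moves the model from 1.x `Column` attributes to 2.0 `Mapped`/`mapped_column` annotations, renames the entity to `TvShowDB` (freeing the `TvShow` name for the spider class below), replaces `rlsbb_id`/`article_title` with `post_id`/`post_title`, and adds `created_at`/`updated_at` audit columns defaulted by SQLite's `datetime('now')`. A minimal sketch of the new model in use, not part of the commit, assuming the module's crawl entry point is guarded by `if __name__ == "__main__":` so that importing `scrarls` only defines the classes:

```python
# Illustrative only: exercises the committed model against an in-memory DB.
from datetime import datetime

from sqlalchemy import create_engine, select
from sqlalchemy.orm import sessionmaker

from scrarls import Base, TvShowDB  # assumes scrarls.py is importable

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)  # emits the tvshows DDL, server defaults included
Session = sessionmaker(bind=engine)

with Session() as session:
    session.add(TvShowDB(
        post_id="post-12345",  # hypothetical values
        post_title="Example.Show.S01E01.720p",
        title="Example Show",
        date=datetime(2023, 1, 1),
    ))
    session.commit()

    stmt = select(TvShowDB).where(TvShowDB.post_id == "post-12345")
    show = session.scalars(stmt).first()
    print(show.title, show.created_at)  # created_at filled by datetime('now')
```

Note that `Mapped[str | None]` requires Python 3.10+; on older interpreters `Optional[str]` is the equivalent annotation, and `func.now()` would be the portable alternative to the SQLite-specific `func.datetime('now')` default.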
@@ -38,6 +84,33 @@ class TvShowItem(scrapy.Item):
     download_url: scrapy.Field = scrapy.Field()
 
 
+class SQLAlchemyPipeline:
+    def __init__(self):
+        self.engine: Engine = create_engine('sqlite:///tvshows.db', echo=True)
+        Base.metadata.create_all(self.engine)
+        self.Session = sessionmaker(bind=self.engine)
+
+    def process_item(self, item, spider):
+        session = self.Session()
+        try:
+            stmt = select(TvShowDB).where(TvShowDB.post_id == item["post_id"])
+            show = session.scalars(stmt).first()
+            print(f"{show=}")
+            if not show:
+                show = TvShowDB(**item)
+                session.add(show)
+            else:
+                for key, value in item.items():
+                    setattr(show, key, value)
+            session.commit()
+        except Exception as e:
+            session.rollback()
+            raise
+        finally:
+            session.close()
+        return item
+
+
 class TvShow(CrawlSpider):
     name: str = "rlsb_tvshow"
     allowed_domains: list[str] = ["rlsbb.ru"]
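The new `SQLAlchemyPipeline` is a check-then-write upsert keyed on the unique `post_id`: it selects any existing row first, then either inserts a fresh `TvShowDB` or copies the item's fields onto the loaded one. Since `process_item` only uses `item["post_id"]` and `item.items()`, a plain dict can stand in for a `TvShowItem`; a hypothetical direct call, again assuming `scrarls` imports cleanly:

```python
# Hypothetical exercise of the committed pipeline outside Scrapy.
from datetime import datetime

from scrarls import SQLAlchemyPipeline

pipeline = SQLAlchemyPipeline()  # creates tvshows.db and its schema

item = {
    "post_id": "post-12345",  # hypothetical values
    "post_title": "Example.Show.S01E01.720p",
    "title": "Example Show",
    "date": datetime(2023, 1, 1),
}

pipeline.process_item(item, spider=None)  # no row yet: INSERT
item["title"] = "Example Show (Repack)"
pipeline.process_item(item, spider=None)  # same post_id: fields copied, UPDATE
```

The select-then-insert sequence is not atomic, but with Scrapy feeding items through a single pipeline instance that is acceptable, and the `unique=True` index on `post_id` would surface any race as an `IntegrityError` rather than a duplicate row.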
@@ -46,6 +119,9 @@ class TvShow(CrawlSpider):
         'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
         'AUTOTHROTTLE_ENABLED': True,
         'DOWNLOAD_DELAY': 10,
+        'ITEM_PIPELINES': {
+            '__main__.SQLAlchemyPipeline': 300,
+        },
     }
     rules: list[Rule] = [
         Rule(LinkExtractor(allow=r"/tv-shows/page/"), callback="parse", follow=True)
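The pipeline is registered as `'__main__.SQLAlchemyPipeline'`, a path that only resolves when scrarls.py runs as a script, which is consistent with the `from scrapy.crawler import CrawlerProcess` import visible in an earlier hunk header. The file's actual entry point is outside this compare view, so the following is an assumption about how the spider is launched, not committed code:

```python
# Hypothetical tail of scrarls.py; the real __main__ block is not shown
# in this compare view.
if __name__ == "__main__":
    process = CrawlerProcess()
    process.crawl(TvShow)  # custom_settings activates SQLAlchemyPipeline
    process.start()        # blocks until the crawl finishes
```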
@@ -54,13 +130,13 @@ class TvShow(CrawlSpider):
     def parse(self, response):
         for article in response.css("article"):
             item = TvShowItem()
-            item['article_id'] = article.attrib['id'],
-            item['article_title'] = article.css('h1.entry-title > a::text').get(),
-            item['title'] = article.css('.entry-summary > p:nth-child(4) > strong::text').get(),
-            item['date'] = self.parse_date(article.css('.entry-meta-header-before::text').getall()[1].strip()),
-            item['summary'] = article.xpath('.//div[@class="entry-summary"]/node()').extract(),
-            item['image_url'] = article.css('.entry-summary > p > img::attr(src)').get(),
-            item['download_url'] = article.css('.entry-summary > p > a[href ^= "https://rapidgator"]::attr(href)').get()
+            item['post_id'] = article.attrib['id']
+            item['post_title'] = article.css('h1.entry-title > a::text').get()
+            item['title'] = article.css('.entry-summary > p:nth-child(4) > strong::text').get()
+            item['date'] = self.parse_date(article.css('.entry-meta-header-before::text').getall()[1].strip())
+            item['summary'] = "".join(article.xpath('.//div[@class="entry-summary"]/node()').extract())
+            item['image_url'] = article.css('.entry-summary > p > img::attr(src)').get()
+            item['download_url'] = "#".join(article.css('.entry-summary > p > a[href ^= "https://rapidgator"]::attr(href)').extract())
             yield item
 
     def parse_date(self, formatted_date: str):
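The substantive fix in `parse` is the removal of the stray trailing commas: in Python, `item['title'] = expr,` stores a 1-tuple, so before this commit every field except `download_url` reached the pipeline wrapped in `(...,)`. A two-line illustration:

```python
title = "Example Show",       # trailing comma: title is the tuple ('Example Show',)
print(type(title).__name__)   # prints: tuple
```

Two fields also change shape on purpose: `summary` now joins the extracted nodes into one string, and `download_url` switches from `.get()` (first match only) to joining every matching Rapidgator link with `#`, so multi-part releases keep all their links in a single column.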