Compare commits


15 Commits

Author SHA1 Message Date
edipretoro a2dce2bba9 Refactoring: extracting SQLAlchemy-related code to its own modules 2026-01-03 11:36:25 +01:00
edipretoro edbb92ff9a Adding a rule to ignore compiled files from Python 2026-01-03 11:35:48 +01:00
edipretoro a7952bc32c Processing correctly links extracted from a post 2026-01-02 15:11:22 +01:00
edipretoro 1d0cb8ed5d Updating the SQLAlchemyPipeline to use properly TvShowDB and LinkDB 2026-01-02 15:10:56 +01:00
edipretoro d8d8109cdc Updating the imports 2026-01-02 15:10:20 +01:00
edipretoro 05866cc862 Adding a links attribute to use the relationship to the LinkDB class 2026-01-02 15:09:50 +01:00
edipretoro f0e6d73dde Removing the existing download_url attribute 2026-01-02 15:09:30 +01:00
edipretoro 4bff05bc92 Adding a LinkDB model to store different links per show 2026-01-02 15:08:47 +01:00
edipretoro 8adc0623bd Adding the SQLAlchemyPipeline to the spider pipelines 2026-01-01 21:24:48 +01:00
edipretoro 0db07013ce Creating the SQLAlchemyPipeline class 2026-01-01 21:24:27 +01:00
edipretoro eaf854c3eb Adding a rule to ignore *.db files (SQLite databases) 2026-01-01 21:23:45 +01:00
edipretoro 359b3271e4 Updating the definition of our SQLAlchemy model 2026-01-01 21:23:05 +01:00
edipretoro 15e1f837c8 Updating the extraction of fields 2026-01-01 21:22:10 +01:00
edipretoro ada99be262 Renaming some fields for the TvShowItem 2026-01-01 21:15:36 +01:00
edipretoro 177652dce1 Adding a SQLAlchemy model to store scraped posts 2025-12-31 18:12:22 +01:00
3 changed files with 142 additions and 9 deletions
+2
@@ -0,0 +1,2 @@
/*.db
/__pycache__/
+85
@@ -0,0 +1,85 @@
from datetime import datetime

from sqlalchemy import ForeignKey, func, Integer, Boolean, String, Text, DateTime
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship


class Base(DeclarativeBase):
    """Declarative base for SQLAlchemy 2.0."""
    pass


class TvShowDB(Base):
    """Model for storing episodes (SQLAlchemy 2.0)."""

    __tablename__: str = "tvshows"

    id: Mapped[int] = mapped_column(
        Integer,
        primary_key=True,
        autoincrement=True
    )
    post_id: Mapped[str] = mapped_column(
        String(length=255),
        nullable=False,
        unique=True,
        index=True
    )
    post_title: Mapped[str] = mapped_column(
        String(255),
        nullable=False
    )
    title: Mapped[str] = mapped_column(
        String(255),
        nullable=False,
        index=True
    )
    date: Mapped[datetime] = mapped_column(
        DateTime,
        nullable=False,
        index=True
    )
    summary: Mapped[str | None] = mapped_column(
        Text,
        nullable=True
    )
    image_url: Mapped[str | None] = mapped_column(
        String(255),
        nullable=True
    )
    created_at: Mapped[datetime] = mapped_column(
        DateTime,
        server_default=func.datetime('now'),
        nullable=False
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime,
        server_default=func.datetime('now'),
        onupdate=func.datetime('now'),
        nullable=False
    )

    links: Mapped[list["LinkDB"]] = relationship(back_populates="show")


class LinkDB(Base):
    """Model for storing download links (SQLAlchemy 2.0)."""

    __tablename__: str = "links"

    id: Mapped[int] = mapped_column(
        Integer,
        primary_key=True,
        autoincrement=True
    )
    link: Mapped[str] = mapped_column(
        String(255),
        nullable=False
    )
    is_downloaded: Mapped[bool] = mapped_column(
        Boolean,
        default=False
    )
    show_id: Mapped[int] = mapped_column(ForeignKey("tvshows.id"))
    show: Mapped["TvShowDB"] = relationship(back_populates="links")
+55 -9
@@ -2,6 +2,7 @@
 import re
 import sys
 from datetime import datetime

 import scrapy
@@ -9,10 +10,16 @@ from scrapy.crawler import CrawlerProcess
 from scrapy.spiders import CrawlSpider, Rule
 from scrapy.linkextractors import LinkExtractor
+from sqlalchemy import create_engine, select, Engine
+from sqlalchemy.orm import sessionmaker
+from models import Base, TvShowDB, LinkDB

 class TvShowItem(scrapy.Item):
-    article_id: scrapy.Field = scrapy.Field()
-    article_title: scrapy.Field = scrapy.Field()
+    post_id: scrapy.Field = scrapy.Field()
+    post_title: scrapy.Field = scrapy.Field()
     title:scrapy.Field = scrapy.Field()
     date: scrapy.Field = scrapy.Field()
     summary: scrapy.Field = scrapy.Field()
@@ -20,6 +27,42 @@ class TvShowItem(scrapy.Item):
     download_url: scrapy.Field = scrapy.Field()

+class SQLAlchemyPipeline:
+    def __init__(self):
+        self.engine: Engine = create_engine('sqlite:///tvshows.db', echo=True)
+        Base.metadata.create_all(self.engine)
+        self.Session = sessionmaker(bind=self.engine)
+
+    def process_item(self, item, spider):
+        session = self.Session()
+        try:
+            stmt = select(TvShowDB).where(TvShowDB.post_id == item["post_id"])
+            show = session.scalars(stmt).first()
+            print(f"{show=}")
+            if not show:
+                show = TvShowDB(
+                    post_id=item["post_id"],
+                    post_title=item["post_title"],
+                    title=item["title"],
+                    date=item["date"],
+                    summary=item["summary"],
+                    image_url=item["image_url"],
+                    links=[LinkDB(link=url) for url in item["download_url"]]
+                )
+                session.add(show)
+            else:
+                for key, value in item.items():
+                    if key != "download_url":
+                        setattr(show, key, value)
+            session.commit()
+        except Exception as e:
+            session.rollback()
+            raise
+        finally:
+            session.close()
+        return item

 class TvShow(CrawlSpider):
     name: str = "rlsb_tvshow"
     allowed_domains: list[str] = ["rlsbb.ru"]
@@ -28,6 +71,9 @@ class TvShow(CrawlSpider):
         'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
         'AUTOTHROTTLE_ENABLED': True,
         'DOWNLOAD_DELAY': 10,
+        'ITEM_PIPELINES': {
+            '__main__.SQLAlchemyPipeline': 300,
+        },
     }
     rules: list[Rule] = [
         Rule(LinkExtractor(allow=r"/tv-shows/page/"), callback="parse", follow=True)
@@ -36,13 +82,13 @@ class TvShow(CrawlSpider):
     def parse(self, response):
         for article in response.css("article"):
             item = TvShowItem()
-            item['article_id'] = article.attrib['id'],
-            item['article_title'] = article.css('h1.entry-title > a::text').get(),
-            item['title'] = article.css('.entry-summary > p:nth-child(4) > strong::text').get(),
-            item['date'] = self.parse_date(article.css('.entry-meta-header-before::text').getall()[1].strip()),
-            item['summary'] = article.xpath('.//div[@class="entry-summary"]/node()').extract(),
-            item['image_url'] = article.css('.entry-summary > p > img::attr(src)').get(),
-            item['download_url'] = article.css('.entry-summary > p > a[href ^= "https://rapidgator"]::attr(href)').get()
+            item['post_id'] = article.attrib['id']
+            item['post_title'] = article.css('h1.entry-title > a::text').get()
+            item['title'] = article.css('.entry-summary > p:nth-child(4) > strong::text').get()
+            item['date'] = self.parse_date(article.css('.entry-meta-header-before::text').getall()[1].strip())
+            item['summary'] = "".join(article.xpath('.//div[@class="entry-summary"]/node()').extract())
+            item['image_url'] = article.css('.entry-summary > p > img::attr(src)').get()
+            item['download_url'] = article.css('.entry-summary > p > a[href ^= "https://rapidgator"]::attr(href)').extract()
             yield item

     def parse_date(self, formatted_date: str):
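
The ITEM_PIPELINES entry above points at '__main__.SQLAlchemyPipeline', which implies the spider is launched from the same file via CrawlerProcess (already imported in this module). The run block itself is not part of this diff; a minimal sketch of what it would look like:

if __name__ == "__main__":
    # CrawlerProcess applies the spider's custom_settings, including the
    # ITEM_PIPELINES entry that enables SQLAlchemyPipeline.
    from scrapy.crawler import CrawlerProcess

    process = CrawlerProcess()
    process.crawl(TvShow)
    process.start()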