Compare commits

..

No commits in common. "a7952bc32c45a1427631b26cade9bbe39cf2c9b2" and "8adc0623bd0d79db8b16075a3cbe05214ed5e620" have entirely different histories.

View File

@@ -10,8 +10,8 @@ from scrapy.crawler import CrawlerProcess
 from scrapy.spiders import CrawlSpider, Rule
 from scrapy.linkextractors import LinkExtractor
-from sqlalchemy import ForeignKey, create_engine, func, select, Engine, Integer, Boolean, String, Text, DateTime
-from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship
+from sqlalchemy import create_engine, func, select, Engine, Integer, String, Text, DateTime
+from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
 from sqlalchemy.orm import sessionmaker
@@ -58,6 +58,10 @@ class TvShowDB(Base):
         String(255),
         nullable=True
     )
+    download_url: Mapped[str | None] = mapped_column(
+        String(255),
+        nullable=True
+    )
     created_at: Mapped[datetime] = mapped_column(
         DateTime,
         server_default=func.datetime('now'),
@@ -69,30 +73,6 @@ class TvShowDB(Base):
         onupdate=func.datetime('now'),
         nullable=False
     )
-    links: Mapped[list["LinkDB"]] = relationship(back_populates="show")
-
-
-class LinkDB(Base):
-    """Modèle pour le stockage des liens de téléchargement (SQLAlchemy 2.0)."""
-
-    __tablename__: str = "links"
-
-    id: Mapped[int] = mapped_column(
-        Integer,
-        primary_key=True,
-        autoincrement=True
-    )
-    link: Mapped[str] = mapped_column(
-        String(255),
-        nullable=False
-    )
-    is_downloaded: Mapped[bool] = mapped_column(
-        Boolean,
-        default=False
-    )
-    show_id: Mapped[int] = mapped_column(ForeignKey("tvshows.id"))
-    show: Mapped["TvShowDB"] = relationship(back_populates="links")
 
 
 class TvShowItem(scrapy.Item):
     post_id: scrapy.Field = scrapy.Field()
@@ -117,20 +97,11 @@ class SQLAlchemyPipeline:
             show = session.scalars(stmt).first()
             print(f"{show=}")
             if not show:
-                show = TvShowDB(
-                    post_id=item["post_id"],
-                    post_title=item["post_title"],
-                    title=item["title"],
-                    date=item["date"],
-                    summary=item["summary"],
-                    image_url=item["image_url"],
-                    links=[LinkDB(link=url) for url in item["download_url"]]
-                )
+                show = TvShowDB(**item)
                 session.add(show)
             else:
                 for key, value in item.items():
-                    if key != "download_url":
-                        setattr(show, key, value)
+                    setattr(show, key, value)
             session.commit()
         except Exception as e:
             session.rollback()
@@ -165,7 +136,7 @@ class TvShow(CrawlSpider):
             item['date'] = self.parse_date(article.css('.entry-meta-header-before::text').getall()[1].strip())
             item['summary'] = "".join(article.xpath('.//div[@class="entry-summary"]/node()').extract())
             item['image_url'] = article.css('.entry-summary > p > img::attr(src)').get()
-            item['download_url'] = article.css('.entry-summary > p > a[href ^= "https://rapidgator"]::attr(href)').extract()
+            item['download_url'] = "#".join(article.css('.entry-summary > p > a[href ^= "https://rapidgator"]::attr(href)').extract())
             yield item
 
     def parse_date(self, formatted_date: str):