Commit c908a556 authored by Renán Sosa Guillen

laJornadaMaya update

parent 566f8f06
laJornadaMaya/items.py
@@ -3,12 +3,18 @@
 # Define here the models for your scraped items
 #
 # See documentation in:
-# http://doc.scrapy.org/en/latest/topics/items.html
+# https://doc.scrapy.org/en/latest/topics/items.html

 import scrapy


-class LajornadamayaItem(scrapy.Item):
+class NoticiasItem(scrapy.Item):
     # define the fields for your item here like:
     # name = scrapy.Field()
-    pass
+    title = scrapy.Field()
+    text = scrapy.Field()
+    date = scrapy.Field()
+    location = scrapy.Field()
+    author = scrapy.Field()
+    topic = scrapy.Field()
+    url = scrapy.Field()
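These fields are what the spider callbacks fill in and what the JSON pipeline later serializes. As a small sketch of how a callback might build one of these items; the helper name and the values are placeholders, not part of this commit:

# Illustrative only: the field names follow items.py, everything else is made up.
from laJornadaMaya.items import NoticiasItem

def build_item(url, title, text):
    item = NoticiasItem()
    item['date'] = "2018-08-10T00:00:00"     # the spider stores ISO-8601 date strings
    item['topic'] = "Yucatan"
    item['title'] = title
    item['text'] = text
    item['url'] = url
    return item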
laJornadaMaya/middlewares.py
@@ -3,7 +3,7 @@
 # Define here the models for your spider middleware
 #
 # See documentation in:
-# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
+# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

 from scrapy import signals
@@ -20,14 +20,14 @@ class LajornadamayaSpiderMiddleware(object):
         crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
         return s

-    def process_spider_input(response, spider):
+    def process_spider_input(self, response, spider):
         # Called for each response that goes through the spider
         # middleware and into the spider.

         # Should return None or raise an exception.
         return None

-    def process_spider_output(response, result, spider):
+    def process_spider_output(self, response, result, spider):
         # Called with the results returned from the Spider, after
         # it has processed the response.
@@ -35,7 +35,7 @@ class LajornadamayaSpiderMiddleware(object):
         for i in result:
             yield i

-    def process_spider_exception(response, exception, spider):
+    def process_spider_exception(self, response, exception, spider):
         # Called when a spider or process_spider_input() method
         # (from other spider middleware) raises an exception.
@@ -43,7 +43,7 @@ class LajornadamayaSpiderMiddleware(object):
         # or Item objects.
         pass

-    def process_start_requests(start_requests, spider):
+    def process_start_requests(self, start_requests, spider):
         # Called with the start requests of the spider, and works
         # similarly to the process_spider_output() method, except
         # that it doesn't have a response associated.
@@ -54,3 +54,50 @@ class LajornadamayaSpiderMiddleware(object):
     def spider_opened(self, spider):
         spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class LajornadamayaDownloaderMiddleware(object):
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the downloader middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_request(self, request, spider):
+        # Called for each request that goes through the downloader
+        # middleware.
+
+        # Must either:
+        # - return None: continue processing this request
+        # - or return a Response object
+        # - or return a Request object
+        # - or raise IgnoreRequest: process_exception() methods of
+        #   installed downloader middleware will be called
+        return None
+
+    def process_response(self, request, response, spider):
+        # Called with the response returned from the downloader.
+
+        # Must either;
+        # - return a Response object
+        # - return a Request object
+        # - or raise IgnoreRequest
+        return response
+
+    def process_exception(self, request, exception, spider):
+        # Called when a download handler or a process_request()
+        # (from other downloader middleware) raises an exception.
+
+        # Must either:
+        # - return None: continue processing this exception
+        # - return a Response object: stops process_exception() chain
+        # - return a Request object: stops process_exception() chain
+        pass
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
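The downloader middleware added above is still the unmodified Scrapy template: every hook returns None, returns the response unchanged, or passes. As a hedged illustration of the contract those comments describe, and not something this commit implements, process_request could be used to fill in a default User-Agent before each download; the class name below is made up and the header value mirrors the commented-out USER_AGENT in settings.py:

# Illustrative sketch only -- not part of this commit.
class DefaultHeadersDownloaderMiddleware(object):

    def process_request(self, request, spider):
        # Set a User-Agent if the request does not already carry one.
        request.headers.setdefault('User-Agent', 'laJornadaMaya (+http://www.yourdomain.com)')
        # Returning None lets the request continue through the remaining
        # downloader middlewares and on to the downloader.
        return None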
laJornadaMaya/pipelines.py
@@ -3,9 +3,73 @@
 # Define your item pipelines here
 #
 # Don't forget to add your pipeline to the ITEM_PIPELINES setting
-# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
+# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
+
+import json
+from collections import OrderedDict
+
+
+class JsonWriterPipeline(object):
+
+    def __init__(self, filename):
+        self.filename = filename
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # Here you get whatever value was passed through the "filename" command line parameter
+        settings = crawler.settings
+        filename = settings.get('filename')
+
+        # Instantiate the pipeline with the file name
+        return cls(filename)
+
+    def open_spider(self, spider):
+        self.counter = 0
+        self.file = open(self.filename, 'w')
+        self.file.write("[")
+
+    def close_spider(self, spider):
+        self.file.write("]")
+        self.file.close()
+
-class LajornadamayaPipeline(object):
     def process_item(self, item, spider):
+        # print("this is my item", item)
+        row = []
+
+        try:
+            row.append(("date", item['date']))
+        except:
+            pass
+        try:
+            row.append(("topic", item['topic']))
+        except:
+            pass
+        try:
+            row.append(("title", item['title']))
+        except:
+            pass
+        try:
+            row.append(("author", item['author']))
+        except:
+            pass
+        try:
+            row.append(("location", item['location']))
+        except:
+            pass
+        try:
+            row.append(("text", item['text']))
+        except:
+            pass
+        try:
+            row.append(("url", item['url']))
+        except:
+            pass
+
+        line = OrderedDict(row)
+        self.counter += 1
+
+        if self.counter == 1:
+            self.file.write(json.dumps(line))
+        elif self.counter > 1:
+            self.file.write(",\n" + json.dumps(line))
+
         return item
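Taken together, open_spider, process_item, and close_spider build a single JSON array in the output file: "[" on open, the first record written bare, every later record prefixed with ",\n", and "]" on close. A minimal read-back check, as a sketch assuming the crawl was run with -s filename=noticias.json as in the spider's usage notes:

import json

# Load the array written by JsonWriterPipeline and inspect a couple of fields.
with open("noticias.json") as f:
    news = json.load(f)

print(len(news))          # number of scraped articles
print(news[0]['title'])   # each record holds whichever NoticiasItem fields were present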
laJornadaMaya/settings.py
@@ -5,9 +5,9 @@
 # For simplicity, this file contains only settings considered important or
 # commonly used. You can find more settings consulting the documentation:
 #
-#     http://doc.scrapy.org/en/latest/topics/settings.html
-#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
-#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
+#     https://doc.scrapy.org/en/latest/topics/settings.html
+#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
+#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

 BOT_NAME = 'laJornadaMaya'
@@ -19,21 +19,21 @@ NEWSPIDER_MODULE = 'laJornadaMaya.spiders'
 #USER_AGENT = 'laJornadaMaya (+http://www.yourdomain.com)'

 # Obey robots.txt rules
-ROBOTSTXT_OBEY = True
+# ROBOTSTXT_OBEY = True

 # Configure maximum concurrent requests performed by Scrapy (default: 16)
 #CONCURRENT_REQUESTS = 32

 # Configure a delay for requests for the same website (default: 0)
-# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
+# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
 # See also autothrottle settings and docs
-#DOWNLOAD_DELAY = 3
+DOWNLOAD_DELAY = 0.5
 # The download delay setting will honor only one of:
 #CONCURRENT_REQUESTS_PER_DOMAIN = 16
 #CONCURRENT_REQUESTS_PER_IP = 16

 # Disable cookies (enabled by default)
-#COOKIES_ENABLED = False
+COOKIES_ENABLED = False

 # Disable Telnet Console (enabled by default)
 #TELNETCONSOLE_ENABLED = False
@@ -45,31 +45,31 @@ ROBOTSTXT_OBEY = True
 #}

 # Enable or disable spider middlewares
-# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
+# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
 #SPIDER_MIDDLEWARES = {
 #    'laJornadaMaya.middlewares.LajornadamayaSpiderMiddleware': 543,
 #}

 # Enable or disable downloader middlewares
-# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
+# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
 #DOWNLOADER_MIDDLEWARES = {
-#    'laJornadaMaya.middlewares.MyCustomDownloaderMiddleware': 543,
+#    'laJornadaMaya.middlewares.LajornadamayaDownloaderMiddleware': 543,
 #}

 # Enable or disable extensions
-# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
+# See https://doc.scrapy.org/en/latest/topics/extensions.html
 #EXTENSIONS = {
 #    'scrapy.extensions.telnet.TelnetConsole': None,
 #}

 # Configure item pipelines
-# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
+# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
-#ITEM_PIPELINES = {
-#    'laJornadaMaya.pipelines.LajornadamayaPipeline': 300,
-#}
+ITEM_PIPELINES = {
+    'laJornadaMaya.pipelines.JsonWriterPipeline': 300,
+}

 # Enable and configure the AutoThrottle extension (disabled by default)
-# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
+# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
 #AUTOTHROTTLE_ENABLED = True
 # The initial download delay
 #AUTOTHROTTLE_START_DELAY = 5
@@ -82,7 +82,7 @@ ROBOTSTXT_OBEY = True
 #AUTOTHROTTLE_DEBUG = False

 # Enable and configure HTTP caching (disabled by default)
-# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
 #HTTPCACHE_ENABLED = True
 #HTTPCACHE_EXPIRATION_SECS = 0
 #HTTPCACHE_DIR = 'httpcache'
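The pipeline entry that was previously commented out is now active and points at JsonWriterPipeline. Its output path is not a built-in Scrapy setting: it is passed on the command line with -s and read back via crawler.settings.get('filename') in the pipeline's from_crawler. The invocation documented in the new spider's usage notes below, repeated here for context:

$ scrapy crawl noticias --nolog -s filename=noticias.json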
laJornadaMaya/spiders/noticias.py  (file rewritten by this commit)

# ---- previous version (removed) ----
import scrapy, json, re
from datetime import datetime, date, timedelta, tzinfo

"""
This version downloads the news for a given date.
USAGE:
scrapy crawl noticias -t json --nolog -o noticias.json -a year=2017 -a month=3 -a day=22

Not recommended for dates more than a month old.
"""

TAG_RE = re.compile(r'<[^>]+>')
def remove_tags(text):
    return TAG_RE.sub('', text)


class UTC(tzinfo):
    """Helper class for the time zone"""

    def utcoffset(self, dt):
        # time zone for Yucatan (central Mexico): UTC-6
        return timedelta(hours=-6)

    def tzname(self, dt):
        # name of the time zone
        return 'UTC-6'


class NoticiasItem(scrapy.Item):
    title = scrapy.Field()
    text = scrapy.Field()
    date = scrapy.Field()
    location = scrapy.Field()
    author = scrapy.Field()
    topic = scrapy.Field()
    url = scrapy.Field()


class QuotesSpider(scrapy.Spider):
    name = "noticias"

    def start_requests(self):
        # self.found = False
        # self.flag = False
        self.tz = UTC()
        self.year = getattr(self, 'year', None)
        self.month = getattr(self, 'month', None)
        self.day = getattr(self, 'day', None)
        self.req_date = date(int(self.year), int(self.month), int(self.day))
        self.date_format = "%Y-%m-%d"
        self.baseURL = 'https://www.lajornadamaya.mx'
        section_list = ['yucatan', 'quintana-roo', 'campeche', 'deportes', 'nacional',
                        'internacional', 'opinion']
        # section_list = ['deportes']

        for section in section_list:
            self.section = section
            for count in range(0, 2):
                if ( count == 0 ):
                    yield scrapy.Request(url=self.baseURL+'/'+section, callback=self.parse_2)
                elif ( count == 1 ):
                    # self.section = section
                    self.page = 0
                    self.flag = False
                    self.found = False
                    page = -1
                    if not ( section == 'opinion' ):
                        while True:
                            if ( self.flag ):
                                self.flag = False
                                break
                            page += 1
                            yield scrapy.Request(url=self.baseURL+'/'+section+'?p='+str(page), callback=self.parse)

                        if ( self.found ):
                            self.found = False
                            self.page -= 1
                            if ( self.page > 0 ):
                                self.page -= 1
                            for pag in range(self.page, self.page+6):
                                yield scrapy.Request(url=self.baseURL+'/'+section+'?p='+str(pag), callback=self.parse_page, dont_filter=True)
                    else:
                        yield scrapy.Request(url=self.baseURL+'/notas?opinion', callback=self.parse_page)

    def parse_2(self, response):  # for the front-page news
        path_list = ['//h1[@class="title"]/a/@href', '//h2[@class="title"]/a/@href']
        link_list = []
        for path in path_list:
            link_list += response.xpath(path).extract()

        for link in link_list:
            if ( link[:link.rfind('/')] == self.year+'-'+self.month.zfill(2)+'-'+self.day.zfill(2) ):
                item = NoticiasItem()
                d = link[:link.rfind('/')]
                if len(d) == 10:
                    d = map(int, d.split('-'))
                    d = datetime(d[0], d[1], d[2], tzinfo=self.tz).isoformat('T')
                elif len(d) == 19:
                    d, t = d.split(' ')
                    d = map(int, d.split('-'))
                    t = map(int, t.split(':'))
                    d = datetime(d[0], d[1], d[2], t[0], t[1], t[2], tzinfo=self.tz).isoformat('T')
                item['date'] = d
                item['topic'] = response.url[response.url.rfind('/')+1:].title()
                # yield scrapy.Request(url=self.baseURL+'/'+link, callback=self.parse_item_2)
                request = scrapy.Request(url=self.baseURL+'/'+link, callback=self.parse_item_2)
                request.meta['item'] = item
                yield request

    def parse(self, response):  # for the JSON listings
        json_response = json.loads(response.text)
        if not ( response.url[response.url.rfind('/')+1:] == 'notas?opinion' ):
            json_list = json_response
        else:
            json_list = json_response['articles']

        for line in json_list:
            this_date = datetime.strptime(line['publishDate'][:line['publishDate'].rfind(' ')], self.date_format)
            this_date = this_date.date()
            if ( this_date == self.req_date ):
                self.page = int(response.url[response.url.rfind('=')+1:])
                self.found = True
                self.flag = True
                break
            elif ( this_date < self.req_date ):
                self.flag = True
                break

    def parse_item_2(self, response):  # for the front-page news
        item = response.meta['item']
        # item = NoticiasItem()
        text = ''
        # item['date'] = response.url[:response.url.rfind('/')][response.url[:response.url.rfind('/')].rfind('/')+1:]
        # item['topic'] = self.section.title()
        item['title'] = response.xpath('//article/h1/text()').extract_first()
        for paragraph in response.xpath('//*[@class="txt"]').extract():
            text += remove_tags(paragraph) + '\n'
        item['text'] = text
        item['url'] = response.url
        print item['title']
        yield item

    def parse_page(self, response):  # for the JSON listings
        json_response = json.loads(response.text)
        if not ( response.url[response.url.rfind('/')+1:] == 'notas?opinion' ):
            topic = response.url[response.url.rfind('/')+1:response.url.rfind('=')-2].title()
            json_list = json_response
        else:
            json_list = json_response['articles']
            topic = 'Opinion'

        for line in json_list:
            this_date = datetime.strptime(line['publishDate'][:line['publishDate'].rfind(' ')], self.date_format)
            this_date = this_date.date()
            if ( this_date == self.req_date ):
                item = NoticiasItem()
                # item['date'] = line['publishDate']
                d = line['publishDate']
                if len(d) == 10:
                    d = map(int, d.split('-'))
                    d = datetime(d[0], d[1], d[2], tzinfo=self.tz).isoformat('T')
                elif len(d) == 19:
                    d, t = d.split(' ')
                    d = map(int, d.split('-'))
                    t = map(int, t.split(':'))
                    d = datetime(d[0], d[1], d[2], t[0], t[1], t[2], tzinfo=self.tz).isoformat('T')
                item['date'] = d
                item['topic'] = topic
                item['title'] = line['name']
                if not ( response.url[response.url.rfind('/')+1:] == 'notas?opinion' ):
                    request = scrapy.Request(url=self.baseURL+line['url'], callback=self.parse_item)
                else:
                    request = scrapy.Request(url=self.baseURL+'/'+line['publishDate'][:line['publishDate'].rfind(' ')]+'/'+line['uriComponent'], callback=self.parse_item)
                request.meta['item'] = item
                yield request

    def parse_item(self, response):  # for the JSON listings
        item = response.meta['item']
        text = ''
        for paragraph in response.xpath('//*[@class="txt"]').extract():
            text += remove_tags(paragraph) + '\n'
        item['text'] = text
        item['url'] = response.url
        print item['title']
        yield item


# ---- new version (added) ----
# -*- coding: utf-8 -*-

"""
MEDIA:
La Jornada Maya, Yucatán

USAGE:
$ cd laJornadaMaya/
------------------------------------------------------------------------------------------------------------
## Get all the news from the most current to the oldest. It's necessary to use the parse_date_files.py file
   for the news contained in noticias.json being splitted into files by date. ##
$ scrapy crawl noticias --nolog -s filename=noticias.json
------------------------------------------------------------------------------------------------------------
## Get all the news from the most current to a specific date. ##
$ scrapy crawl noticias --nolog -s filename=2018-08-10.json -a year=2018 -a month=8 -a day=10
"""

import scrapy, re, json
from datetime import datetime, date
from laJornadaMaya.items import NoticiasItem

TAG_RE = re.compile(r'<[^>]+>')
def remove_tags(text):
    return TAG_RE.sub('', text)


class ImportantFlowData(scrapy.Item):
    """
    Useful data for the flow of the implementation
    """
    to_next_page = scrapy.Field()
    is_last_link = scrapy.Field()
    return_url = scrapy.Field()
    current_page = scrapy.Field()
    news_section = scrapy.Field()


class QuotesSpider(scrapy.Spider):
    """
    Basic Scrapy Spider class
    """
    name = "noticias"

    def start_requests(self):
        year = getattr(self, "year", None)
        month = getattr(self, "month", None)
        day = getattr(self, "day", None)

        self.base_url = "https://www.lajornadamaya.mx/"
        section_list = ["yucatan", "quintana-roo", "campeche", "nacional", "internacional", "deportes", "opinion"]

        if year is not None and month is not None and day is not None:
            self.stop_date = date(int(year), int(month), int(day))

            for s in section_list:
                flow_info = ImportantFlowData()
                flow_info['to_next_page'] = False
                flow_info['news_section'] = s

                request = scrapy.Request(url=self.base_url + s, callback=self.parse_with_stop_date_1)
                request.meta['item'] = flow_info
                yield request

        else:
            if year is None and month is None and day is None:
                self.stop_date = None

                for s in section_list:
                    flow_info = ImportantFlowData()
                    flow_info['news_section'] = s

                    request = scrapy.Request(url=self.base_url + s, callback=self.parse)
                    request.meta['item'] = flow_info
                    yield request

            else:
                print "Unable to execute this crawler with current given parameters."
                print "Enter all parameters: year, month and day, or none of them."

    def parse(self, response):
        flow_info = response.meta['item']
        s = flow_info['news_section']
        link_list = response.css('div.slide').css('h1.title > a::attr(href)').extract()
        link_list.extend(response.css('div.moreArticles').css('h2.title > a::attr(href)').extract())

        for link in link_list:
            item_info = ImportantFlowData()
            item_info['news_section'] = s

            reqst = scrapy.Request(url=self.base_url + link, callback=self.parse_item)
            reqst.meta['item'] = item_info
            yield reqst

        p = 0
        flow_info = ImportantFlowData()
        flow_info['current_page'] = p
        flow_info['news_section'] = s

        request = scrapy.Request(url=self.base_url + s + "?p=" + str(p), callback=self.parse_2)
        request.meta['item'] = flow_info
        yield request

    def parse_2(self, response):
        flow_info = response.meta['item']
        p = flow_info['current_page']
        s = flow_info['news_section']

        news_list = json.loads(response.body)
        if len(news_list) > 0:
            url = self.base_url[:self.base_url.rfind("/")]

            for news in news_list:
                item_info = ImportantFlowData()
                item_info['news_section'] = s

                reqst = scrapy.Request(url=url + news['url'], callback=self.parse_item)
                reqst.meta['item'] = item_info
                yield reqst

            p += 1
            flow_info['current_page'] = p

            request = scrapy.Request(url=self.base_url + s + "?p=" + str(p), callback=self.parse_2)
            request.meta['item'] = flow_info
            yield request

    def parse_with_stop_date_1(self, response):
        flow_info = response.meta['item']
        s = flow_info['news_section']

        if not flow_info['to_next_page']:
            link_list = response.css('div.slide').css('h1.title > a::attr(href)').extract()
            link_list.extend(response.css('div.moreArticles').css('h2.title > a::attr(href)').extract())

            for link in link_list:
                item_info = ImportantFlowData()
                item_info['news_section'] = s
                item_info['return_url'] = response.url

                if link_list.index(link) == link_list.index(link_list[-1]):
                    item_info['is_last_link'] = True
                else:
                    item_info['is_last_link'] = False

                request = scrapy.Request(url=self.base_url + link, callback=self.parse_item_with_stop_date)
                request.meta['item'] = item_info
                yield request

        else:
            flow_info = ImportantFlowData()
            p = 0
            flow_info['current_page'] = p
            flow_info['news_section'] = s

            request = scrapy.Request(url=self.base_url + s + "?p=" + str(p), callback=self.parse_with_stop_date_2)
            request.meta['item'] = flow_info
            yield request

    def parse_with_stop_date_2(self, response):
        flow_info = response.meta['item']
        p = flow_info['current_page']
        s = flow_info['news_section']

        news_list = json.loads(response.body)
        if len(news_list) > 0:
            url = self.base_url[:self.base_url.rfind("/")]

            for news in news_list:
                news_date = datetime.strptime(news['publishDate'], '%Y-%m-%d %H:%M:%S').date()

                if news_date >= self.stop_date:
                    item_info = ImportantFlowData()
                    item_info['news_section'] = s

                    reqst = scrapy.Request(url=url + news['url'], callback=self.parse_item)
                    reqst.meta['item'] = item_info
                    yield reqst

                    if news_list.index(news) == news_list.index(news_list[-1]):
                        p += 1
                        flow_info = ImportantFlowData()
                        flow_info['current_page'] = p
                        flow_info['news_section'] = s

                        request = scrapy.Request(url=self.base_url + s + "?p=" + str(p), callback=self.parse_with_stop_date_2)
                        request.meta['item'] = flow_info
                        yield request

                else:
                    break

    def parse_item(self, response):
        flow_info = response.meta['item']
        item = NoticiasItem()
        text = ''

        news_date = response.url[:response.url.rfind("/")]
        news_date = news_date[news_date.rfind("/")+1:]
        news_date = datetime.strptime(news_date, '%Y-%m-%d').isoformat("T")

        title = response.xpath('//article/h1').extract_first()
        if title is not None: title = remove_tags(title)

        topic = flow_info['news_section'].capitalize()

        for p in response.xpath('//article').css('div.txt').extract():
            text += remove_tags(p) + "\n"

        ## News item info ##
        item['date'] = news_date
        item['title'] = title
        item['topic'] = topic
        item['text'] = text.strip()
        item['url'] = response.url
        yield item

    def parse_item_with_stop_date(self, response):
        news_date = response.url[:response.url.rfind("/")]
        news_date = news_date[news_date.rfind("/")+1:]
        news_date = datetime.strptime(news_date, '%Y-%m-%d').date()

        if news_date >= self.stop_date:
            flow_info = response.meta['item']
            item = NoticiasItem()
            text = ''

            news_date = datetime.strptime(news_date.isoformat(), '%Y-%m-%d').isoformat("T")

            title = response.xpath('//article/h1').extract_first()
            if title is not None: title = remove_tags(title)

            topic = flow_info['news_section'].capitalize()

            for p in response.xpath('//article').css('div.txt').extract():
                text += remove_tags(p) + "\n"

            ## News item info ##
            item['date'] = news_date
            item['title'] = title
            item['topic'] = topic
            item['text'] = text.strip()
            item['url'] = response.url
            yield item

            if flow_info['is_last_link']:
                flow_info['to_next_page'] = True
                request = scrapy.Request(url=flow_info['return_url'], callback=self.parse_with_stop_date_1, dont_filter=True)
                request.meta['item'] = flow_info
                yield request
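The usage notes at the top of the new spider refer to a parse_date_files.py step that splits noticias.json into per-date files. That script is not part of this commit, so the following is only a hedged sketch of what such a splitter could look like, assuming each record carries the ISO date string that parse_item stores:

# Hypothetical sketch -- parse_date_files.py itself is not shown in this commit.
import json
from collections import defaultdict

def split_by_date(src="noticias.json"):
    with open(src) as f:
        news = json.load(f)                        # the array written by JsonWriterPipeline

    by_day = defaultdict(list)
    for entry in news:
        by_day[entry['date'][:10]].append(entry)   # 'YYYY-MM-DD' prefix of the ISO date

    for day, entries in by_day.items():
        with open(day + ".json", "w") as out:
            json.dump(entries, out)

if __name__ == "__main__":
    split_by_date()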
scrapy.cfg
 # Automatically created by: scrapy startproject
 #
 # For more information about the [deploy] section see:
-# https://scrapyd.readthedocs.org/en/latest/deploy.html
+# https://scrapyd.readthedocs.io/en/latest/deploy.html

 [settings]
 default = laJornadaMaya.settings