m3 / crawlersNoticias / Commits

Commit 30d91be1, authored Jan 08, 2018 by Renán Sosa Guillen
Commit message: crawlers
Parent: 4c9562a7

Showing 3 changed files with 657 additions and 433 deletions:

  descarga_por_dia/laJornada/laJornada/spiders/noticias.py        +657 -433
  descarga_por_dia/laJornada/laJornada/spiders/noticias.pyc       +0 -0
  descarga_por_dia/laVerdadYuc/laVerdadYuc/spiders/noticias.pyc   +0 -0
descarga_por_dia/laJornada/laJornada/spiders/noticias.py  (view file @ 30d91be1)
-from datetime import date, datetime, timedelta, tzinfo, time
 import scrapy, re
+from datetime import date, datetime, timedelta, tzinfo, time
+from collections import OrderedDict

-## scrapy crawl noticias -t json --nolog -o noticias.json -a year=2017 -a month=3 -a day=22
+"""
+scrapy crawl noticias -t json --nolog -o noticias.json -a year=2017 -a month=3 -a day=22
+"""

 TAG_RE = re.compile(r'<[^>]+>')
 def remove_tags(text):
     return TAG_RE.sub('', text)

+# re: r'(Twitter:\s+)?(@[\w.%+-]+.)?'
+TW_RE = re.compile(r"""
+        (Twitter:       # start of group, contains the string 'Twitter:' (case insensitive)
+        \s+             # any whitespace character (\t\n\r\f), one or more occurrences
+        )?              # end of group, zero or one occurrence of the group
+        (@              # start of group, contains the character '@'
+        [\w.%+-]+       # any alphanumeric character plus the signs (.%+-), one or more occurrences
+        .               # any character except '\n'
+        )?              # end of group, zero or one occurrence of the group
+        """, re.X|re.I) # flags: verbose|case insensitive
+
+# re: r'(Facebook|Vk):\s+[\w.%+-]+.'
+FB_RE = re.compile(r"""
+        (Facebook|Vk)   # group, contains the string 'Facebook' or 'Vk' (case insensitive)
+        :               # contains the character ':'
+        \s+             # any whitespace character (\t\n\r\f), one or more occurrences
+        [\w.%+-]+       # any alphanumeric character plus the signs (.%+-), one or more occurrences
+        .               # any character except '\n'
+        """, re.X|re.I) # flags: verbose|case insensitive
+
+# re: r'\(?(Foro:\s+)?(https?:\/\/)?([w{3}.])?[\w%+-]+(\.[a-zA-Z]{2,6}){1,2}[/\w.#$%&+-]*\)?.'
+URL_RE = re.compile(r"""
+        \(?             # the character '(' or not, zero or one occurrence
+        (Foro:          # start of group, contains the string 'Foro:' (case insensitive)
+        \s+             # any whitespace character (\t\n\r\f), one or more occurrences
+        )?              # end of group, zero or one occurrence of the group
+        (http           # start of group, contains the string 'http'
+        s?              # the character 's' or not
+        :\/\/           # contains the string '://'
+        )?              # end of group, zero or one occurrence of the group
+        ([w{3}.])?      # the character 'w' three times and/or a dot (www.), zero or one occurrence
+        [\w%+-]+        # any alphanumeric character plus the signs (%+-), one or more occurrences
+        (\.             # start of group, contains the character '.'
+        [a-zA-Z]{2,6}   # 2 to 6 letters, lowercase or uppercase
+        ){1,2}          # end of group, the group repeats 1 to 2 times
+        [/\w.#$%&+-]*   # followed by '/', any alphanumeric character plus the signs (.#$%&+-), zero or more occurrences
+        \)?             # the character ')' or not, zero or one occurrence
+        .               # any character except '\n'
+        """, re.X|re.I) # flags: verbose|case insensitive
+
+# re: r'[\w.-]+@[\w-]+(\.[a-zA-Z]{2,6}){1,2}\s?'
+EMAIL_RE = re.compile(r"""
+        [\w.-]+         # any alphanumeric character plus the signs (.-), one or more repetitions
+        @               # followed by '@'
+        [\w-]+          # any alphanumeric character plus the sign '-', one or more repetitions
+        (\.             # start of group, contains '.'
+        [a-zA-Z]{2,6}   # 2 to 6 letters, lowercase or uppercase
+        ){1,2}          # end of group, the group repeats 1 to 2 times
+        \s?             # any whitespace character (\t\n\r\f), zero or one match
+        """, re.X|re.I) # flags: verbose|case insensitive
+
+DIVP_RE = re.compile(r'(<div class="(credito-(autor|titulo)|hemero)">.*?<\/div>|<p class="s-s">.{,35}<\/p>|<span class="loc">.*?<\/span>)', re.S)
+
+TRANSLATION_RE = re.compile(r'Traducci.n.*', re.I|re.S)
+
+
+def clean_raw(rawText):
+    text = rawText.replace("* * *", '')
+    text = DIVP_RE.sub('', text)
+    text = TRANSLATION_RE.sub('', text)
+    return text
+
+
+def text_cleaning(text):
+    """
+    Function for cleaning news text
+    """
+    """
+    Removes unnecessary double, triple or longer runs of spaces inside the text. First the text is split
+    on line breaks, then each segment is split into words, ignoring whitespace. The words are then joined
+    into new segments with a single space between them, and the necessary line breaks are added back.
+    """
+    newText = ''
+    counter = 0
+    text = text.replace(u'\u0164', '')
+    text = text.replace("Afp", '')
+
+    for segment in text.split("\n"):
+        counter += 1
+        if counter == 1:
+            newText += " ".join(segment.split())
+        elif counter > 1:
+            newText += "\n" + " ".join(segment.split())
+    """---------------------------------------------------------------------------------------------------"""
+
+    """
+    Removes the Facebook, Twitter, forum and e-mail info from the text.
+    """
+    newText = TW_RE.sub('', newText)
+    newText = FB_RE.sub('', newText)
+    newText = EMAIL_RE.sub('', newText)
+    newText = URL_RE.sub('', newText)
+    newText = TRANSLATION_RE.sub('', newText)
+    """---------------------------------------------------------------------------------------------------"""
+
+    return newText
+
+
 class UTC(tzinfo):
     """class for the time zone"""
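Taken together, the helpers above are the whole cleaning pipeline: remove_tags strips markup, clean_raw drops credit/location/translation boilerplate, and text_cleaning collapses whitespace runs and removes social-media and URL residue. A minimal usage sketch, assuming the definitions above are in scope; the sample snippet and the Twitter handle are invented for illustration:

    # Illustrative only: sample input invented for this note.
    raw = '<p>MEXICO, D.F.  La   noticia  continua  aqui.\nTwitter: @reportero_mx</p>'
    cleaned = text_cleaning(remove_tags(raw))
    print(cleaned)   # -> 'MEXICO, D.F. La noticia continua aqui.' (the handle line is stripped)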
@@ -52,6 +147,10 @@ class QuotesSpider(scrapy.Spider):
         self.comparison_date_7 = date(2009, 2, 15)
         self.date = date(int(year), int(month), int(day))
+        self.parse_month = {'enero': 1, 'febrero': 2, 'marzo': 3, 'abril': 4, 'mayo': 5, 'junio': 6,
+                            'julio': 7, 'agosto': 8, 'septiembre': 9, 'octubre': 10, 'noviembre': 11,
+                            'diciembre': 12}
         # self.section_list = ['opinion', 'politica', 'economia', 'mundo', 'estados', 'ciencias',
         #                      'capital', 'sociedad', 'cultura', 'espectaculos', 'deportes']
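The parse_month table added here is what parse_item_4 (further down in this diff) uses to decode the Spanish dates printed on article pages. A minimal sketch of that decoding; the sample string is invented, and the leading weekday token is an assumption inferred from the d[1]/d[2]/d[3] indexing used below:

    from datetime import date

    parse_month = {'enero': 1, 'febrero': 2, 'marzo': 3, 'abril': 4, 'mayo': 5, 'junio': 6,
                   'julio': 7, 'agosto': 8, 'septiembre': 9, 'octubre': 10, 'noviembre': 11, 'diciembre': 12}

    # 'Lunes 8 de enero de 2018' -> drop 'de', collapse double spaces, split on spaces
    d = 'Lunes 8 de enero de 2018'.replace('de', '').replace('  ', ' ').split(' ')
    # d == ['Lunes', '8', 'enero', '2018']
    newsDate = date(int(d[3]), parse_month[d[2].lower()], int(d[1]))   # date(2018, 1, 8)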
@@ -63,12 +162,12 @@ class QuotesSpider(scrapy.Spider):
         #     else:
         #         yield scrapy.Request(url=self.baseURL+section, callback=self.parse_2)

         if self.date <= self.comparison_date_2:
-            section_list = ['index.html', 'edito.html', 'opinion.html', 'correo.html', 'politica.html',
+            section_list = ['index.html', 'edito.html', 'opinion.html', 'politica.html',
                             'economia.html', 'cultura.html', 'espectaculos.html', 'estados.html',
                             'capital.html', 'mundo.html', 'soc-jus.html', 'deportes.html']

             parse_s = {'index.html': 'Portada', 'edito.html': 'Editorial', 'opinion.html': 'Opinion',
-                       'correo.html': 'Correo', 'politica.html': 'Politica', 'economia.html': 'Economia',
+                       'politica.html': 'Politica', 'economia.html': 'Economia',
                        'cultura.html': 'Cultura', 'espectaculos.html': 'Espectaculos', 'estados.html': 'Estados',
                        'capital.html': 'Capital', 'mundo.html': 'Mundo', 'soc-jus.html': 'Sociedad',
                        'deportes.html': 'Deportes'}
@@ -79,7 +178,7 @@ class QuotesSpider(scrapy.Spider):
                 item['date'] = datetime.combine(self.date, time()).replace(tzinfo=self.tz).isoformat('T')
                 item['topic'] = parse_s[s]

-                if s == 'edito.html' or s == 'correo.html':
+                if s == 'edito.html':
                     request = scrapy.Request(url=self.baseURL+s, callback=self.parse_item)
                 else:
                     request = scrapy.Request(url=self.baseURL+s, callback=self.parse)
@@ -88,20 +187,20 @@ class QuotesSpider(scrapy.Spider):
                 yield request

         elif self.date > self.comparison_date_2 and self.date <= self.comparison_date_3:
-            section_list = ['index.html', 'edito.html', 'opinion.html', 'correo.html', 'politica.html',
+            section_list = ['index.html', 'edito.html', 'opinion.html', 'politica.html',
                             'economia.html', 'cultura.html', 'espectaculos.html', 'estados.html',
                             'capital.html', 'mundo.html', 'soc-jus.html', 'deportes.html',
-                            'index.php', 'edito.php', 'opinion.php', 'correo.php', 'politica.php',
+                            'index.php', 'edito.php', 'opinion.php', 'politica.php',
                             'economia.php', 'cultura.php', 'espectaculos.php', 'estados.php',
                             'capital.php', 'mundo.php', 'soc-jus.php', 'deportes.php']

             parse_s = {'index.html': 'Portada', 'edito.html': 'Editorial', 'opinion.html': 'Opinion',
-                       'correo.html': 'Correo', 'politica.html': 'Politica', 'economia.html': 'Economia',
+                       'politica.html': 'Politica', 'economia.html': 'Economia',
                        'cultura.html': 'Cultura', 'espectaculos.html': 'Espectaculos', 'estados.html': 'Estados',
                        'capital.html': 'Capital', 'mundo.html': 'Mundo', 'soc-jus.html': 'Sociedad',
                        'deportes.html': 'Deportes',
                        'index.php': 'Portada', 'edito.php': 'Editorial', 'opinion.php': 'Opinion',
-                       'correo.php': 'Correo', 'politica.php': 'Politica', 'economia.php': 'Economia',
+                       'politica.php': 'Politica', 'economia.php': 'Economia',
                        'cultura.php': 'Cultura', 'espectaculos.php': 'Espectaculos', 'estados.php': 'Estados',
                        'capital.php': 'Capital', 'mundo.php': 'Mundo', 'soc-jus.php': 'Sociedad',
                        'deportes.php': 'Deportes'}
@@ -250,9 +349,9 @@ class QuotesSpider(scrapy.Spider):
     def parse_5(self, response):
-        if ( response.url[:response.url.rfind('/')+1] == self.baseURL ): # checks that the same base URL is kept
+        if response.url[:response.url.rfind('/')+1] == self.baseURL: # checks that the same base URL is kept
             section = response.url[response.url.rfind('/')+1:]

-            if ( section == 'opinion' ): # the 'opinion' section is structured differently from the others
+            if section == 'opinion': # the 'opinion' section is structured differently from the others
                 path_list = ['//*[@id="columnas"]/p/a/@href', '//*[@id="opinion"]/p/a/@href']
             else:
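The guard at the top of parse_5 keeps the crawl on the same base URL: everything up to and including the last '/' of the response URL is compared against self.baseURL, and the tail after that '/' becomes the section name. What the two slices produce, on an invented URL of the right shape:

    url = 'http://www.example-archive.mx/2005/02/20/opinion'   # hypothetical, stands in for response.url
    base = url[:url.rfind('/') + 1]       # 'http://www.example-archive.mx/2005/02/20/'
    section = url[url.rfind('/') + 1:]    # 'opinion'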
@@ -266,66 +365,80 @@ class QuotesSpider(scrapy.Spider):
     def parse_6(self, response):
-        if ( response.url[:response.url.rfind('/')+1] == self.baseURL ):
+        if response.url[:response.url.rfind('/')+1] == self.baseURL:
+            # linkSet = set()
+            # path_list = ['//*[@class="itemfirst"]/div/a/@href', '//*[@class="item start"]/div/a/@href',
+            #              '//*[@class="item"]/div/a/@href']
+            #
+            # for path in path_list:
+            #     for link in response.xpath(path).extract():
+            #         if link not in linkSet:
+            #             linkSet.add(link)
+            #             yield scrapy.Request(url=self.baseURL+link, callback=self.parse_item_4)
             linkSet = set()
-            path_list = ['//*[@class="itemfirst"]/div/a/@href', '//*[@class="item start"]/div/a/@href',
-                         '//*[@class="item"]/div/a/@href']
-
-            for path in path_list:
-                for link in response.xpath(path).extract():
-                    if link not in linkSet:
+            linkLst = []
+            linkLst.extend(response.xpath('//*[@class="itemfirst"]/div/a/@href').extract())
+            linkLst.extend(response.xpath('//*[@class="item start"]/div/a/@href').extract())
+            linkLst.extend(response.xpath('//*[@class="item"]/div/a/@href').extract())
+
+            for l in linkLst:
+                link = self.baseURL + l
+                if not link in linkSet:
                     linkSet.add(link)
-                    yield scrapy.Request(url=self.baseURL+link, callback=self.parse_item_4)
+                    yield scrapy.Request(url=link, callback=self.parse_item_4)


     def parse_item(self, response):
         """
         DATES <= 2004-12-12
         """
         item = response.meta['item']
         flag = True
         text = ''

         try:
-            title = response.xpath('//font[@size="5"]').extract_first()
-            item['title'] = remove_tags(title)
+            title = remove_tags(response.xpath('//font[@size="5"]').extract_first())
+            item['title'] = title
         except:
             try:
-                title = response.xpath('//p/font[@size="5"]').extract_first()
-                item['title'] = remove_tags(title)
+                title = remove_tags(response.xpath('//p/font[@size="5"]').extract_first())
+                item['title'] = title
             except:
                 try:
-                    title = response.xpath('//p/font[@size="5"]').extract()[1]
-                    item['title'] = remove_tags(title)
+                    title = remove_tags(response.xpath('//p/font[@size="5"]').extract()[1])
+                    item['title'] = title
                 except:
                     try:
-                        title = response.xpath('//font[@size="4"]').extract_first()
-                        item['title'] = remove_tags(title)
+                        title = remove_tags(response.xpath('//font[@size="4"]').extract_first())
+                        item['title'] = title
                     except:
                         try:
-                            title = response.xpath('//p/font[@size="4"]').extract_first()
-                            item['title'] = remove_tags(title)
+                            title = remove_tags(response.xpath('//p/font[@size="4"]').extract_first())
+                            item['title'] = title
                         except:
                             try:
-                                title = response.xpath('//p/font[@size="4"][1]').extract()[1]
-                                item['title'] = remove_tags(title)
+                                title = remove_tags(response.xpath('//p/font[@size="4"][1]').extract()[1])
+                                item['title'] = title
                             except:
                                 try:
-                                    title = response.xpath('//font[@size="3"]').extract_first()
-                                    item['title'] = remove_tags(title)
+                                    title = remove_tags(response.xpath('//font[@size="3"]').extract_first())
+                                    item['title'] = title
                                 except:
                                     try:
-                                        title = response.xpath('//p/font[@size="3"]').extract_first()
-                                        item['title'] = remove_tags(title)
+                                        title = remove_tags(response.xpath('//p/font[@size="3"]').extract_first())
+                                        item['title'] = title
                                     except:
                                         try:
-                                            title = response.xpath('//p/font[@size="3"][1]').extract()[1]
-                                            item['title'] = remove_tags(title)
+                                            title = remove_tags(response.xpath('//p/font[@size="3"][1]').extract()[1])
+                                            item['title'] = title
                                         except:
                                             try:
-                                                title = response.xpath('//font[@size="+1"]').extract_first()
-                                                item['title'] = remove_tags(title)
+                                                title = remove_tags(response.xpath('//font[@size="+1"]').extract_first())
+                                                item['title'] = title
                                             except:
                                                 try:
-                                                    title = response.xpath('//font[@size="+0"]').extract_first()
-                                                    item['title'] = remove_tags(title)
+                                                    title = remove_tags(response.xpath('//font[@size="+0"]').extract_first())
+                                                    item['title'] = title
                                                 except:
                                                     if self.date <= date(1999, 10, 3): # on this date the page html changes with respect to the other dates
                                                         try:
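The block above is a fallback ladder: each nested try/except retries the headline lookup with a smaller font size (5, 4, 3, then the relative sizes +1 and +0), in both bare and p-wrapped variants. Not part of this commit, but the same ladder can be expressed as a loop over candidate XPaths; a hypothetical sketch (the extract()[1] variants of the real code are omitted for brevity):

    # Hypothetical refactor sketch, not in the commit.
    TITLE_PATHS = ['//font[@size="5"]', '//p/font[@size="5"]',
                   '//font[@size="4"]', '//p/font[@size="4"]',
                   '//font[@size="3"]', '//p/font[@size="3"]',
                   '//font[@size="+1"]', '//font[@size="+0"]']

    def first_title(response):
        for path in TITLE_PATHS:
            node = response.xpath(path).extract_first()
            if node:
                return remove_tags(node)
        return None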
@@ -340,34 +453,86 @@ class QuotesSpider(scrapy.Spider):
         if flag:
             if self.date <= self.comparison_date_1:
                 """
                 DATES > 1999-10-03 AND DATES <= 2001-12-07
                 """
                 for p in response.css('p').extract():
-                    text += remove_tags(p).replace('\r','') ## does not take into account the first indices, where the title is
-                    text = text.replace('\t','')
+                    # text += remove_tags(p).replace('\r','') ## does not take into account the first indices, where the title is
+                    # text = text.replace('\t','')
+                    p = clean_raw(p)
+                    newsText = remove_tags(p)
+                    text += text_cleaning(newsText)

                 m = re.search(title, text)
                 if title[-1] == "?":
                     text = text[m.end()+1:]
                 else:
                     text = text[m.end():]
                 text = text.lstrip("\n")
                 text = text.rstrip("\n")

             elif self.date > self.comparison_date_1 and self.date <= self.comparison_date_3:
                 """
                 DATES > 2001-12-07 AND DATES <= 2003-04-25
                 """
                 for p in response.xpath('//table[@bordercolor="#CCCCCC"]').css('p').extract():
-                    text += remove_tags(p).replace('\r','')
-                    text = text.replace('\t','')
+                    # text += remove_tags(p).replace('\r','')
+                    # text = text.replace('\t','')
+                    p = clean_raw(p)
+                    newsText = remove_tags(p)
+                    text += text_cleaning(newsText)

                 m = re.search(title, text)
                 if title[-1] == "?":
                     text = text[m.end()+1:]
                 else:
                     text = text[m.end():]
                 text = text.lstrip("\n")
                 text = text.rstrip("\n")

             elif self.date > self.comparison_date_3 and self.date <= self.comparison_date_4:
                 """
                 DATES > 2003-04-25 AND DATES <= 2004-11-16
                 """
                 p = response.css('p').extract()
                 for i in range(0, len(p)):
-                    text += remove_tags(p[i]).replace('\r','')
-                    text = text.replace('\t','')
+                    # text += remove_tags(p[i]).replace('\r','')
+                    # text = text.replace('\t','')
+                    aux = clean_raw(p[i])
+                    newsText = remove_tags(aux).lstrip("\n")
+                    newsText = newsText.rstrip("\n")
+                    text += text_cleaning(newsText)

             elif self.date > self.comparison_date_4 and self.date <= self.comparison_date_5:
                 """
                 DATES > 2004-11-16 AND DATES <= 2004-12-12
                 """
                 p = response.css('p').extract()
                 for i in range(3, len(p)):
-                    text += remove_tags(p[i]).replace('\r','')
-                    text = text.replace('\t','')
+                    # text += remove_tags(p[i]).replace('\r','')
+                    # text = text.replace('\t','')
+                    aux = clean_raw(p[i])
+                    newsText = remove_tags(aux).lstrip("\n")
+                    newsText = newsText.rstrip("\n")
+                    text += text_cleaning(newsText)

                 if text == '':
                     for i in range(0, len(p)):
-                        text += remove_tags(p[i]).replace('\r','')
-                        text = text.replace('\t','')
+                        # text += remove_tags(p[i]).replace('\r','')
+                        # text = text.replace('\t','')
+                        aux = clean_raw(p[i])
+                        newsText = remove_tags(aux).lstrip("\n")
+                        newsText = newsText.rstrip("\n")
+                        text += text_cleaning(newsText)

         else:
-            text = remove_tags(response.body)
-            text = text[len(title):]
             """
             DATES <= 1999-10-03
             """
+            # text = remove_tags(response.body)
+            # text = text[len(title):]
+            m = re.search(title, response.body)
+            body = response.body[m.end():]
+            body = clean_raw(body)
+            newsText = remove_tags(body).lstrip("\n")
+            newsText = newsText.rstrip("\n")
+            text += text_cleaning(newsText)

         item['text'] = text
         item['url'] = response.url
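One detail of the m = re.search(title, text) step used above: the title is passed as a regular expression, not as a literal. When a headline ends in '?', that character acts as a quantifier, so the match stops one character before the literal question mark; the title[-1] == "?" branch compensates by slicing from m.end()+1. A self-contained sketch with an invented headline:

    import re

    title = 'Habra reforma fiscal?'
    text = 'Habra reforma fiscal? MEXICO, D.F. El debate continua.'
    m = re.search(title, text)   # trailing '?' makes the final 'l' optional; the match ends before the literal '?'
    body = text[m.end() + 1:] if title[-1] == '?' else text[m.end():]
    print(body.lstrip())         # -> 'MEXICO, D.F. El debate continua.'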
@@ -376,27 +541,39 @@ class QuotesSpider(scrapy.Spider):
     def parse_item_2(self, response):
         """
         DATES > 2004-12-12 AND DATES <= 2005-01-31
         """
         item = response.meta['item']
         text = ''
-        title_list = []
-        title_list.extend(response.xpath('//*[@id="contenido"]/h1/text()').extract())
-        title_list.extend(response.xpath('//h1/text()').extract())
-
-        for t in title_list:
-            if t is not None or t != '':
+        # titleLst = []
+        # titleLst.extend(response.xpath('//*[@id="contenido"]/h1/text()').extract())
+        # titleLst.extend(response.xpath('//h1/text()').extract())
+        titleSet = set()
+        titleSet.add(response.xpath('//*[@id="contenido"]/h1').extract_first())
+        titleSet.add(response.xpath('//h1').extract_first())
+
+        for t in titleSet:
+            if t is not None and t != '':
                 title = remove_tags(t).replace('\r','')
                 title = title.replace('\t','')
                 item['title'] = title

         p = response.css('p').extract()
         for i in range(4, len(p)):
-            text += remove_tags(p[i]).replace('\r','')
-            text = text.replace('\t','')
+            # text += remove_tags(p[i]).replace('\r','')
+            # text = text.replace('\t','')
+            newsText = remove_tags(p[i]).lstrip("\n")
+            newsText = newsText.rstrip("\n")
+            text += text_cleaning(newsText)

         if text == '':
             for i in range(0, len(p)):
-                text += remove_tags(p[i]).replace('\r','')
-                text = text.replace('\t','')
+                # text += remove_tags(p[i]).replace('\r','')
+                # text = text.replace('\t','')
+                newsText = remove_tags(p[i]).lstrip("\n")
+                newsText = newsText.rstrip("\n")
+                text += text_cleaning(newsText)

         item['text'] = text
         item['url'] = response.url
@@ -405,21 +582,36 @@ class QuotesSpider(scrapy.Spider):
     def parse_item_3(self, response):
         """
         DATES > 2005-01-31 AND DATES <= 2009-02-15
         """
         item = NoticiasItem()
         text = ''
+        titleSet = set()
         # item['date'] = self.date
         item['date'] = datetime.combine(self.date, time()).replace(tzinfo=self.tz).isoformat('T')

-        title = response.xpath('//*[@class="documentContent"]/h1[@class="title"]/text()').extract()
-        if ( len(title) > 0 ):
-            item['title'] = title[0]
-        else:
-            item['title'] = response.xpath('//*[@class="documentContent"]/h1/text()').extract_first()
+        # title = response.xpath('//*[@class="documentContent"]/h1[@class="title"]/text()').extract()
+        # if len(title) > 0:
+        #     item['title'] = title[0]
+        # else:
+        #     item['title'] = response.xpath('//*[@class="documentContent"]/h1/text()').extract_first()
+        titleSet.add(response.xpath('//*[@class="documentContent"]/h1[@class="title"]').extract_first())
+        titleSet.add(response.xpath('//*[@class="documentContent"]/h1').extract_first())
+        for t in titleSet:
+            if t is not None and t != '':
+                title = remove_tags(t).replace('\r','')
+                title = title.replace('\t','')
+                item['title'] = title

         item['topic'] = response.xpath('//*[@id="portal-breadcrumbs"]/a[2]/text()').extract_first()

         for p in response.xpath('//*[@class="documentContent"]/p').extract():
-            text += remove_tags(p).replace('\r','')
-            text = text.replace('\t','')
+            # text += remove_tags(p).replace('\r','')
+            # text = text.replace('\t','')
+            newsText = remove_tags(p).lstrip("\n")
+            newsText = newsText.rstrip("\n")
+            text += text_cleaning(newsText)

         item['text'] = text
         item['url'] = response.url
@@ -428,23 +620,55 @@ class QuotesSpider(scrapy.Spider):
     def parse_item_4(self, response):
         """
         DATES > 2009-02-15
         """
         d = response.xpath('//*[@class="main-fecha"]/text()').extract_first()
         d = d.replace('de', '').replace('  ', ' ').split(' ')
         newsDate = date(int(d[3]), self.parse_month[d[2].lower()], int(d[1]))

         if newsDate == self.date:
             item = NoticiasItem()
             text = ''
             # path_list = ['//*[@class="col"]/p', '//*[@class="col col1"]/p', '//*[@class="col col2"]/p']
-            path_list = ['//*[@class="col"]', '//*[@class="col col1"]', '//*[@class="col col2"]']
+            # path_list = ['//*[@class="col"]', '//*[@class="col col1"]', '//*[@class="col col2"]']
+            textLst = []
+            textLst.extend(response.xpath('//*[@class="col"]').extract())
+            textLst.extend(response.xpath('//*[@class="col col1"]').extract())
+            textLst.extend(response.xpath('//*[@class="col col2"]').extract())

             # item['date'] = self.date
-            item['date'] = datetime.combine(self.date, time()).replace(tzinfo=self.tz).isoformat('T')
+            item['date'] = datetime.combine(newsDate, time()).replace(tzinfo=self.tz).isoformat('T')
             item['title'] = remove_tags(response.xpath('//*[@class="cabeza"]').extract_first())
             item['topic'] = response.xpath('//*[@class="breadcrumb gui"]/span[2]/a/text()').extract_first()

-            for path in path_list:
-                for p in response.xpath(path).extract():
-                    text += remove_tags(p).replace('\r','')
-                    text = text.replace('\t','')
+            author = response.xpath('//*[@class="credito-autor"]/text()').extract_first()
+            if author is None or author == '':
+                author = response.xpath('//*[@class="credito-articulo"]/text()').extract_first()
+            item['author'] = author
+
+            location = remove_tags(response.xpath('//p[@class="s-s"]').extract_first())
+            if location is not None and location != '' and len(location) <= 35:
+                item['location'] = location
+            else:
+                item['location'] = None
+
+            for p in textLst:
+                # text += remove_tags(p).replace('\r', '')
+                # text = text.replace('\t', '')
+                p = clean_raw(p)
+                # newsText = remove_tags(p).lstrip("\n")
+                # newsText = newsText.rstrip("\n")
+                # text += text_cleaning(newsText)
+                text += remove_tags(p)
+
+            text = text.lstrip("\n")
+            text = text.rstrip("\n")
+            text = text_cleaning(text)

             item['text'] = text
             item['url'] = response.url
             # print item['title']
             # print 'title: ' + item['title'] + '\nurl: ' + item['url'] + '\n'
             yield item
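The location guard above accepts only short datelines, mirroring the <p class="s-s">.{,35}</p> alternative in DIVP_RE at the top of the file: an s-s paragraph of at most 35 characters is treated as the story's location, while anything longer is left for the body-cleaning pass to remove. The same guard in isolation (sample values invented):

    def pick_location(candidate):
        # Keep only short, non-empty datelines such as 'Merida, Yucatan'.
        if candidate is not None and candidate != '' and len(candidate) <= 35:
            return candidate
        return None

    print(pick_location('Merida, Yucatan'))   # -> 'Merida, Yucatan'
    print(pick_location(''))                  # -> None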
descarga_por_dia/laJornada/laJornada/spiders/noticias.pyc  (view file @ 30d91be1)
No preview for this file type

descarga_por_dia/laVerdadYuc/laVerdadYuc/spiders/noticias.pyc  (view file @ 30d91be1)
No preview for this file type