Tuesday, May 26, 2020
Spark Practice 2
>>> spark=SparkSession.builder.appName("Analyzing London Crime data").getOrCreate()
>>> data=spark.data.format("csv").option("header","true").load("/home/rameshkumar/datasets/london_crime_by_lsoa.csv")
Traceback (most recent call last):
File "", line 1, in
AttributeError: 'SparkSession' object has no attribute 'data'
>>> data=spark.read.format("csv").option("header","true").load("/home/rameshkumar/datasets/london_crime_by_lsoa.csv")
Traceback (most recent call last):
File "/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py", line 63, in deco
return f(*a, **kw)
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/protocol.py", line 320, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling o59.load.
: org.apache.spark.sql.AnalysisException: Path does not exist: hdfs://nn01.itversity.com:8020/home/rameshkumar/datasets/london_crime_by_lsoa.csv;
at org.apache.spark.sql.execution.datasources.DataSource$.org$apache$spark$sql$execution$datasources$DataSource$$checkAndGlobPathIfNecessary(DataSource.scala:715)
at org.apache.spark.sql.execution.datasources.DataSource$$anonfun$15.apply(DataSource.scala:389)
at org.apache.spark.sql.execution.datasources.DataSource$$anonfun$15.apply(DataSource.scala:389)
at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)
at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)
at scala.collection.immutable.List.foreach(List.scala:381)
at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)
at scala.collection.immutable.List.flatMap(List.scala:344)
at org.apache.spark.sql.execution.datasources.DataSource.resolveRelation(DataSource.scala:388)
at org.apache.spark.sql.DataFrameReader.loadV1Source(DataFrameReader.scala:239)
at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:227)
at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:174)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:214)
at java.lang.Thread.run(Thread.java:748)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "", line 1, in
File "/usr/hdp/current/spark2-client/python/pyspark/sql/readwriter.py", line 166, in load
return self._df(self._jreader.load(path))
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/java_gateway.py", line 1160, in __call__
File "/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py", line 69, in deco
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
pyspark.sql.utils.AnalysisException: 'Path does not exist: hdfs://nn01.itversity.com:8020/home/rameshkumar/datasets/london_crime_by_lsoa.csv;'
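Without a scheme, Spark resolves a bare path against the cluster's default filesystem, which here is HDFS (hdfs://nn01.itversity.com:8020), where the file does not exist. Spelling the scheme out removes the ambiguity; a minimal sketch (the HDFS location below is hypothetical):

    # a bare path inherits the default filesystem (HDFS on this cluster); name the scheme explicitly
    local_path = "file:///home/rameshkumar/datasets/london_crime_by_lsoa.csv"
    hdfs_path = "hdfs:///user/rameshkumar/london_crime_by_lsoa.csv"  # hypothetical HDFS copy
    data = spark.read.format("csv").option("header", "true").load(local_path)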
>>> data=spark.read.format("csv").option("header","true").load("file:///home/rameshkumar/datasets/london_crime_by_lsoa.csv")
>>> data.printSchema()
root
|-- lsoa_code: string (nullable = true)
|-- borough: string (nullable = true)
|-- major_category: string (nullable = true)
|-- minor_category: string (nullable = true)
|-- value: string (nullable = true)
|-- year: string (nullable = true)
|-- month: string (nullable = true)
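Every column above comes back as a string because the CSV reader does not infer types by default. Two ways around that, sketched: ask for inference at load time, or cast the numeric columns afterwards.

    from pyspark.sql.functions import col

    # option 1: infer types at load time (costs an extra pass over the file)
    data = (spark.read.format("csv")
                 .option("header", "true")
                 .option("inferSchema", "true")
                 .load("file:///home/rameshkumar/datasets/london_crime_by_lsoa.csv"))

    # option 2: cast just the columns that need it
    data = data.withColumn("value", col("value").cast("integer"))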
>>> data.count()
13490604
>>>
>>> data.limit(5).show()
+---------+----------+--------------------+--------------------+-----+----+-----+
|lsoa_code| borough| major_category| minor_category|value|year|month|
+---------+----------+--------------------+--------------------+-----+----+-----+
|E01001116| Croydon| Burglary|Burglary in Other...| 0|2016| 11|
|E01001646| Greenwich|Violence Against ...| Other violence| 0|2016| 11|
|E01000677| Bromley|Violence Against ...| Other violence| 0|2015| 5|
|E01003774| Redbridge| Burglary|Burglary in Other...| 0|2016| 3|
|E01004563|Wandsworth| Robbery| Personal Property| 0|2008| 6|
+---------+----------+--------------------+--------------------+-----+----+-----+
>>> data.dropna()
DataFrame[lsoa_code: string, borough: string, major_category: string, minor_category: string, value: string, year: string, month: string]
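DataFrames are immutable, so dropna() returns a new DataFrame rather than cleaning data in place; the shell printed the result above and then discarded it. To keep the cleaned rows, reassign:

    # dropna() is not in-place; bind the result or it is lost
    data = data.dropna()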
>>> data.show(5)
+---------+----------+--------------------+--------------------+-----+----+-----+
|lsoa_code| borough| major_category| minor_category|value|year|month|
+---------+----------+--------------------+--------------------+-----+----+-----+
|E01001116| Croydon| Burglary|Burglary in Other...| 0|2016| 11|
|E01001646| Greenwich|Violence Against ...| Other violence| 0|2016| 11|
|E01000677| Bromley|Violence Against ...| Other violence| 0|2015| 5|
|E01003774| Redbridge| Burglary|Burglary in Other...| 0|2016| 3|
|E01004563|Wandsworth| Robbery| Personal Property| 0|2008| 6|
+---------+----------+--------------------+--------------------+-----+----+-----+
only showing top 5 rows
>>> data=data.drop('lsoa_code')
>>> data.show(5)
+----------+--------------------+--------------------+-----+----+-----+
| borough| major_category| minor_category|value|year|month|
+----------+--------------------+--------------------+-----+----+-----+
| Croydon| Burglary|Burglary in Other...| 0|2016| 11|
| Greenwich|Violence Against ...| Other violence| 0|2016| 11|
| Bromley|Violence Against ...| Other violence| 0|2015| 5|
| Redbridge| Burglary|Burglary in Other...| 0|2016| 3|
|Wandsworth| Robbery| Personal Property| 0|2008| 6|
+----------+--------------------+--------------------+-----+----+-----+
only showing top 5 rows
>>> total_boroughs=data.select('borough')
>>> total_boroughs.show()
+--------------------+
| borough|
+--------------------+
| Croydon|
| Greenwich|
| Bromley|
| Redbridge|
| Wandsworth|
| Ealing|
| Ealing|
| Hounslow|
| Newham|
| Sutton|
| Haringey|
| Lambeth|
|Richmond upon Thames|
| Wandsworth|
| Hillingdon|
| Havering|
|Barking and Dagenham|
| Redbridge|
|Kingston upon Thames|
| Sutton|
+--------------------+
only showing top 20 rows
>>> total_boroughs=data.select('borough').distinct()
>>> total_boroughs.show()
+--------------------+
| borough|
+--------------------+
| Croydon|
| Wandsworth|
| Bexley|
| Lambeth|
|Barking and Dagenham|
| Camden|
| Greenwich|
| Newham|
| Tower Hamlets|
| Hounslow|
| Barnet|
| Harrow|
|Kensington and Ch...|
| Islington|
| Brent|
| Haringey|
| Bromley|
| Merton|
| Westminster|
| Hackney|
+--------------------+
only showing top 20 rows
>>> total_boroughs.count()
33
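distinct() followed by count() gives the answer, but the same number is available in a single aggregation; a sketch:

    from pyspark.sql.functions import countDistinct

    # one aggregation instead of distinct() plus count()
    data.select(countDistinct("borough")).show()  # 33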
>>> hackney_data=data.filter(data['borough']==='Hackney')
File "", line 1
hackney_data=data.filter(data['borough']==='Hackney')
^
SyntaxError: invalid syntax
>>> hackney_data=data.filter(data['borough']=='Hackney')
>>> hackney_data.show(5)
+-------+--------------------+--------------------+-----+----+-----+
|borough| major_category| minor_category|value|year|month|
+-------+--------------------+--------------------+-----+----+-----+
|Hackney| Criminal Damage|Criminal Damage T...| 0|2011| 6|
|Hackney|Violence Against ...| Harassment| 1|2013| 2|
|Hackney| Criminal Damage|Other Criminal Da...| 0|2011| 7|
|Hackney|Violence Against ...| Wounding/GBH| 0|2013| 12|
|Hackney| Theft and Handling| Other Theft Person| 0|2016| 8|
+-------+--------------------+--------------------+-----+----+-----+
only showing top 5 rows
>>> data_2015_2016=data.filter(data['year'].isin(['2015','2016']))
>>> data_2015_2016.sample(fraction=0.1).show()
+--------------------+--------------------+--------------------+-----+----+-----+
| borough| major_category| minor_category|value|year|month|
+--------------------+--------------------+--------------------+-----+----+-----+
| Croydon| Theft and Handling|Motor Vehicle Int...| 0|2016| 3|
| Waltham Forest| Burglary|Burglary in Other...| 0|2015| 6|
|Hammersmith and F...| Drugs| Possession Of Drugs| 1|2016| 2|
| Wandsworth| Theft and Handling|Handling Stolen G...| 0|2016| 2|
| Waltham Forest|Violence Against ...| Common Assault| 0|2016| 6|
|Barking and Dagenham| Criminal Damage|Criminal Damage T...| 0|2015| 4|
| Bexley| Robbery| Personal Property| 0|2016| 1|
| Croydon|Violence Against ...| Offensive Weapon| 0|2016| 2|
| Merton|Violence Against ...| Harassment| 1|2016| 6|
| Hackney| Theft and Handling| Other Theft Person| 0|2016| 8|
| Tower Hamlets| Theft and Handling| Other Theft| 2|2016| 1|
| Haringey|Violence Against ...| Harassment| 0|2016| 12|
| Westminster| Theft and Handling|Motor Vehicle Int...| 0|2016| 11|
| Lewisham| Drugs| Possession Of Drugs| 0|2015| 5|
| Haringey| Criminal Damage|Criminal Damage T...| 0|2015| 2|
| Lewisham| Fraud or Forgery|Other Fraud & For...| 0|2015| 6|
| Camden| Criminal Damage|Criminal Damage T...| 0|2015| 8|
| Southwark| Burglary|Burglary in Other...| 0|2016| 8|
| Lambeth| Theft and Handling| Other Theft| 9|2015| 5|
|Hammersmith and F...| Burglary|Burglary in Other...| 0|2016| 5|
+--------------------+--------------------+--------------------+-----+----+-----+
only showing top 20 rows
>>> data_2014_onwards=data.filter(data['year']>=2014)
>>> data_2014_onwards.sample(fraction=0.1).show()
+--------------------+--------------------+--------------------+-----+----+-----+
| borough| major_category| minor_category|value|year|month|
+--------------------+--------------------+--------------------+-----+----+-----+
| Bromley|Violence Against ...| Other violence| 0|2015| 5|
| Haringey|Violence Against ...| Wounding/GBH| 0|2015| 12|
| Haringey| Criminal Damage|Criminal Damage T...| 0|2016| 12|
| Ealing| Robbery| Personal Property| 0|2014| 9|
| Lambeth| Drugs| Drug Trafficking| 0|2015| 9|
| Newham|Violence Against ...| Harassment| 0|2014| 1|
| Lambeth| Sexual Offences| Rape| 0|2014| 3|
| Brent| Theft and Handling| Other Theft Person| 1|2016| 12|
| Ealing|Other Notifiable ...| Other Notifiable| 0|2016| 12|
| Barnet| Theft and Handling|Handling Stolen G...| 0|2016| 4|
| Havering|Other Notifiable ...| Other Notifiable| 0|2016| 6|
|Hammersmith and F...| Criminal Damage|Criminal Damage T...| 0|2015| 8|
| Islington| Burglary|Burglary in Other...| 1|2016| 7|
| Hillingdon|Violence Against ...| Wounding/GBH| 0|2016| 1|
| Lambeth| Drugs| Possession Of Drugs| 0|2014| 5|
| Greenwich| Criminal Damage|Criminal Damage T...| 0|2015| 1|
| Greenwich| Criminal Damage|Criminal Damage T...| 0|2016| 10|
| Hillingdon|Other Notifiable ...| Other Notifiable| 0|2015| 6|
| Wandsworth|Violence Against ...| Harassment| 0|2016| 11|
| Merton|Violence Against ...| Harassment| 1|2016| 6|
+--------------------+--------------------+--------------------+-----+----+-----+
only showing top 20 rows
>>> borough_crime_count=data.groupBy('borough').count()
>>> borough_crime_count.show(5)
+--------------------+------+
| borough| count|
+--------------------+------+
| Croydon|602100|
| Wandsworth|498636|
| Bexley|385668|
| Lambeth|519048|
|Barking and Dagenham|311040|
+--------------------+------+
only showing top 5 rows
>>> borough_conviction_sum=data.groupBy('borough').agg({"value":"sum"})
>>> borough_conviction_sum.show(5)
+--------------------+----------+
| borough|sum(value)|
+--------------------+----------+
| Croydon| 260294.0|
| Wandsworth| 204741.0|
| Bexley| 114136.0|
| Lambeth| 292178.0|
|Barking and Dagenham| 149447.0|
+--------------------+----------+
only showing top 5 rows
>>> total_borough_convictions=borough_conviction_sum.agg({"convictions":"sum"})
Traceback (most recent call last):
File "/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py", line 63, in deco
return f(*a, **kw)
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/protocol.py", line 320, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling o135.agg.
: org.apache.spark.sql.AnalysisException: Cannot resolve column name "convictions" among (borough, sum(value));
at org.apache.spark.sql.Dataset$$anonfun$resolve$1.apply(Dataset.scala:222)
at org.apache.spark.sql.Dataset$$anonfun$resolve$1.apply(Dataset.scala:222)
at scala.Option.getOrElse(Option.scala:121)
at org.apache.spark.sql.Dataset.resolve(Dataset.scala:221)
at org.apache.spark.sql.Dataset.col(Dataset.scala:1241)
at org.apache.spark.sql.Dataset.apply(Dataset.scala:1208)
at org.apache.spark.sql.RelationalGroupedDataset$$anonfun$agg$2.apply(RelationalGroupedDataset.scala:172)
at org.apache.spark.sql.RelationalGroupedDataset$$anonfun$agg$2.apply(RelationalGroupedDataset.scala:171)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
at scala.collection.immutable.Map$Map1.foreach(Map.scala:116)
at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
at scala.collection.AbstractTraversable.map(Traversable.scala:104)
at org.apache.spark.sql.RelationalGroupedDataset.agg(RelationalGroupedDataset.scala:171)
at org.apache.spark.sql.RelationalGroupedDataset.agg(RelationalGroupedDataset.scala:190)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:214)
at java.lang.Thread.run(Thread.java:748)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "", line 1, in
File "/usr/hdp/current/spark2-client/python/pyspark/sql/dataframe.py", line 1327, in agg
return self.groupBy().agg(*exprs)
File "/usr/hdp/current/spark2-client/python/pyspark/sql/group.py", line 88, in agg
jdf = self._jgd.agg(exprs[0])
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/java_gateway.py", line 1160, in __call__
File "/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py", line 69, in deco
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
pyspark.sql.utils.AnalysisException: 'Cannot resolve column name "convictions" among (borough, sum(value));'
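The aggregate column was materialised as sum(value), so there is no "convictions" column to resolve yet. The rename below fixes it; aliasing inside the aggregation avoids the extra step altogether, sketched here:

    from pyspark.sql import functions as func

    # alias the aggregate up front instead of renaming sum(value) afterwards
    borough_conviction_sum = (data.groupBy("borough")
                                  .agg(func.sum("value").alias("convictions")))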
>>> borough_conviction_sum=data.groupBy('borough').agg({"value":"sum"}).withColumnRenamed("sum(value)","convictions")
>>> total_borough_convictions = borough_conviction_sum.agg({"convictions":"sum"})
>>> total_borough_convictions.show()
+----------------+
|sum(convictions)|
+----------------+
| 6447758.0|
+----------------+
>>> total_convictions = total_borough_convictions.collect()
>>> total_convictions = total_borough_convictions.collect()[0][0]
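collect() returns a list of Row objects, which is why the first assignment above binds a list; the [0][0] on the retry digs the bare number out:

    rows = total_borough_convictions.collect()  # [Row(sum(convictions)=6447758.0)]
    total_convictions = rows[0][0]              # 6447758.0, a plain float
    # equivalent: total_borough_convictions.first()[0]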
>>> import pyspark.sql.functions as func
>>> borough_percentage_contribution = borough_conviction_sum.withColumn(\
... func.round(borough_conviction_sum.convictions / total_convictions * 100, 2))
Traceback (most recent call last):
File "", line 2, in
TypeError: withColumn() missing 1 required positional argument: 'col'
>>> borough_percentage_contribution = borough_conviction_sum.withColumn(
... "% contribution",
... func.round(borough_conviction_sum.convictions / total_convictions * 100, 2))
>>> borough_percentage_contribution.printSchema()
root
|-- borough: string (nullable = true)
|-- convictions: double (nullable = true)
|-- % contribution: double (nullable = true)
>>> borough_percentage_contribution.orderBy(borough_percentage_contribution[2].desc()).show()
+--------------------+-----------+--------------+
| borough|convictions|% contribution|
+--------------------+-----------+--------------+
| Westminster| 455028.0| 7.06|
| Lambeth| 292178.0| 4.53|
| Southwark| 278809.0| 4.32|
| Camden| 275147.0| 4.27|
| Newham| 262024.0| 4.06|
| Croydon| 260294.0| 4.04|
| Ealing| 251562.0| 3.9|
| Islington| 230286.0| 3.57|
| Tower Hamlets| 228613.0| 3.55|
| Brent| 227551.0| 3.53|
| Hackney| 217119.0| 3.37|
| Lewisham| 215137.0| 3.34|
| Haringey| 213272.0| 3.31|
| Barnet| 212191.0| 3.29|
| Hillingdon| 209680.0| 3.25|
| Wandsworth| 204741.0| 3.18|
| Waltham Forest| 203879.0| 3.16|
| Enfield| 193880.0| 3.01|
| Hounslow| 186772.0| 2.9|
|Hammersmith and F...| 185259.0| 2.87|
+--------------------+-----------+--------------+
only showing top 20 rows
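Ordering by positional index ([2]) works, but it breaks silently if the column order ever changes; referencing the column by name is sturdier, sketched:

    # order by name rather than position
    borough_percentage_contribution.orderBy(func.desc("% contribution")).show()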
>>> conviction_monthly=data.filter(data['year']==2014).groupBy("month").agg({'value":"sum"}).withColumnRenamed("sum(value)","convictions")
File "", line 1
conviction_monthly=data.filter(data['year']==2014).groupBy("month").agg({'value":"sum"}).withColumnRenamed("sum(value)","convictions")
^
SyntaxError: EOL while scanning string literal
>>> conviction_monthly=data.filter(data['year']==2014).groupBy("month").agg({"value":"sum"}).withColumnRenamed("sum(value)","convictions")
>>> total_conviction_monthly = conviction_monthly.agg({"convictions":"sum"}).collect()[0][0]
>>> total_conviction_monthly = conviction_monthly.agg({"convictions":"sum"}).collect()
>>> total_conviction_monthly = conviction_monthly.withColumn("percent",
... func.round(conviction_monthly.convictions/total_conviction_monthly * 100, 2))
Traceback (most recent call last):
File "", line 2, in
File "/usr/hdp/current/spark2-client/python/pyspark/sql/column.py", line 116, in _
njc = getattr(self._jc, name)(jc)
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/java_gateway.py", line 1160, in __call__
File "/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py", line 63, in deco
return f(*a, **kw)
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/protocol.py", line 320, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling o247.divide.
: java.lang.RuntimeException: Unsupported literal type class java.util.ArrayList [[680183.0]]
at org.apache.spark.sql.catalyst.expressions.Literal$.apply(literals.scala:77)
at org.apache.spark.sql.catalyst.expressions.Literal$$anonfun$create$2.apply(literals.scala:163)
at org.apache.spark.sql.catalyst.expressions.Literal$$anonfun$create$2.apply(literals.scala:163)
at scala.util.Try.getOrElse(Try.scala:79)
at org.apache.spark.sql.catalyst.expressions.Literal$.create(literals.scala:162)
at org.apache.spark.sql.functions$.typedLit(functions.scala:113)
at org.apache.spark.sql.functions$.lit(functions.scala:96)
at org.apache.spark.sql.Column.$div(Column.scala:746)
at org.apache.spark.sql.Column.divide(Column.scala:761)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:214)
at java.lang.Thread.run(Thread.java:748)
>>> total_conviction_monthly.columns
Traceback (most recent call last):
File "", line 1, in
AttributeError: 'list' object has no attribute 'columns'
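The root cause is the same collect() slip as before: total_conviction_monthly was last assigned collect()'s list (the [[680183.0]] in the error), and a Python list cannot be turned into a Spark column literal for the division. Extracting the scalar first fixes it; a sketch (the monthly_percent name is mine):

    # pull the scalar out before dividing; a list is not a valid column literal
    total_conviction_monthly = conviction_monthly.agg(
        {"convictions": "sum"}).collect()[0][0]
    monthly_percent = conviction_monthly.withColumn(
        "percent",
        func.round(conviction_monthly.convictions / total_conviction_monthly * 100, 2))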
>>> crimes_category.orderBy(crimes_category.convictions.desc()).show()
Traceback (most recent call last):
File "", line 1, in
NameError: name 'crimes_category' is not defined
>>> crimes_category=data.groupBy("major_category").agg({"value":"sum"}).withColumnRenamed("sum(value)","convictions")
>>> crimes_category.orderBy(crimes_category.convictions.desc()).show()
+--------------------+-----------+
| major_category|convictions|
+--------------------+-----------+
| Theft and Handling| 2661861.0|
|Violence Against ...| 1558081.0|
| Burglary| 754293.0|
| Criminal Damage| 630938.0|
| Drugs| 470765.0|
| Robbery| 258873.0|
|Other Notifiable ...| 106349.0|
| Fraud or Forgery| 5325.0|
| Sexual Offences| 1273.0|
+--------------------+-----------+
>>> year_df=data.select('year')
>>> year_df.agg({'year':'min'}).show()
+---------+
|min(year)|
+---------+
| 2008|
+---------+
>>> year_df.agg({'year':'max'}).show()
+---------+
|max(year)|
+---------+
| 2016|
+---------+
>>> year_df.describe().show()
+-------+------------------+
|summary| year|
+-------+------------------+
| count| 13490604|
| mean| 2012.0|
| stddev|2.5819889931674394|
| min| 2008|
| max| 2016|
+-------+------------------+
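year is still a string column here, so min and max compare lexicographically; that happens to match numeric order for fixed-width four-digit years, but a cast makes the intent explicit, sketched:

    # cast so the aggregates are numeric rather than lexicographic
    year_df = data.select(data['year'].cast('integer').alias('year'))
    year_df.describe().show()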
>>> data.crosstab('borough', 'major_category').select('borough_major_category', 'Burglary', 'Drugs', 'Fraud or Forgery', 'Robbery').show()
+----------------------+--------+-----+----------------+-------+
|borough_major_category|Burglary|Drugs|Fraud or Forgery|Robbery|
+----------------------+--------+-----+----------------+-------+
| Havering| 32400|32616| 5508| 27648|
| Merton| 26784|29160| 5724| 23652|
| Haringey| 31320|35424| 10368| 29484|
| Tower Hamlets| 31104|37368| 5400| 28512|
| Bromley| 42552|42336| 8532| 34668|
| Enfield| 39528|44064| 9720| 35532|
| Kingston upon Thames| 21168|22140| 3780| 15660|
| Westminster| 27648|32616| 8748| 25056|
| Richmond upon Thames| 24840|23004| 2808| 18468|
| Lewisham| 36504|43740| 11016| 34884|
| Brent| 37368|46980| 9288| 34128|
| Barking and Dagenham| 23760|26244| 7236| 22248|
| Redbridge| 34776|36504| 8532| 32400|
| Islington| 26568|34128| 5184| 26244|
| Hackney| 31104|38772| 7560| 29160|
| Newham| 35424|41580| 10692| 34452|
| City of London| 540| 756| 0| 540|
| Sutton| 26136|26784| 3024| 21060|
| Southwark| 35856|45144| 11772| 35316|
| Harrow| 29592|31212| 4752| 23976|
+----------------------+--------+-----+----------------+-------+
only showing top 20 rows
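crosstab() fuses the two column names into borough_major_category and counts co-occurrences. A close relative is groupBy().pivot(), which keeps the row key's own name and accepts other aggregates; a sketch:

    # the pivot equivalent of the crosstab above: one count per borough x major_category
    data.groupBy("borough").pivot("major_category").count().show()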
>>> data=spark.data.format("csv").option("header","true").load("/home/rameshkumar/datasets/london_crime_by_lsoa.csv")
Traceback (most recent call last):
File "
AttributeError: 'SparkSession' object has no attribute 'data'
>>> data=spark.read.format("csv").option("header","true").load("/home/rameshkumar/datasets/london_crime_by_lsoa.csv")
Traceback (most recent call last):
File "/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py", line 63, in deco
return f(*a, **kw)
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/protocol.py", line 320, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling o59.load.
: org.apache.spark.sql.AnalysisException: Path does not exist: hdfs://nn01.itversity.com:8020/home/rameshkumar/datasets/london_crime_by_lsoa.csv;
at org.apache.spark.sql.execution.datasources.DataSource$.org$apache$spark$sql$execution$datasources$DataSource$$checkAndGlobPathIfNecessary(DataSource.scala:715)
at org.apache.spark.sql.execution.datasources.DataSource$$anonfun$15.apply(DataSource.scala:389)
at org.apache.spark.sql.execution.datasources.DataSource$$anonfun$15.apply(DataSource.scala:389)
at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)
at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)
at scala.collection.immutable.List.foreach(List.scala:381)
at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)
at scala.collection.immutable.List.flatMap(List.scala:344)
at org.apache.spark.sql.execution.datasources.DataSource.resolveRelation(DataSource.scala:388)
at org.apache.spark.sql.DataFrameReader.loadV1Source(DataFrameReader.scala:239)
at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:227)
at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:174)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:214)
at java.lang.Thread.run(Thread.java:748)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "
File "/usr/hdp/current/spark2-client/python/pyspark/sql/readwriter.py", line 166, in load
return self._df(self._jreader.load(path))
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/java_gateway.py", line 1160, in __call__
File "/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py", line 69, in deco
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
pyspark.sql.utils.AnalysisException: 'Path does not exist: hdfs://nn01.itversity.com:8020/home/rameshkumar/datasets/london_crime_by_lsoa.csv;'
>>> data=spark.read.format("csv").option("header","true").load("file:///home/rameshkumar/datasets/london_crime_by_lsoa.csv")
[Stage 0:> ( >>> data.printSchema()
root
|-- lsoa_code: string (nullable = true)
|-- borough: string (nullable = true)
|-- major_category: string (nullable = true)
|-- minor_category: string (nullable = true)
|-- value: string (nullable = true)
|-- year: string (nullable = true)
|-- month: string (nullable = true)
>>> data.count()
[Stage 1:> (0 + 16) / 16]
[Stage 1:======================================================================================================> (8[Stage 1:=========================================================================================================================================================> (12 13490604
>>>
>>> data.limit(5).show()
[Stage 3:> ( +---------+----------+--------------------+--------------------+-----+----+-----+
|lsoa_code| borough| major_category| minor_category|value|year|month|
+---------+----------+--------------------+--------------------+-----+----+-----+
|E01001116| Croydon| Burglary|Burglary in Other...| 0|2016| 11|
|E01001646| Greenwich|Violence Against ...| Other violence| 0|2016| 11|
|E01000677| Bromley|Violence Against ...| Other violence| 0|2015| 5|
|E01003774| Redbridge| Burglary|Burglary in Other...| 0|2016| 3|
|E01004563|Wandsworth| Robbery| Personal Property| 0|2008| 6|
+---------+----------+--------------------+--------------------+-----+----+-----+
>>> data.dropna()
DataFrame[lsoa_code: string, borough: string, major_category: string, minor_category: string, value: string, year: string, month: string]
>>> data.show(5)
[Stage 4:> ( +---------+----------+--------------------+--------------------+-----+----+-----+
|lsoa_code| borough| major_category| minor_category|value|year|month|
+---------+----------+--------------------+--------------------+-----+----+-----+
|E01001116| Croydon| Burglary|Burglary in Other...| 0|2016| 11|
|E01001646| Greenwich|Violence Against ...| Other violence| 0|2016| 11|
|E01000677| Bromley|Violence Against ...| Other violence| 0|2015| 5|
|E01003774| Redbridge| Burglary|Burglary in Other...| 0|2016| 3|
|E01004563|Wandsworth| Robbery| Personal Property| 0|2008| 6|
+---------+----------+--------------------+--------------------+-----+----+-----+
only showing top 5 rows
>>> data=data.drop('lsoa_code')
>>> data.show(5)
[Stage 5:> ( +----------+--------------------+--------------------+-----+----+-----+
| borough| major_category| minor_category|value|year|month|
+----------+--------------------+--------------------+-----+----+-----+
| Croydon| Burglary|Burglary in Other...| 0|2016| 11|
| Greenwich|Violence Against ...| Other violence| 0|2016| 11|
| Bromley|Violence Against ...| Other violence| 0|2015| 5|
| Redbridge| Burglary|Burglary in Other...| 0|2016| 3|
|Wandsworth| Robbery| Personal Property| 0|2008| 6|
+----------+--------------------+--------------------+-----+----+-----+
only showing top 5 rows
>>> total_boroughs=data.select('borough')
>>> total_boroughs.show()
[Stage 6:> ( +--------------------+
| borough|
+--------------------+
| Croydon|
| Greenwich|
| Bromley|
| Redbridge|
| Wandsworth|
| Ealing|
| Ealing|
| Hounslow|
| Newham|
| Sutton|
| Haringey|
| Lambeth|
|Richmond upon Thames|
| Wandsworth|
| Hillingdon|
| Havering|
|Barking and Dagenham|
| Redbridge|
|Kingston upon Thames|
| Sutton|
+--------------------+
only showing top 20 rows
>>> total_boroughs=data.select('borough').distinct()
>>> total_boroughs.show()
[Stage 7:> (0 [Stage 7:===================================================================================================================> (9[Stage 7:==================================================================================================================================================================================> (14[Stage 7:===============================================================================================================================================================================================> (15 +--------------------+
| borough|
+--------------------+
| Croydon|
| Wandsworth|
| Bexley|
| Lambeth|
|Barking and Dagenham|
| Camden|
| Greenwich|
| Newham|
| Tower Hamlets|
| Hounslow|
| Barnet|
| Harrow|
|Kensington and Ch...|
| Islington|
| Brent|
| Haringey|
| Bromley|
| Merton|
| Westminster|
| Hackney|
+--------------------+
only showing top 20 rows
>>> total_boroughs.count()
[Stage 15:> (0 [Stage 15:=========================> (2 [Stage 15:===============================================================> (5 [Stage 15:===========================================================================================================================================> (11[Stage 15:==============================================================================================================================================================================================> (15 33
>>> hackney_data=data.filter(data['borough']==='Hackney')
File "
hackney_data=data.filter(data['borough']==='Hackney')
^
SyntaxError: invalid syntax
>>> hackney_data=data.filter(data['borough']=='Hackney')
>>> hackney_data.show(5)
[Stage 18:> ( +-------+--------------------+--------------------+-----+----+-----+
|borough| major_category| minor_category|value|year|month|
+-------+--------------------+--------------------+-----+----+-----+
|Hackney| Criminal Damage|Criminal Damage T...| 0|2011| 6|
|Hackney|Violence Against ...| Harassment| 1|2013| 2|
|Hackney| Criminal Damage|Other Criminal Da...| 0|2011| 7|
|Hackney|Violence Against ...| Wounding/GBH| 0|2013| 12|
|Hackney| Theft and Handling| Other Theft Person| 0|2016| 8|
+-------+--------------------+--------------------+-----+----+-----+
only showing top 5 rows
>>> data_2015_2016=data.filter(data['year'].isin(['2015','2016']))
>>> data_2015_2016.sample(fraction=0.1).show()
[Stage 19:> ( +--------------------+--------------------+--------------------+-----+----+-----+
| borough| major_category| minor_category|value|year|month|
+--------------------+--------------------+--------------------+-----+----+-----+
| Croydon| Theft and Handling|Motor Vehicle Int...| 0|2016| 3|
| Waltham Forest| Burglary|Burglary in Other...| 0|2015| 6|
|Hammersmith and F...| Drugs| Possession Of Drugs| 1|2016| 2|
| Wandsworth| Theft and Handling|Handling Stolen G...| 0|2016| 2|
| Waltham Forest|Violence Against ...| Common Assault| 0|2016| 6|
|Barking and Dagenham| Criminal Damage|Criminal Damage T...| 0|2015| 4|
| Bexley| Robbery| Personal Property| 0|2016| 1|
| Croydon|Violence Against ...| Offensive Weapon| 0|2016| 2|
| Merton|Violence Against ...| Harassment| 1|2016| 6|
| Hackney| Theft and Handling| Other Theft Person| 0|2016| 8|
| Tower Hamlets| Theft and Handling| Other Theft| 2|2016| 1|
| Haringey|Violence Against ...| Harassment| 0|2016| 12|
| Westminster| Theft and Handling|Motor Vehicle Int...| 0|2016| 11|
| Lewisham| Drugs| Possession Of Drugs| 0|2015| 5|
| Haringey| Criminal Damage|Criminal Damage T...| 0|2015| 2|
| Lewisham| Fraud or Forgery|Other Fraud & For...| 0|2015| 6|
| Camden| Criminal Damage|Criminal Damage T...| 0|2015| 8|
| Southwark| Burglary|Burglary in Other...| 0|2016| 8|
| Lambeth| Theft and Handling| Other Theft| 9|2015| 5|
|Hammersmith and F...| Burglary|Burglary in Other...| 0|2016| 5|
+--------------------+--------------------+--------------------+-----+----+-----+
only showing top 20 rows
>>> data_2014_onwards=data.filter(data['year']>=2014)
>>> data_2014_onwards.sample(fraction=0.1).show()
[Stage 20:> ( +--------------------+--------------------+--------------------+-----+----+-----+
| borough| major_category| minor_category|value|year|month|
+--------------------+--------------------+--------------------+-----+----+-----+
| Bromley|Violence Against ...| Other violence| 0|2015| 5|
| Haringey|Violence Against ...| Wounding/GBH| 0|2015| 12|
| Haringey| Criminal Damage|Criminal Damage T...| 0|2016| 12|
| Ealing| Robbery| Personal Property| 0|2014| 9|
| Lambeth| Drugs| Drug Trafficking| 0|2015| 9|
| Newham|Violence Against ...| Harassment| 0|2014| 1|
| Lambeth| Sexual Offences| Rape| 0|2014| 3|
| Brent| Theft and Handling| Other Theft Person| 1|2016| 12|
| Ealing|Other Notifiable ...| Other Notifiable| 0|2016| 12|
| Barnet| Theft and Handling|Handling Stolen G...| 0|2016| 4|
| Havering|Other Notifiable ...| Other Notifiable| 0|2016| 6|
|Hammersmith and F...| Criminal Damage|Criminal Damage T...| 0|2015| 8|
| Islington| Burglary|Burglary in Other...| 1|2016| 7|
| Hillingdon|Violence Against ...| Wounding/GBH| 0|2016| 1|
| Lambeth| Drugs| Possession Of Drugs| 0|2014| 5|
| Greenwich| Criminal Damage|Criminal Damage T...| 0|2015| 1|
| Greenwich| Criminal Damage|Criminal Damage T...| 0|2016| 10|
| Hillingdon|Other Notifiable ...| Other Notifiable| 0|2015| 6|
| Wandsworth|Violence Against ...| Harassment| 0|2016| 11|
| Merton|Violence Against ...| Harassment| 1|2016| 6|
+--------------------+--------------------+--------------------+-----+----+-----+
only showing top 20 rows
>>> borough_crime_count=data.groupBy('borough').count()
>>> borough_crime_count.show(5)
[Stage 21:> (0 [Stage 21:============> (1 [Stage 21:=========================> (2 [Stage 21:==================================================> (4 [Stage 21:=========================================================================================> (7[Stage 21:==================================================================================================================> (9[Stage 21:====================================================================================================================================================================> (13[Stage 21:=================================================================================================================================================================================> (14[Stage 21:==============================================================================================================================================================================================> (15 +--------------------+------+
| borough| count|
+--------------------+------+
| Croydon|602100|
| Wandsworth|498636|
| Bexley|385668|
| Lambeth|519048|
|Barking and Dagenham|311040|
+--------------------+------+
only showing top 5 rows
>>> borough_conviction_sum=data.groupBy('borough').agg({"value":"sum"})
>>> borough_conviction_sum.show(5)
[Stage 29:> (0 [Stage 29:============> (1 [Stage 29:=========================> (2 [Stage 29:==================================================> (4 [Stage 29:============================================================================> (6 [Stage 29:======================================================================================================> (8[Stage 29:===========================================================================================================================================> (11 +--------------------+----------+
| borough|sum(value)|
+--------------------+----------+
| Croydon| 260294.0|
| Wandsworth| 204741.0|
| Bexley| 114136.0|
| Lambeth| 292178.0|
|Barking and Dagenham| 149447.0|
+--------------------+----------+
only showing top 5 rows
>>> total_borough_convictions=borough_conviction_sum.agg({"convictions":"sum"})
Traceback (most recent call last):
File "/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py", line 63, in deco
return f(*a, **kw)
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/protocol.py", line 320, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling o135.agg.
: org.apache.spark.sql.AnalysisException: Cannot resolve column name "convictions" among (borough, sum(value));
at org.apache.spark.sql.Dataset$$anonfun$resolve$1.apply(Dataset.scala:222)
at org.apache.spark.sql.Dataset$$anonfun$resolve$1.apply(Dataset.scala:222)
at scala.Option.getOrElse(Option.scala:121)
at org.apache.spark.sql.Dataset.resolve(Dataset.scala:221)
at org.apache.spark.sql.Dataset.col(Dataset.scala:1241)
at org.apache.spark.sql.Dataset.apply(Dataset.scala:1208)
at org.apache.spark.sql.RelationalGroupedDataset$$anonfun$agg$2.apply(RelationalGroupedDataset.scala:172)
at org.apache.spark.sql.RelationalGroupedDataset$$anonfun$agg$2.apply(RelationalGroupedDataset.scala:171)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
at scala.collection.immutable.Map$Map1.foreach(Map.scala:116)
at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
at scala.collection.AbstractTraversable.map(Traversable.scala:104)
at org.apache.spark.sql.RelationalGroupedDataset.agg(RelationalGroupedDataset.scala:171)
at org.apache.spark.sql.RelationalGroupedDataset.agg(RelationalGroupedDataset.scala:190)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:214)
at java.lang.Thread.run(Thread.java:748)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "
File "/usr/hdp/current/spark2-client/python/pyspark/sql/dataframe.py", line 1327, in agg
return self.groupBy().agg(*exprs)
File "/usr/hdp/current/spark2-client/python/pyspark/sql/group.py", line 88, in agg
jdf = self._jgd.agg(exprs[0])
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/java_gateway.py", line 1160, in __call__
File "/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py", line 69, in deco
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
pyspark.sql.utils.AnalysisException: 'Cannot resolve column name "convictions" among (borough, sum(value));'
>>> total_borough_convictions = borough_conviction_sum.agg({"convictions":"sum"})
Traceback (most recent call last):
File "/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py", line 63, in deco
return f(*a, **kw)
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/protocol.py", line 320, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling o167.agg.
: org.apache.spark.sql.AnalysisException: Cannot resolve column name "convictions" among (borough, sum(value));
at org.apache.spark.sql.Dataset$$anonfun$resolve$1.apply(Dataset.scala:222)
at org.apache.spark.sql.Dataset$$anonfun$resolve$1.apply(Dataset.scala:222)
at scala.Option.getOrElse(Option.scala:121)
at org.apache.spark.sql.Dataset.resolve(Dataset.scala:221)
at org.apache.spark.sql.Dataset.col(Dataset.scala:1241)
at org.apache.spark.sql.Dataset.apply(Dataset.scala:1208)
at org.apache.spark.sql.RelationalGroupedDataset$$anonfun$agg$2.apply(RelationalGroupedDataset.scala:172)
at org.apache.spark.sql.RelationalGroupedDataset$$anonfun$agg$2.apply(RelationalGroupedDataset.scala:171)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
at scala.collection.immutable.Map$Map1.foreach(Map.scala:116)
at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
at scala.collection.AbstractTraversable.map(Traversable.scala:104)
at org.apache.spark.sql.RelationalGroupedDataset.agg(RelationalGroupedDataset.scala:171)
at org.apache.spark.sql.RelationalGroupedDataset.agg(RelationalGroupedDataset.scala:190)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:214)
at java.lang.Thread.run(Thread.java:748)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "
File "/usr/hdp/current/spark2-client/python/pyspark/sql/dataframe.py", line 1327, in agg
return self.groupBy().agg(*exprs)
File "/usr/hdp/current/spark2-client/python/pyspark/sql/group.py", line 88, in agg
jdf = self._jgd.agg(exprs[0])
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/java_gateway.py", line 1160, in __call__
File "/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py", line 69, in deco
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
pyspark.sql.utils.AnalysisException: 'Cannot resolve column name "convictions" among (borough, sum(value));'
>>> borough_conviction_sum=data.groupBy('borough').agg({"value":"sum"}).withColumnRenamed("sum(value)","convictions")
>>> total_borough_convictions = borough_conviction_sum.agg({"convictions":"sum"})
>>> total_borough_convictions.show()
[Stage 37:> (0 [Stage 37:============> (1 [Stage 37:==================================================> (4 [Stage 37:============================================================================> (6 [Stage 37:======================================================================================================> (8[Stage 37:==============================================================================================================================================================================================> (15 +----------------+
|sum(convictions)|
+----------------+
| 6447758.0|
+----------------+
>>> total_convictions = total_borough_convictions.collect()
[Stage 40:> (0 [Stage 40:============> (1 [Stage 40:=========================================================================================> (7[Stage 40:==============================================================================================================================> (10[Stage 40:========================================================================================================================================================> (12[Stage 40:=================================================================================================================================================================================> (14[Stage 40:==============================================================================================================================================================================================> (15 >>> total_convictions = total_borough_convictions.collect()[0][0]
>>> total_convictions = total_borough_convictions.collect()
>>> total_convictions = total_borough_convictions.collect()[0][0]
>>> import pyspark.sql.functions as func
>>> borough_percentage_contribution = borough_conviction_sum.withColumn(\
... func.round(borough_conviction_sum.convictions / total_convictions * 100, 2))
Traceback (most recent call last):
File "
TypeError: withColumn() missing 1 required positional argument: 'col'
>>> borough_percentage_contribution = borough_conviction_sum.withColumn(
... "% contribution",
... func.round(borough_conviction_sum.convictions / total_convictions * 100, 2))
>>> borough_percentage_contribution.printSchema()
root
|-- borough: string (nullable = true)
|-- convictions: double (nullable = true)
|-- % contribution: double (nullable = true)
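Note: because the new column's name contains a space and a "%", it cannot be reached with attribute access (borough_percentage_contribution.% contribution is not valid Python), which is why the orderBy below indexes the column by position. Two equivalent, arguably clearer spellings (a sketch):
# Index by name instead of by position [2]:
borough_percentage_contribution.orderBy(
    borough_percentage_contribution["% contribution"].desc()).show()
# Or use the functions module (imported above as func):
borough_percentage_contribution.orderBy(func.desc("% contribution")).show()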
>>> borough_percentage_contribution.orderBy(borough_percentage_contribution[2].desc()).show()
+--------------------+-----------+--------------+
| borough|convictions|% contribution|
+--------------------+-----------+--------------+
| Westminster| 455028.0| 7.06|
| Lambeth| 292178.0| 4.53|
| Southwark| 278809.0| 4.32|
| Camden| 275147.0| 4.27|
| Newham| 262024.0| 4.06|
| Croydon| 260294.0| 4.04|
| Ealing| 251562.0| 3.9|
| Islington| 230286.0| 3.57|
| Tower Hamlets| 228613.0| 3.55|
| Brent| 227551.0| 3.53|
| Hackney| 217119.0| 3.37|
| Lewisham| 215137.0| 3.34|
| Haringey| 213272.0| 3.31|
| Barnet| 212191.0| 3.29|
| Hillingdon| 209680.0| 3.25|
| Wandsworth| 204741.0| 3.18|
| Waltham Forest| 203879.0| 3.16|
| Enfield| 193880.0| 3.01|
| Hounslow| 186772.0| 2.9|
|Hammersmith and F...| 185259.0| 2.87|
+--------------------+-----------+--------------+
only showing top 20 rows
>>> conviction_monthly=data.filter(data['year']==2014).groupBy("month").agg({'value":"sum"}).withColumnRenamed("sum(value)","convictions")
File "
conviction_monthly=data.filter(data['year']==2014).groupBy("month").agg({'value":"sum"}).withColumnRenamed("sum(value)","convictions")
^
SyntaxError: EOL while scanning string literal
>>> conviction_monthly=data.filter(data['year']==2014).groupBy("month").agg({"value":"sum"}).withColumnRenamed("sum(value)","convictions")
>>> total_conviction_monthly = conviction_monthly.agg({"convictions":"sum"}) .collect()[0][0]
>>> total_conviction_monthly = conviction_monthly.agg({"convictions":"sum"}) .collect()
>>> total_conviction_monthly = conviction_monthly .withColumn("percent",
... func.round(conviction_monthly.convictions/total_conviction_monthly * 100, 2))
Traceback (most recent call last):
File "
File "/usr/hdp/current/spark2-client/python/pyspark/sql/column.py", line 116, in _
njc = getattr(self._jc, name)(jc)
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/java_gateway.py", line 1160, in __call__
File "/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py", line 63, in deco
return f(*a, **kw)
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/protocol.py", line 320, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling o247.divide.
: java.lang.RuntimeException: Unsupported literal type class java.util.ArrayList [[680183.0]]
at org.apache.spark.sql.catalyst.expressions.Literal$.apply(literals.scala:77)
at org.apache.spark.sql.catalyst.expressions.Literal$$anonfun$create$2.apply(literals.scala:163)
at org.apache.spark.sql.catalyst.expressions.Literal$$anonfun$create$2.apply(literals.scala:163)
at scala.util.Try.getOrElse(Try.scala:79)
at org.apache.spark.sql.catalyst.expressions.Literal$.create(literals.scala:162)
at org.apache.spark.sql.functions$.typedLit(functions.scala:113)
at org.apache.spark.sql.functions$.lit(functions.scala:96)
at org.apache.spark.sql.Column.$div(Column.scala:746)
at org.apache.spark.sql.Column.divide(Column.scala:761)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:214)
at java.lang.Thread.run(Thread.java:748)
>>> total_conviction_monthly.columns
Traceback (most recent call last):
File "
AttributeError: 'list' object has no attribute 'columns'
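Note: both failures come from the same slip: total_conviction_monthly was reassigned to the raw result of collect(), a Python list (the [[680183.0]] in the Java error), and Spark cannot turn a list into a column literal for the division; the .columns probe confirms it is a list, not a DataFrame. The session never retries the fix, but the working pattern should look like this (a sketch reusing the names above):
# Extract the scalar before dividing, instead of passing the list of Rows.
total_conviction_monthly = conviction_monthly.agg(
    {"convictions": "sum"}).collect()[0][0]
monthly_percentage = conviction_monthly.withColumn(
    "percent",
    func.round(conviction_monthly.convictions / total_conviction_monthly * 100, 2))
monthly_percentage.orderBy(monthly_percentage.percent.desc()).show()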
>>> total_conviction_monthly = conviction_monthly .withColumn("percent",
... func.round(conviction_monthly.convictions/total_conviction_monthly * 100, 2))
Traceback (most recent call last):
File "
File "/usr/hdp/current/spark2-client/python/pyspark/sql/column.py", line 116, in _
njc = getattr(self._jc, name)(jc)
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/java_gateway.py", line 1160, in __call__
File "/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py", line 63, in deco
return f(*a, **kw)
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/protocol.py", line 320, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling o272.divide.
: java.lang.RuntimeException: Unsupported literal type class java.util.ArrayList [[680183.0]]
at org.apache.spark.sql.catalyst.expressions.Literal$.apply(literals.scala:77)
at org.apache.spark.sql.catalyst.expressions.Literal$$anonfun$create$2.apply(literals.scala:163)
at org.apache.spark.sql.catalyst.expressions.Literal$$anonfun$create$2.apply(literals.scala:163)
at scala.util.Try.getOrElse(Try.scala:79)
at org.apache.spark.sql.catalyst.expressions.Literal$.create(literals.scala:162)
at org.apache.spark.sql.functions$.typedLit(functions.scala:113)
at org.apache.spark.sql.functions$.lit(functions.scala:96)
at org.apache.spark.sql.Column.$div(Column.scala:746)
at org.apache.spark.sql.Column.divide(Column.scala:761)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:214)
at java.lang.Thread.run(Thread.java:748)
>>> total_conviction_monthly.columns
Traceback (most recent call last):
File "
AttributeError: 'list' object has no attribute 'columns'
>>>
>>>
>>> total_conviction_monthly = conviction_monthly .withColumn("percent",
... func.round(conviction_monthly.convictions/total_conviction_monthly * 100, 2))
Traceback (most recent call last):
File "
File "/usr/hdp/current/spark2-client/python/pyspark/sql/column.py", line 116, in _
njc = getattr(self._jc, name)(jc)
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/java_gateway.py", line 1160, in __call__
File "/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py", line 63, in deco
return f(*a, **kw)
File "/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/protocol.py", line 320, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling o297.divide.
: java.lang.RuntimeException: Unsupported literal type class java.util.ArrayList [[680183.0]]
at org.apache.spark.sql.catalyst.expressions.Literal$.apply(literals.scala:77)
at org.apache.spark.sql.catalyst.expressions.Literal$$anonfun$create$2.apply(literals.scala:163)
at org.apache.spark.sql.catalyst.expressions.Literal$$anonfun$create$2.apply(literals.scala:163)
at scala.util.Try.getOrElse(Try.scala:79)
at org.apache.spark.sql.catalyst.expressions.Literal$.create(literals.scala:162)
at org.apache.spark.sql.functions$.typedLit(functions.scala:113)
at org.apache.spark.sql.functions$.lit(functions.scala:96)
at org.apache.spark.sql.Column.$div(Column.scala:746)
at org.apache.spark.sql.Column.divide(Column.scala:761)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:214)
at java.lang.Thread.run(Thread.java:748)
>>>
>>> crimes_category.orderBy(crimes_category.convictions.desc()).show()
Traceback (most recent call last):
File "
NameError: name 'crimes_category' is not defined
>>> crimes_category=data.groupBy("major_category").agg({"value":"sum"}).withColumnRenamed("sum(value)","convictions")
>>> crimes_category.orderBy(crimes_category.convictions.desc()).show()
+--------------------+-----------+
| major_category|convictions|
+--------------------+-----------+
| Theft and Handling| 2661861.0|
|Violence Against ...| 1558081.0|
| Burglary| 754293.0|
| Criminal Damage| 630938.0|
| Drugs| 470765.0|
| Robbery| 258873.0|
|Other Notifiable ...| 106349.0|
| Fraud or Forgery| 5325.0|
| Sexual Offences| 1273.0|
+--------------------+-----------+
>>> year_df=data.select('year')
>>> year_df.agg({'year':'min'}).show()
+---------+
|min(year)|
+---------+
| 2008|
+---------+
>>> year_df.agg({'year':'max'}).show()
+---------+
|max(year)|
+---------+
| 2016|
+---------+
>>> year_df.describe().show()
+-------+------------------+
|summary| year|
+-------+------------------+
| count| 13490604|
| mean| 2012.0|
| stddev|2.5819889931674394|
| min| 2008|
| max| 2016|
+-------+------------------+
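Note: describe() reports count, mean, stddev, min and max. Since the CSV was loaded with only the header option, every column here is a string; Spark casts numeric-looking strings on the fly for these statistics, but arithmetic and comparisons are safer with real types. Two common options (a sketch; inferSchema costs an extra pass over the file):
# Option 1: infer column types at load time.
data = spark.read.format("csv") \
    .option("header", "true") \
    .option("inferSchema", "true") \
    .load("file:///home/rameshkumar/datasets/london_crime_by_lsoa.csv")
# Option 2: cast selected columns explicitly.
data = data.withColumn("year", data["year"].cast("int")) \
           .withColumn("value", data["value"].cast("int"))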
>>> data.crosstab('borough', 'major_category') .select('borough_major_category', 'Burglary', 'Drugs', 'Fraud or Forgery', 'Robbery') .show()
+----------------------+--------+-----+----------------+-------+
|borough_major_category|Burglary|Drugs|Fraud or Forgery|Robbery|
+----------------------+--------+-----+----------------+-------+
| Havering| 32400|32616| 5508| 27648|
| Merton| 26784|29160| 5724| 23652|
| Haringey| 31320|35424| 10368| 29484|
| Tower Hamlets| 31104|37368| 5400| 28512|
| Bromley| 42552|42336| 8532| 34668|
| Enfield| 39528|44064| 9720| 35532|
| Kingston upon Thames| 21168|22140| 3780| 15660|
| Westminster| 27648|32616| 8748| 25056|
| Richmond upon Thames| 24840|23004| 2808| 18468|
| Lewisham| 36504|43740| 11016| 34884|
| Brent| 37368|46980| 9288| 34128|
| Barking and Dagenham| 23760|26244| 7236| 22248|
| Redbridge| 34776|36504| 8532| 32400|
| Islington| 26568|34128| 5184| 26244|
| Hackney| 31104|38772| 7560| 29160|
| Newham| 35424|41580| 10692| 34452|
| City of London| 540| 756| 0| 540|
| Sutton| 26136|26784| 3024| 21060|
| Southwark| 35856|45144| 11772| 35316|
| Harrow| 29592|31212| 4752| 23976|
+----------------------+--------+-----+----------------+-------+
only showing top 20 rows
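Note: crosstab() builds a contingency table: the label column is named "<col1>_<col2>" (here borough_major_category) and each cell is a count of co-occurring rows, not a sum of convictions. To get summed values per borough and category in the same wide shape, a groupBy/pivot sketch (assuming value has been cast to a numeric type, as in the note above):
data.groupBy("borough") \
    .pivot("major_category", ["Burglary", "Drugs", "Fraud or Forgery", "Robbery"]) \
    .sum("value") \
    .show()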