examples/07_tutorials/KDD2020-tutorial/pandas-subgraph-local-samples.ipynb
<i>Copyright (c) Recommenders contributors.</i>
<i>Licensed under the MIT License.</i>
from utils.PandasMagClass import MicrosoftAcademicGraph
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
import os
# Load the raw Microsoft Academic Graph (MAG) tables used throughout this
# analysis. Each `get_data_frame(...)` call reads one MAG table from `root`
# into a pandas DataFrame; the trailing `.head(3)` expressions are notebook
# display statements (no-ops when run as a plain script).
root = './data_folder/raw/'
mag = MicrosoftAcademicGraph(root)
# load MAG data: Papers
df_papers = mag.get_data_frame('Papers')
df_papers.head(3)
# load MAG data: Paper Author Affiliations (one row per authorship record)
df_paper_author_affiliations = mag.get_data_frame('PaperAuthorAffiliations')
df_paper_author_affiliations.head(3)
# load MAG data: Fields Of Study
df_fos = mag.get_data_frame('FieldsOfStudy')
df_fos.head(3)
# load MAG data: Paper Fields Of Study (paper -> field-of-study links)
df_paper_fos = mag.get_data_frame('PaperFieldsOfStudy')
df_paper_fos.head(3)
# load MAG data: Journals
df_journals = mag.get_data_frame('Journals')
df_journals.head(3)
# load MAG data: Affiliations
df_affiliations = mag.get_data_frame('Affiliations')
df_affiliations.head(3)
# Top-10 journals by number of published papers.
# Keep only papers linked to a journal, attach the journal name, then count
# papers per (JournalId, NormalizedName) pair.
journal_papers = df_papers[df_papers['JournalId'].notnull()]
journal_papers = journal_papers.merge(df_journals, on='JournalId', how='inner')
journal_papers = journal_papers[['JournalId', 'NormalizedName', 'PaperId']]
per_journal = (
    journal_papers
    .groupby(['JournalId', 'NormalizedName'])
    .size()
    .to_frame('PaperCount')
    .reset_index()
    .sort_values(by=['PaperCount'], ascending=False)
)
# Ten journals with the largest paper counts (name + count only).
journals_stats_top_10 = per_journal.nlargest(10, 'PaperCount')[['NormalizedName', 'PaperCount']]
journals_stats_top_10
# Top-10 journals: horizontal bar chart, one bar per journal.
fig_topk_journals = plt.figure(figsize=(12, 6))
ax_topk_journals = fig_topk_journals.add_subplot()
sns.barplot(x="PaperCount", y="NormalizedName", data=journals_stats_top_10, ax=ax_topk_journals)
plt.show()
# Average team size: authors-per-paper, averaged over all papers.
# df_paper_author_affiliations has one row per authorship record, so the
# number of rows per PaperId is that paper's team size.
paper_team_size = (
    df_paper_author_affiliations
    .groupby(['PaperId'])
    .size()
    .to_frame('TeamSize')
    .reset_index()
)
paper_team_size_avg = paper_team_size.TeamSize.mean()
paper_team_size_avg
# Team-size distribution: number of papers per team-size interval.
# `pd.cut` buckets each paper's team size into (0, 2], (2, 4], ..., (20, 5465];
# the final very wide bin collects the long tail of huge collaborations.
# NOTE: a per-exact-team-size count (`team_size_distribution`) was previously
# computed here but never used anywhere — removed as dead work.
bins = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 5465]
team_size_range = (
    paper_team_size
    .groupby(pd.cut(paper_team_size['TeamSize'], bins=bins))
    .size()
    .to_frame('PaperCount')
    .reset_index()
)
team_size_range
# Team-size distribution: bar chart of paper counts per team-size bucket.
fig_team_size = plt.figure(figsize=(12, 6))
ax_team_size_distribution = fig_team_size.add_subplot()
sns.barplot(x="TeamSize", y="PaperCount", data=team_size_range, ax=ax_team_size_distribution)
plt.show()
# Affiliation -> geographic region lookup table (tab-separated, no header row).
df_affiliation_regions = pd.read_csv(
    os.path.join(root, 'AffiliationRegions.txt'),
    sep='\t',
    low_memory=False,
    names=('AffiliationId', 'Name', 'Region'),
)
df_affiliation_regions.head(3)
# Top-5 regions by papers published in 2000-2019, with per-year counts.
# - restrict to papers published in the last two decades
recent_papers = df_papers[df_papers['Year'].between(2000, 2019)]
# - attach author affiliations; keep distinct, non-null (paper, year, affiliation) rows
paper_affiliations = recent_papers.merge(
    df_paper_author_affiliations, on='PaperId', how='inner'
)[['PaperId', 'Year', 'AffiliationId']]
paper_affiliations = paper_affiliations[paper_affiliations['AffiliationId'].notnull()].drop_duplicates()
# - attach each affiliation's region
paper_regions = df_affiliation_regions.merge(
    paper_affiliations, on='AffiliationId', how='inner'
)[['PaperId', 'Year', 'AffiliationId', 'Region']]
# - collapse to one row per (paper, year, region) so a paper whose authors sit
#   at several affiliations in the same region is counted once for that region
paper_year_region = paper_regions.groupby(['PaperId', 'Year', 'Region']).size().reset_index()[['PaperId', 'Year', 'Region']]
# - papers per (region, year)
region_year_papercount = (
    paper_year_region
    .groupby(['Region', 'Year'])
    .size()
    .to_frame('PaperCount')
    .reset_index()
    .sort_values(by=['PaperCount'], ascending=False)
)
# - total papers per region, then the five largest regions
region_totals = region_year_papercount.groupby(['Region'])['PaperCount'].sum().reset_index()
top_region_papercount = region_totals.nlargest(5, 'PaperCount')['Region'].tolist()
# - per-year counts restricted to the top-5 regions
top_region_year_papercount = region_year_papercount[region_year_papercount['Region'].isin(set(top_region_papercount))]
top_region_year_papercount.head(10)
# Top-5 regions: per-year paper counts as a line chart, one line per region.
fig_geo = plt.figure(figsize=(12, 6))
ax_topk_geolocation = fig_geo.add_subplot()
sns.lineplot(
    x="Year", y="PaperCount", hue="Region", style="Region",
    markers=['o', 's', 'v', 'd', '^'], hue_order=top_region_papercount,
    dashes=False, data=top_region_year_papercount, ax=ax_topk_geolocation,
)
# Render years as whole integers, one tick per year.
ax_topk_geolocation.xaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: '{:.0f}'.format(x)))
ax_topk_geolocation.xaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
# Share of papers per top-level (level-0) field of study; every field outside
# the top 5 is folded into an 'others' bucket.
L0_TopN = 5
level0_fields = df_fos.loc[df_fos['Level'] == 0]
level0_paper_fos = level0_fields.merge(df_paper_fos, on='FieldOfStudyId', how='inner')
# papers per field name, sorted descending
L0_counts = level0_paper_fos.groupby('NormalizedName').agg({'PaperId': ['count']})
L0_counts.columns = ['PaperCnt']
L0_counts.sort_values(by=['PaperCnt'], inplace=True, ascending=False)
L0_counts = L0_counts.reset_index()
# everything past the top-N becomes 'others', then re-aggregate and re-sort
L0_counts.loc[L0_counts.index >= L0_TopN, 'NormalizedName'] = 'others'
L0_fos_stats_topN = L0_counts.groupby('NormalizedName').agg({'PaperCnt': ['sum']})
L0_fos_stats_topN.columns = ['PaperCntSum']
L0_fos_stats_topN.sort_values(by=['PaperCntSum'], inplace=True, ascending=False)
L0_fos_stats_topN = L0_fos_stats_topN.reset_index()
# percentage of all level-0 tagged papers, formatted as 'xx.xx%'
share = L0_fos_stats_topN.PaperCntSum / L0_fos_stats_topN.PaperCntSum.sum()
L0_fos_stats_topN['Percentage'] = share.map(lambda val: "{0:.2f}%".format(val * 100))
L0_fos_stats_topN
# Level-0 field-of-study distribution: pie chart of paper counts per domain.
domains = L0_fos_stats_topN['NormalizedName'].tolist()
PaperCnt_domain = L0_fos_stats_topN['PaperCntSum'].tolist()
fig = plt.figure(figsize=(15, 8))
plt.pie(PaperCnt_domain, labels=domains)
plt.title("No. of papers in different domains")
plt.show()
# Top-10 level-1 (sub-domain) fields of study by paper count, with each
# field's share of the top-10 subtotal.
L1_TopN = 10
level1_fields = df_fos.loc[df_fos['Level'] == 1]
level1_paper_fos = level1_fields.merge(df_paper_fos, on='FieldOfStudyId', how='inner')
L1_counts = level1_paper_fos.groupby('NormalizedName').agg({'PaperId': ['count']})
L1_counts.columns = ['PaperCnt']
L1_counts.sort_values(by=['PaperCnt'], inplace=True, ascending=False)
L1_counts = L1_counts.reset_index()
L1_fos_stats_topN1 = L1_counts.nlargest(L1_TopN, 'PaperCnt')
# NOTE: percentages are relative to the top-10 subtotal, not to all papers.
share = L1_fos_stats_topN1.PaperCnt / L1_fos_stats_topN1.PaperCnt.sum()
L1_fos_stats_topN1['Percentage'] = share.map(lambda val: "{0:.2f}%".format(val * 100))
L1_fos_stats_topN1
# Level-1 field-of-study distribution: pie chart of paper counts per subdomain.
domains = L1_fos_stats_topN1['NormalizedName'].tolist()
PaperCnt_domain = L1_fos_stats_topN1['PaperCnt'].tolist()
fig = plt.figure(figsize=(15, 8))
plt.pie(PaperCnt_domain, labels=domains)
plt.title("No. of papers in different subdomains")
plt.show()
# Yearly paper counts for the top-5 level-0 fields (2000-2019); all remaining
# level-0 fields are folded into an 'others' series.
L0_topN = 5
level0_fields = df_fos.loc[df_fos['Level'] == 0]
level0_paper_fos = level0_fields.merge(df_paper_fos, on='FieldOfStudyId', how='inner')[['NormalizedName', 'Level', 'PaperId']]
recent_papers = df_papers[df_papers['Year'].between(2000, 2019)]
level0_paper_years = level0_paper_fos.merge(recent_papers, on='PaperId', how='inner')[['NormalizedName', 'Level', 'PaperId', 'Year']]
# papers per (field, year)
fos_L0_year_papercount = (
    level0_paper_years
    .groupby(['NormalizedName', 'Year'])
    .size()
    .to_frame('PaperCount')
    .reset_index()
    .sort_values(by=['PaperCount'], ascending=False)
)
# five fields with the largest overall totals
field_totals = fos_L0_year_papercount.groupby(['NormalizedName'])['PaperCount'].sum().reset_index()
top_fos_L0_papercount = field_totals.nlargest(L0_topN, 'PaperCount')['NormalizedName'].tolist()
# relabel everything else as 'others' and re-aggregate per (field, year)
fos_L0_year_papercount.loc[~fos_L0_year_papercount['NormalizedName'].isin(set(top_fos_L0_papercount)), 'NormalizedName'] = 'others'
fos_L0_year_topN = fos_L0_year_papercount.groupby(['NormalizedName', 'Year']).agg({'PaperCount': ['sum']}).reset_index()
fos_L0_year_topN.columns = ['NormalizedName', 'Year', 'PaperCountSum']
fos_L0_year_topN.head(30)
# Top level-0 fields per year: line chart, one line per field plus 'others'.
hue_order_L0 = top_fos_L0_papercount + ['others']
fig_fos_L0 = plt.figure(figsize=(12, 6))
ax_topk_fos_L0 = fig_fos_L0.add_subplot()
sns.lineplot(
    x="Year", y="PaperCountSum", hue="NormalizedName", style="NormalizedName",
    markers=['o', 's', 'v', 'd', '^', 'h'], hue_order=hue_order_L0,
    dashes=False, data=fos_L0_year_topN, ax=ax_topk_fos_L0,
)
# Render years as whole integers, one tick per year.
ax_topk_fos_L0.xaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: '{:.0f}'.format(x)))
ax_topk_fos_L0.xaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
# Yearly paper counts for the top-10 level-1 fields (2000-2019).
L1_topN = 10
level1_fields = df_fos.loc[df_fos['Level'] == 1]
level1_paper_fos = level1_fields.merge(df_paper_fos, on='FieldOfStudyId', how='inner')[['NormalizedName', 'Level', 'PaperId']]
recent_papers = df_papers[df_papers['Year'].between(2000, 2019)]
level1_paper_years = level1_paper_fos.merge(recent_papers, on='PaperId', how='inner')[['NormalizedName', 'Level', 'PaperId', 'Year']]
# papers per (field, year)
fos_L1_year_papercount = (
    level1_paper_years
    .groupby(['NormalizedName', 'Year'])
    .size()
    .to_frame('PaperCount')
    .reset_index()
    .sort_values(by=['PaperCount'], ascending=False)
)
# ten fields with the largest overall totals
field_totals_L1 = fos_L1_year_papercount.groupby(['NormalizedName'])['PaperCount'].sum().reset_index()
top_fos_L1_papercount = field_totals_L1.nlargest(L1_topN, 'PaperCount')['NormalizedName'].tolist()
# keep only rows belonging to the top-10 fields (no 'others' bucket here)
top_fos_L1_year_papercount = fos_L1_year_papercount[fos_L1_year_papercount['NormalizedName'].isin(set(top_fos_L1_papercount))]
top_fos_L1_year_papercount.head(3)
# Top level-1 fields per year: line chart, one line per field.
fig_fos_L1 = plt.figure(figsize=(12, 6))
ax_topk_fos_L1 = fig_fos_L1.add_subplot()
sns.lineplot(
    x="Year", y="PaperCount", hue="NormalizedName", style="NormalizedName",
    markers=['o', 's', 'v', 'd', '^', 'h', '<', '.', 'p', '*'],
    hue_order=top_fos_L1_papercount, dashes=False,
    data=top_fos_L1_year_papercount, ax=ax_topk_fos_L1,
)
# Render years as whole integers, one tick per year.
ax_topk_fos_L1.xaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: '{:.0f}'.format(x)))
ax_topk_fos_L1.xaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()