API URL data cannot be fetched, status code 401

Question

I am trying to fetch data from the nseindia API URL https://www.nseindia.com/api/historical/indicesHistory?indexType=NIFTY%2050&from=28-07-2023&to=28-07-2023 into an Excel file; it is just the OHLC values for the NIFTY index.

I have created variables for the date input.

I am getting the following error:

> Failed to fetch data from the URL: Status Code: 401

import requests
import pandas as pd
import xlwings as xw

def NseHistoricalData(index_type, from_date, to_date):
    url = f"https://www.nseindia.com/api/historical/indicesHistory?indexType={index_type}&from={from_date}&to={to_date}"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36',
        'Referer': 'https://www.nseindia.com/',
        'Accept-Language': 'en-US,en;q=0.9',
        'Accept-Encoding': 'gzip, deflate, br'}
    session = requests.Session()
    response_session = session.get(url, headers=headers)

    try:
        if response_session.status_code == 200:
            data = response_session.json()['data']['indexCloseOnlineRecords']
            if data:
                # Extract the required values
                eod_open_index_val = data[0]['EOD_OPEN_INDEX_VAL']
                eod_high_index_val = data[0]['EOD_HIGH_INDEX_VAL']
                eod_close_index_val = data[0]['EOD_CLOSE_INDEX_VAL']
                eod_low_index_val = data[0]['EOD_LOW_INDEX_VAL']

                # Open the Excel file
                file_name = r"C:\Users\vhind\Desktop\a\Zerodha\Version_v0\Zerodha\NSE_data.xlsm"
                wb = xw.Book(file_name)
                sheet_name = "Main"

                # Write the extracted values to the sheet
                wb.sheets[sheet_name].range('M8').value = eod_open_index_val
                wb.sheets[sheet_name].range('M9').value = eod_high_index_val
                wb.sheets[sheet_name].range('M10').value = eod_close_index_val
                wb.sheets[sheet_name].range('M11').value = eod_low_index_val

                # Save the changes to the Excel file
                wb.save(file_name)

                print("已成功将历史数据写入工作表")
            else:
                print("未找到指定日期范围的数据")
        else:
            print(f"无法从URL获取数据状态码{response_session.status_code}")
    except Exception as e:
        print(f"发生错误{str(e)}")

# Read the date range from the Excel sheet
file_name = r"C:\Users\vhind\Desktop\a\Zerodha\Version_v0\Zerodha\NSE_data.xlsm"
wb = xw.Book(file_name)
sheet_name = "Main"
from_date = wb.sheets[sheet_name].range('C15').value
to_date = wb.sheets[sheet_name].range('C16').value
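# Note: the example URL in the question uses dd-mm-yyyy dates (e.g. 28-07-2023);
# if C15/C16 hold date cells, xlwings returns datetime objects, which may need
# formatting (for instance with .strftime('%d-%m-%Y')) before being passed to
# NseHistoricalData.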

# Specify the index type for historical data (e.g., NIFTY 50)
index_type = "NIFTY%2050"

# Call the function to fetch and write the historical data
NseHistoricalData(index_type, from_date, to_date)

Answer 1

Score: 1

Try using an API key when you make the request. You can create a free account at https://scrapeops.io/app/register/proxy

import urllib.parse

def get_scrapeops_url(url):
    API_KEY = 'YOUR_API_KEY'
    payload = {'api_key': API_KEY, 'url': url}
    proxy_url = 'https://proxy.scrapeops.io/v1/?' + urllib.parse.urlencode(payload)
    return proxy_url

def NseHistoricalData(index_type, from_date, to_date):
    # url and headers
    session = requests.Session()
    response_session = session.get(get_scrapeops_url(url), headers=headers)

    # rest of the code
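For reference, a minimal sketch of how the wrapper above could be combined with the request from the question, returning the fetched records instead of writing to Excel. The proxy endpoint and the YOUR_API_KEY placeholder come from the answer; the trimmed header set and the return-based structure are illustrative assumptions, not the exact original code.

import urllib.parse
import requests

def get_scrapeops_url(url):
    API_KEY = 'YOUR_API_KEY'  # placeholder: replace with your own ScrapeOps key
    # Route the target URL through the ScrapeOps proxy endpoint.
    payload = {'api_key': API_KEY, 'url': url}
    return 'https://proxy.scrapeops.io/v1/?' + urllib.parse.urlencode(payload)

def NseHistoricalData(index_type, from_date, to_date):
    url = (f"https://www.nseindia.com/api/historical/indicesHistory"
           f"?indexType={index_type}&from={from_date}&to={to_date}")
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',
        'Referer': 'https://www.nseindia.com/'}
    session = requests.Session()
    # Fetch the proxied URL instead of hitting the NSE endpoint directly.
    response_session = session.get(get_scrapeops_url(url), headers=headers)
    if response_session.status_code != 200:
        print(f"Failed to fetch data from the URL. Status Code: {response_session.status_code}")
        return None
    return response_session.json()['data']['indexCloseOnlineRecords']

# Example call with the index type and dates used in the question.
records = NseHistoricalData("NIFTY%2050", "28-07-2023", "28-07-2023")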
英文:

Try using an API Key when you make your request. You can create a free account from https://scrapeops.io/app/register/proxy

import urllib
def get_scrapeops_url(url):
API_KEY = 'YOUR_API_KEY'
payload = {'api_key': API_KEY, 'url': url}
proxy_url = 'https://proxy.scrapeops.io/v1/?' + urllib.parse.urlencode(payload)
return proxy_url
def NseHistoricalData(index_type, from_date, to_date):
# url and headers
session = requests.Session()
response_session = requests.get(get_scrapeops_url(url), headers=headers)
# rest of the code
